tree:
https://git.kernel.org/pub/scm/linux/kernel/git/sashal/linux-stable.git
pending-5.4
head: 830a1d62935d74cfbfdbf6f806932fbcebf8dfe7
commit: a644f6d0319038a98ba3395dd3aeb89f5594a847 [35/50] bpf: Fix leakage under
speculation on mispredicted branches
config: i386-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
#
https://git.kernel.org/pub/scm/linux/kernel/git/sashal/linux-stable.git/c...
git remote add sashal-linux-stable
https://git.kernel.org/pub/scm/linux/kernel/git/sashal/linux-stable.git
git fetch --no-tags sashal-linux-stable pending-5.4
git checkout a644f6d0319038a98ba3395dd3aeb89f5594a847
# save the attached .config to linux build tree
make W=1 ARCH=i386
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
kernel/bpf/verifier.c: In function 'check_cond_jmp_op':
> kernel/bpf/verifier.c:6097:11: error: 'struct
bpf_verifier_env' has no member named 'bypass_spec_v1'
6097 | if
(!env->bypass_spec_v1 &&
| ^~
kernel/bpf/verifier.c:6108:11: error: 'struct bpf_verifier_env' has no member
named 'bypass_spec_v1'
6108 | if (!env->bypass_spec_v1 &&
| ^~
In file included from include/linux/bpf_verifier.h:8,
from kernel/bpf/verifier.c:12:
kernel/bpf/verifier.c: In function 'jit_subprogs':
include/linux/filter.h:341:4: warning: cast between incompatible function types from
'unsigned int (*)(const void *, const struct bpf_insn *)' to 'u64 (*)(u64,
u64, u64, u64, u64)' {aka 'long long unsigned int (*)(long long unsigned int,
long long unsigned int, long long unsigned int, long long unsigned int, long long
unsigned int)'} [-Wcast-function-type]
341 | ((u64 (*)(u64, u64, u64, u64, u64))(x))
| ^
kernel/bpf/verifier.c:9016:16: note: in expansion of macro 'BPF_CAST_CALL'
9016 | insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
| ^~~~~~~~~~~~~
kernel/bpf/verifier.c: In function 'fixup_bpf_calls':
include/linux/filter.h:341:4: warning: cast between incompatible function types from
'void * (* const)(struct bpf_map *, void *)' to 'u64 (*)(u64, u64, u64,
u64, u64)' {aka 'long long unsigned int (*)(long long unsigned int, long long
unsigned int, long long unsigned int, long long unsigned int, long long unsigned
int)'} [-Wcast-function-type]
341 | ((u64 (*)(u64, u64, u64, u64, u64))(x))
| ^
kernel/bpf/verifier.c:9370:17: note: in expansion of macro 'BPF_CAST_CALL'
9370 | insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
| ^~~~~~~~~~~~~
include/linux/filter.h:341:4: warning: cast between incompatible function types from
'int (* const)(struct bpf_map *, void *, void *, u64)' {aka 'int (*
const)(struct bpf_map *, void *, void *, long long unsigned int)'} to 'u64
(*)(u64, u64, u64, u64, u64)' {aka 'long long unsigned int (*)(long long
unsigned int, long long unsigned int, long long unsigned int, long long unsigned int,
long long unsigned int)'} [-Wcast-function-type]
341 | ((u64 (*)(u64, u64, u64, u64, u64))(x))
| ^
kernel/bpf/verifier.c:9374:17: note: in expansion of macro 'BPF_CAST_CALL'
9374 | insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
| ^~~~~~~~~~~~~
include/linux/filter.h:341:4: warning: cast between incompatible function types from
'int (* const)(struct bpf_map *, void *)' to 'u64 (*)(u64, u64, u64, u64,
u64)' {aka 'long long unsigned int (*)(long long unsigned int, long long unsigned
int, long long unsigned int, long long unsigned int, long long unsigned int)'}
[-Wcast-function-type]
341 | ((u64 (*)(u64, u64, u64, u64, u64))(x))
| ^
kernel/bpf/verifier.c:9378:17: note: in expansion of macro 'BPF_CAST_CALL'
9378 | insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
| ^~~~~~~~~~~~~
include/linux/filter.h:341:4: warning: cast between incompatible function types from
'int (* const)(struct bpf_map *, void *, u64)' {aka 'int (* const)(struct
bpf_map *, void *, long long unsigned int)'} to 'u64 (*)(u64, u64, u64, u64,
u64)' {aka 'long long unsigned int (*)(long long unsigned int, long long unsigned
int, long long unsigned int, long long unsigned int, long long unsigned int)'}
[-Wcast-function-type]
341 | ((u64 (*)(u64, u64, u64, u64, u64))(x))
| ^
kernel/bpf/verifier.c:9382:17: note: in expansion of macro 'BPF_CAST_CALL'
9382 | insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
| ^~~~~~~~~~~~~
include/linux/filter.h:341:4: warning: cast between incompatible function types from
'int (* const)(struct bpf_map *, void *)' to 'u64 (*)(u64, u64, u64, u64,
u64)' {aka 'long long unsigned int (*)(long long unsigned int, long long unsigned
int, long long unsigned int, long long unsigned int, long long unsigned int)'}
[-Wcast-function-type]
341 | ((u64 (*)(u64, u64, u64, u64, u64))(x))
| ^
kernel/bpf/verifier.c:9386:17: note: in expansion of macro 'BPF_CAST_CALL'
9386 | insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
| ^~~~~~~~~~~~~
include/linux/filter.h:341:4: warning: cast between incompatible function types from
'int (* const)(struct bpf_map *, void *)' to 'u64 (*)(u64, u64, u64, u64,
u64)' {aka 'long long unsigned int (*)(long long unsigned int, long long unsigned
int, long long unsigned int, long long unsigned int, long long unsigned int)'}
[-Wcast-function-type]
341 | ((u64 (*)(u64, u64, u64, u64, u64))(x))
| ^
kernel/bpf/verifier.c:9390:17: note: in expansion of macro 'BPF_CAST_CALL'
9390 | insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
| ^~~~~~~~~~~~~
vim +6097 kernel/bpf/verifier.c
6026
6027 static int check_cond_jmp_op(struct bpf_verifier_env *env,
6028 struct bpf_insn *insn, int *insn_idx)
6029 {
6030 struct bpf_verifier_state *this_branch = env->cur_state;
6031 struct bpf_verifier_state *other_branch;
6032 struct bpf_reg_state *regs =
this_branch->frame[this_branch->curframe]->regs;
6033 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
6034 u8 opcode = BPF_OP(insn->code);
6035 bool is_jmp32;
6036 int pred = -1;
6037 int err;
6038
6039 /* Only conditional jumps are expected to reach here. */
6040 if (opcode == BPF_JA || opcode > BPF_JSLE) {
6041 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
6042 return -EINVAL;
6043 }
6044
6045 if (BPF_SRC(insn->code) == BPF_X) {
6046 if (insn->imm != 0) {
6047 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
6048 return -EINVAL;
6049 }
6050
6051 /* check src1 operand */
6052 err = check_reg_arg(env, insn->src_reg, SRC_OP);
6053 if (err)
6054 return err;
6055
6056 if (is_pointer_value(env, insn->src_reg)) {
6057 verbose(env, "R%d pointer comparison prohibited\n",
6058 insn->src_reg);
6059 return -EACCES;
6060 }
 6061                 src_reg = &regs[insn->src_reg];
6062 } else {
6063 if (insn->src_reg != BPF_REG_0) {
6064 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
6065 return -EINVAL;
6066 }
6067 }
6068
6069 /* check src2 operand */
6070 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6071 if (err)
6072 return err;
6073
 6074         dst_reg = &regs[insn->dst_reg];
6075 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
6076
6077 if (BPF_SRC(insn->code) == BPF_K)
6078 pred = is_branch_taken(dst_reg, insn->imm,
6079 opcode, is_jmp32);
6080 else if (src_reg->type == SCALAR_VALUE &&
6081 tnum_is_const(src_reg->var_off))
6082 pred = is_branch_taken(dst_reg, src_reg->var_off.value,
6083 opcode, is_jmp32);
6084 if (pred >= 0) {
6085 err = mark_chain_precision(env, insn->dst_reg);
6086 if (BPF_SRC(insn->code) == BPF_X && !err)
6087 err = mark_chain_precision(env, insn->src_reg);
6088 if (err)
6089 return err;
6090 }
6091
6092 if (pred == 1) {
6093 /* Only follow the goto, ignore fall-through. If needed, push
6094 * the fall-through branch for simulation under speculative
6095 * execution.
6096 */
6097 if (!env->bypass_spec_v1 &&
6098
!sanitize_speculative_path(env, insn, *insn_idx + 1,
6099 *insn_idx))
6100 return -EFAULT;
6101 *insn_idx += insn->off;
6102 return 0;
6103 } else if (pred == 0) {
6104 /* Only follow the fall-through branch, since that's where the
6105 * program will go. If needed, push the goto branch for
6106 * simulation under speculative execution.
6107 */
6108 if (!env->bypass_spec_v1 &&
6109 !sanitize_speculative_path(env, insn,
6110 *insn_idx + insn->off + 1,
6111 *insn_idx))
6112 return -EFAULT;
6113 return 0;
6114 }
6115
6116 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
6117 false);
6118 if (!other_branch)
6119 return -EFAULT;
6120 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
6121
6122 /* detect if we are comparing against a constant value so we can adjust
6123 * our min/max values for our dst register.
6124 * this is only legit if both are scalars (or pointers to the same
6125 * object, I suppose, but we don't support that right now), because
6126 * otherwise the different base pointers mean the offsets aren't
6127 * comparable.
6128 */
6129 if (BPF_SRC(insn->code) == BPF_X) {
 6130                 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
6131 struct bpf_reg_state lo_reg0 = *dst_reg;
6132 struct bpf_reg_state lo_reg1 = *src_reg;
6133 struct bpf_reg_state *src_lo, *dst_lo;
6134
6135 dst_lo = &lo_reg0;
6136 src_lo = &lo_reg1;
6137 coerce_reg_to_size(dst_lo, 4);
6138 coerce_reg_to_size(src_lo, 4);
6139
6140 if (dst_reg->type == SCALAR_VALUE &&
6141 src_reg->type == SCALAR_VALUE) {
6142 if (tnum_is_const(src_reg->var_off) ||
6143 (is_jmp32 && tnum_is_const(src_lo->var_off)))
6144 reg_set_min_max(&other_branch_regs[insn->dst_reg],
6145 dst_reg,
6146 is_jmp32
6147 ? src_lo->var_off.value
6148 : src_reg->var_off.value,
6149 opcode, is_jmp32);
6150 else if (tnum_is_const(dst_reg->var_off) ||
6151 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
6152 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
6153 src_reg,
6154 is_jmp32
6155 ? dst_lo->var_off.value
6156 : dst_reg->var_off.value,
6157 opcode, is_jmp32);
6158 else if (!is_jmp32 &&
6159 (opcode == BPF_JEQ || opcode == BPF_JNE))
6160 /* Comparing for equality, we can combine knowledge */
6161 reg_combine_min_max(&other_branch_regs[insn->src_reg],
6162 &other_branch_regs[insn->dst_reg],
6163 src_reg, dst_reg, opcode);
6164 }
6165 } else if (dst_reg->type == SCALAR_VALUE) {
6166 reg_set_min_max(&other_branch_regs[insn->dst_reg],
6167 dst_reg, insn->imm, opcode, is_jmp32);
6168 }
6169
6170 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
6171 * NOTE: these optimizations below are related with pointer comparison
6172 * which will never be JMP32.
6173 */
6174 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
6175 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE)
&&
6176 reg_type_may_be_null(dst_reg->type)) {
6177 /* Mark all identical registers in each branch as either
6178 * safe or unknown depending R == 0 or R != 0 conditional.
6179 */
6180 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
6181 opcode == BPF_JNE);
6182 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
6183 opcode == BPF_JEQ);
 6184         } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
6185 this_branch, other_branch) &&
6186 is_pointer_value(env, insn->dst_reg)) {
6187 verbose(env, "R%d pointer comparison prohibited\n",
6188 insn->dst_reg);
6189 return -EACCES;
6190 }
6191 if (env->log.level & BPF_LOG_LEVEL)
6192 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
6193 return 0;
6194 }
6195
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org