tree:
https://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux.git net-next
head: aa831de1390a2d899159b5409b46ba4e67214c21
commit: a5a62545ccadd97050ea857e681e0452ffa3a574 [85/93] net: flow_dissector: Parse PTP L2
packet header
config: x86_64-randconfig-s021-20201217 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce:
# apt-get install sparse
# sparse version: v0.6.3-184-g1b896707-dirty
#
https://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux.git/commit/?i...
git remote add saeed
https://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux.git
git fetch --no-tags saeed net-next
git checkout a5a62545ccadd97050ea857e681e0452ffa3a574
# save the attached .config to linux build tree
make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
"sparse warnings: (new ones prefixed by >>)"
net/core/flow_dissector.c:179:43: sparse: sparse: restricted __be16 degrades to integer
> net/core/flow_dissector.c:1265:23: sparse: sparse: invalid assignment: +=
> net/core/flow_dissector.c:1265:23: sparse:    left side has type int
> net/core/flow_dissector.c:1265:23: sparse:    right side has type restricted __be16
vim +1265 net/core/flow_dissector.c
883
884 /**
885 * __skb_flow_dissect - extract the flow_keys struct and return it
886 * @net: associated network namespace, derived from @skb if NULL
887 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
888 * @flow_dissector: list of keys to dissect
889 * @target_container: target structure to put dissected values into
890 * @data: raw buffer pointer to the packet, if NULL use skb->data
891 * @proto: protocol for which to get the flow, if @data is NULL use
skb->protocol
892 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
893 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
894 * @flags: flags that control the dissection process, e.g.
895 * FLOW_DISSECTOR_F_STOP_AT_ENCAP.
896 *
897 * The function will try to retrieve individual keys into target specified
898 * by flow_dissector from either the skbuff or a raw buffer specified by the
899 * rest parameters.
900 *
901 * Caller must take care of zeroing target container memory.
902 */
903 bool __skb_flow_dissect(const struct net *net,
904 const struct sk_buff *skb,
905 struct flow_dissector *flow_dissector,
906 void *target_container,
907 void *data, __be16 proto, int nhoff, int hlen,
908 unsigned int flags)
909 {
910 struct flow_dissector_key_control *key_control;
911 struct flow_dissector_key_basic *key_basic;
912 struct flow_dissector_key_addrs *key_addrs;
913 struct flow_dissector_key_tags *key_tags;
914 struct flow_dissector_key_vlan *key_vlan;
915 enum flow_dissect_ret fdret;
916 enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
917 bool mpls_el = false;
918 int mpls_lse = 0;
919 int num_hdrs = 0;
920 u8 ip_proto = 0;
921 bool ret;
922
923 if (!data) {
924 data = skb->data;
925 proto = skb_vlan_tag_present(skb) ?
926 skb->vlan_proto : skb->protocol;
927 nhoff = skb_network_offset(skb);
928 hlen = skb_headlen(skb);
929 #if IS_ENABLED(CONFIG_NET_DSA)
930 if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
931 proto == htons(ETH_P_XDSA))) {
932 const struct dsa_device_ops *ops;
933 int offset = 0;
934
935 ops = skb->dev->dsa_ptr->tag_ops;
936 /* Tail taggers don't break flow dissection */
937 if (!ops->tail_tag) {
938 if (ops->flow_dissect)
939 ops->flow_dissect(skb, &proto, &offset);
940 else
941 dsa_tag_generic_flow_dissect(skb,
942 &proto,
943 &offset);
944 hlen -= offset;
945 nhoff += offset;
946 }
947 }
948 #endif
949 }
950
951 /* It is ensured by skb_flow_dissector_init() that control key will
952 * be always present.
953 */
954 key_control = skb_flow_dissector_target(flow_dissector,
955 FLOW_DISSECTOR_KEY_CONTROL,
956 target_container);
957
958 /* It is ensured by skb_flow_dissector_init() that basic key will
959 * be always present.
960 */
961 key_basic = skb_flow_dissector_target(flow_dissector,
962 FLOW_DISSECTOR_KEY_BASIC,
963 target_container);
964
965 if (skb) {
966 if (!net) {
967 if (skb->dev)
968 net = dev_net(skb->dev);
969 else if (skb->sk)
970 net = sock_net(skb->sk);
971 }
972 }
973
974 WARN_ON_ONCE(!net);
975 if (net) {
976 enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
977 struct bpf_prog_array *run_array;
978
979 rcu_read_lock();
980 run_array = rcu_dereference(init_net.bpf.run_array[type]);
981 if (!run_array)
982 run_array = rcu_dereference(net->bpf.run_array[type]);
983
984 if (run_array) {
985 struct bpf_flow_keys flow_keys;
986 struct bpf_flow_dissector ctx = {
987 .flow_keys = &flow_keys,
988 .data = data,
989 .data_end = data + hlen,
990 };
991 __be16 n_proto = proto;
992 struct bpf_prog *prog;
993
994 if (skb) {
995 ctx.skb = skb;
996 /* we can't use 'proto' in the skb case
997 * because it might be set to skb->vlan_proto
998 * which has been pulled from the data
999 */
1000 n_proto = skb->protocol;
1001 }
1002
1003 prog = READ_ONCE(run_array->items[0].prog);
1004 ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
1005 hlen, flags);
1006 __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
1007 target_container);
1008 rcu_read_unlock();
1009 return ret;
1010 }
1011 rcu_read_unlock();
1012 }
1013
1014 if (dissector_uses_key(flow_dissector,
1015 FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1016 struct ethhdr *eth = eth_hdr(skb);
1017 struct flow_dissector_key_eth_addrs *key_eth_addrs;
1018
1019 key_eth_addrs = skb_flow_dissector_target(flow_dissector,
1020 FLOW_DISSECTOR_KEY_ETH_ADDRS,
1021 target_container);
1022 memcpy(key_eth_addrs, ð->h_dest, sizeof(*key_eth_addrs));
1023 }
1024
1025 proto_again:
1026 fdret = FLOW_DISSECT_RET_CONTINUE;
1027
1028 switch (proto) {
1029 case htons(ETH_P_IP): {
1030 const struct iphdr *iph;
1031 struct iphdr _iph;
1032
1033 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
1034 if (!iph || iph->ihl < 5) {
1035 fdret = FLOW_DISSECT_RET_OUT_BAD;
1036 break;
1037 }
1038
1039 nhoff += iph->ihl * 4;
1040
1041 ip_proto = iph->protocol;
1042
1043 if (dissector_uses_key(flow_dissector,
1044 FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
1045 key_addrs = skb_flow_dissector_target(flow_dissector,
1046 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1047 target_container);
1048
1049 memcpy(&key_addrs->v4addrs, &iph->saddr,
1050 sizeof(key_addrs->v4addrs));
1051 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1052 }
1053
1054 if (ip_is_fragment(iph)) {
1055 key_control->flags |= FLOW_DIS_IS_FRAGMENT;
1056
1057 if (iph->frag_off & htons(IP_OFFSET)) {
1058 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1059 break;
1060 } else {
1061 key_control->flags |= FLOW_DIS_FIRST_FRAG;
1062 if (!(flags &
1063 FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
1064 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1065 break;
1066 }
1067 }
1068 }
1069
1070 __skb_flow_dissect_ipv4(skb, flow_dissector,
1071 target_container, data, iph);
1072
1073 break;
1074 }
1075 case htons(ETH_P_IPV6): {
1076 const struct ipv6hdr *iph;
1077 struct ipv6hdr _iph;
1078
1079 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
1080 if (!iph) {
1081 fdret = FLOW_DISSECT_RET_OUT_BAD;
1082 break;
1083 }
1084
1085 ip_proto = iph->nexthdr;
1086 nhoff += sizeof(struct ipv6hdr);
1087
1088 if (dissector_uses_key(flow_dissector,
1089 FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
1090 key_addrs = skb_flow_dissector_target(flow_dissector,
1091 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1092 target_container);
1093
1094 memcpy(&key_addrs->v6addrs, &iph->saddr,
1095 sizeof(key_addrs->v6addrs));
1096 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1097 }
1098
1099 if ((dissector_uses_key(flow_dissector,
1100 FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
1101 (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
1102 ip6_flowlabel(iph)) {
1103 __be32 flow_label = ip6_flowlabel(iph);
1104
1105 if (dissector_uses_key(flow_dissector,
1106 FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
1107 key_tags = skb_flow_dissector_target(flow_dissector,
1108 FLOW_DISSECTOR_KEY_FLOW_LABEL,
1109 target_container);
1110 key_tags->flow_label = ntohl(flow_label);
1111 }
1112 if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
1113 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1114 break;
1115 }
1116 }
1117
1118 __skb_flow_dissect_ipv6(skb, flow_dissector,
1119 target_container, data, iph);
1120
1121 break;
1122 }
1123 case htons(ETH_P_8021AD):
1124 case htons(ETH_P_8021Q): {
1125 const struct vlan_hdr *vlan = NULL;
1126 struct vlan_hdr _vlan;
1127 __be16 saved_vlan_tpid = proto;
1128
1129 if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
1130 skb && skb_vlan_tag_present(skb)) {
1131 proto = skb->protocol;
1132 } else {
1133 vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
1134 data, hlen, &_vlan);
1135 if (!vlan) {
1136 fdret = FLOW_DISSECT_RET_OUT_BAD;
1137 break;
1138 }
1139
1140 proto = vlan->h_vlan_encapsulated_proto;
1141 nhoff += sizeof(*vlan);
1142 }
1143
1144 if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
1145 dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
1146 } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
1147 dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
1148 } else {
1149 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1150 break;
1151 }
1152
1153 if (dissector_uses_key(flow_dissector, dissector_vlan)) {
1154 key_vlan = skb_flow_dissector_target(flow_dissector,
1155 dissector_vlan,
1156 target_container);
1157
1158 if (!vlan) {
1159 key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
1160 key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
1161 } else {
1162 key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
1163 VLAN_VID_MASK;
1164 key_vlan->vlan_priority =
1165 (ntohs(vlan->h_vlan_TCI) &
1166 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1167 }
1168 key_vlan->vlan_tpid = saved_vlan_tpid;
1169 }
1170
1171 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1172 break;
1173 }
1174 case htons(ETH_P_PPP_SES): {
1175 struct {
1176 struct pppoe_hdr hdr;
1177 __be16 proto;
1178 } *hdr, _hdr;
1179 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
1180 if (!hdr) {
1181 fdret = FLOW_DISSECT_RET_OUT_BAD;
1182 break;
1183 }
1184
1185 proto = hdr->proto;
1186 nhoff += PPPOE_SES_HLEN;
1187 switch (proto) {
1188 case htons(PPP_IP):
1189 proto = htons(ETH_P_IP);
1190 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1191 break;
1192 case htons(PPP_IPV6):
1193 proto = htons(ETH_P_IPV6);
1194 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1195 break;
1196 default:
1197 fdret = FLOW_DISSECT_RET_OUT_BAD;
1198 break;
1199 }
1200 break;
1201 }
1202 case htons(ETH_P_TIPC): {
1203 struct tipc_basic_hdr *hdr, _hdr;
1204
1205 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
1206 data, hlen, &_hdr);
1207 if (!hdr) {
1208 fdret = FLOW_DISSECT_RET_OUT_BAD;
1209 break;
1210 }
1211
1212 if (dissector_uses_key(flow_dissector,
1213 FLOW_DISSECTOR_KEY_TIPC)) {
1214 key_addrs = skb_flow_dissector_target(flow_dissector,
1215 FLOW_DISSECTOR_KEY_TIPC,
1216 target_container);
1217 key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
1218 key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
1219 }
1220 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1221 break;
1222 }
1223
1224 case htons(ETH_P_MPLS_UC):
1225 case htons(ETH_P_MPLS_MC):
1226 fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
1227 target_container, data,
1228 nhoff, hlen, mpls_lse,
1229 &mpls_el);
1230 nhoff += sizeof(struct mpls_label);
1231 mpls_lse++;
1232 break;
1233 case htons(ETH_P_FCOE):
1234 if ((hlen - nhoff) < FCOE_HEADER_LEN) {
1235 fdret = FLOW_DISSECT_RET_OUT_BAD;
1236 break;
1237 }
1238
1239 nhoff += FCOE_HEADER_LEN;
1240 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1241 break;
1242
1243 case htons(ETH_P_ARP):
1244 case htons(ETH_P_RARP):
1245 fdret = __skb_flow_dissect_arp(skb, flow_dissector,
1246 target_container, data,
1247 nhoff, hlen);
1248 break;
1249
1250 case htons(ETH_P_BATMAN):
1251 fdret = __skb_flow_dissect_batadv(skb, key_control, data,
1252 &proto, &nhoff, hlen, flags);
1253 break;
1254
1255 case htons(ETH_P_1588): {
1256 struct ptp_header *hdr, _hdr;
1257
1258 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
1259 hlen, &_hdr);
1260 if (!hdr || (hlen - nhoff) < sizeof(_hdr)) {
1261 fdret = FLOW_DISSECT_RET_OUT_BAD;
1262 break;
1263 }
1264
1265 nhoff += hdr->message_length;
1266 fdret =
FLOW_DISSECT_RET_OUT_GOOD;
1267 break;
1268 }
1269
1270 default:
1271 fdret = FLOW_DISSECT_RET_OUT_BAD;
1272 break;
1273 }
1274
1275 /* Process result of proto processing */
1276 switch (fdret) {
1277 case FLOW_DISSECT_RET_OUT_GOOD:
1278 goto out_good;
1279 case FLOW_DISSECT_RET_PROTO_AGAIN:
1280 if (skb_flow_dissect_allowed(&num_hdrs))
1281 goto proto_again;
1282 goto out_good;
1283 case FLOW_DISSECT_RET_CONTINUE:
1284 case FLOW_DISSECT_RET_IPPROTO_AGAIN:
1285 break;
1286 case FLOW_DISSECT_RET_OUT_BAD:
1287 default:
1288 goto out_bad;
1289 }
1290
1291 ip_proto_again:
1292 fdret = FLOW_DISSECT_RET_CONTINUE;
1293
1294 switch (ip_proto) {
1295 case IPPROTO_GRE:
1296 fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
1297 target_container, data,
1298 &proto, &nhoff, &hlen, flags);
1299 break;
1300
1301 case NEXTHDR_HOP:
1302 case NEXTHDR_ROUTING:
1303 case NEXTHDR_DEST: {
1304 u8 _opthdr[2], *opthdr;
1305
1306 if (proto != htons(ETH_P_IPV6))
1307 break;
1308
1309 opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
1310 data, hlen, &_opthdr);
1311 if (!opthdr) {
1312 fdret = FLOW_DISSECT_RET_OUT_BAD;
1313 break;
1314 }
1315
1316 ip_proto = opthdr[0];
1317 nhoff += (opthdr[1] + 1) << 3;
1318
1319 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
1320 break;
1321 }
1322 case NEXTHDR_FRAGMENT: {
1323 struct frag_hdr _fh, *fh;
1324
1325 if (proto != htons(ETH_P_IPV6))
1326 break;
1327
1328 fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
1329 data, hlen, &_fh);
1330
1331 if (!fh) {
1332 fdret = FLOW_DISSECT_RET_OUT_BAD;
1333 break;
1334 }
1335
1336 key_control->flags |= FLOW_DIS_IS_FRAGMENT;
1337
1338 nhoff += sizeof(_fh);
1339 ip_proto = fh->nexthdr;
1340
1341 if (!(fh->frag_off & htons(IP6_OFFSET))) {
1342 key_control->flags |= FLOW_DIS_FIRST_FRAG;
1343 if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
1344 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
1345 break;
1346 }
1347 }
1348
1349 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1350 break;
1351 }
1352 case IPPROTO_IPIP:
1353 proto = htons(ETH_P_IP);
1354
1355 key_control->flags |= FLOW_DIS_ENCAPSULATION;
1356 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
1357 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1358 break;
1359 }
1360
1361 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1362 break;
1363
1364 case IPPROTO_IPV6:
1365 proto = htons(ETH_P_IPV6);
1366
1367 key_control->flags |= FLOW_DIS_ENCAPSULATION;
1368 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
1369 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1370 break;
1371 }
1372
1373 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1374 break;
1375
1376
1377 case IPPROTO_MPLS:
1378 proto = htons(ETH_P_MPLS_UC);
1379 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1380 break;
1381
1382 case IPPROTO_TCP:
1383 __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
1384 data, nhoff, hlen);
1385 break;
1386
1387 case IPPROTO_ICMP:
1388 case IPPROTO_ICMPV6:
1389 __skb_flow_dissect_icmp(skb, flow_dissector, target_container,
1390 data, nhoff, hlen);
1391 break;
1392
1393 default:
1394 break;
1395 }
1396
1397 if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
1398 __skb_flow_dissect_ports(skb, flow_dissector, target_container,
1399 data, nhoff, ip_proto, hlen);
1400
1401 /* Process result of IP proto processing */
1402 switch (fdret) {
1403 case FLOW_DISSECT_RET_PROTO_AGAIN:
1404 if (skb_flow_dissect_allowed(&num_hdrs))
1405 goto proto_again;
1406 break;
1407 case FLOW_DISSECT_RET_IPPROTO_AGAIN:
1408 if (skb_flow_dissect_allowed(&num_hdrs))
1409 goto ip_proto_again;
1410 break;
1411 case FLOW_DISSECT_RET_OUT_GOOD:
1412 case FLOW_DISSECT_RET_CONTINUE:
1413 break;
1414 case FLOW_DISSECT_RET_OUT_BAD:
1415 default:
1416 goto out_bad;
1417 }
1418
1419 out_good:
1420 ret = true;
1421
1422 out:
1423 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
1424 key_basic->n_proto = proto;
1425 key_basic->ip_proto = ip_proto;
1426
1427 return ret;
1428
1429 out_bad:
1430 ret = false;
1431 goto out;
1432 }
1433 EXPORT_SYMBOL(__skb_flow_dissect);
1434
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org