network: do not drop duplicated entries in loop

Fixes #37456.
This commit is contained in:
Yu Watanabe
2025-05-15 06:36:55 +09:00
committed by Daan De Meyer
parent 946eea7a7f
commit 6a4fe38f7f
3 changed files with 40 additions and 11 deletions

View File

@@ -2393,16 +2393,22 @@ int address_section_verify(Address *address) {
return 0;
}
DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(
trivial_hash_ops_address_detach,
void,
trivial_hash_func,
trivial_compare_func,
Address,
address_detach);
int network_drop_invalid_addresses(Network *network) {
_cleanup_set_free_ Set *addresses = NULL;
_cleanup_set_free_ Set *addresses = NULL, *duplicated_addresses = NULL;
Address *address;
int r;
assert(network);
ORDERED_HASHMAP_FOREACH(address, network->addresses_by_section) {
Address *dup;
if (address_section_verify(address) < 0) {
/* Drop invalid [Address] sections or Address= settings in [Network].
* Note that address_detach() will drop the address from addresses_by_section. */
@@ -2411,7 +2417,7 @@ int network_drop_invalid_addresses(Network *network) {
}
/* Always use the setting specified later. So, remove the previously assigned setting. */
dup = set_remove(addresses, address);
Address *dup = set_remove(addresses, address);
if (dup) {
log_warning("%s: Duplicated address %s is specified at line %u and %u, "
"dropping the address setting specified at line %u.",
@@ -2420,8 +2426,12 @@ int network_drop_invalid_addresses(Network *network) {
address->section->line,
dup->section->line, dup->section->line);
/* address_detach() will drop the address from addresses_by_section. */
address_detach(dup);
/* Do not call address_detach() for 'dup' now, as we can remove only the current
* entry in the loop. We will drop the address from addresses_by_section later. */
r = set_ensure_put(&duplicated_addresses, &trivial_hash_ops_address_detach, dup);
if (r < 0)
return log_oom();
assert(r > 0);
}
/* Use address_hash_ops, instead of address_hash_ops_detach. Otherwise, the Address objects

View File

@@ -682,8 +682,16 @@ static int neighbor_section_verify(Neighbor *neighbor) {
return 0;
}
DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(
trivial_hash_ops_neighbor_detach,
void,
trivial_hash_func,
trivial_compare_func,
Neighbor,
neighbor_detach);
int network_drop_invalid_neighbors(Network *network) {
_cleanup_set_free_ Set *neighbors = NULL;
_cleanup_set_free_ Set *neighbors = NULL, *duplicated_neighbors = NULL;
Neighbor *neighbor;
int r;
@@ -708,8 +716,13 @@ int network_drop_invalid_neighbors(Network *network) {
IN_ADDR_TO_STRING(neighbor->dst_addr.family, &neighbor->dst_addr.address),
neighbor->section->line,
dup->section->line, dup->section->line);
/* neighbor_detach() will drop the neighbor from neighbors_by_section. */
neighbor_detach(dup);
/* Do not call neighbor_detach() for 'dup' now, as we can remove only the current
 * entry in the loop. We will drop the neighbor from neighbors_by_section later. */
r = set_ensure_put(&duplicated_neighbors, &trivial_hash_ops_neighbor_detach, dup);
if (r < 0)
return log_oom();
assert(r > 0);
}
/* Use neighbor_hash_ops, instead of neighbor_hash_ops_detach. Otherwise, the Neighbor objects

View File

@@ -1279,6 +1279,7 @@ static int nexthop_section_verify(NextHop *nh) {
int network_drop_invalid_nexthops(Network *network) {
_cleanup_hashmap_free_ Hashmap *nexthops = NULL;
_cleanup_set_free_ Set *duplicated_nexthops = NULL;
NextHop *nh;
int r;
@@ -1301,8 +1302,13 @@ int network_drop_invalid_nexthops(Network *network) {
dup->section->filename,
nh->id, nh->section->line,
dup->section->line, dup->section->line);
/* nexthop_detach() will drop the nexthop from nexthops_by_section. */
nexthop_detach(dup);
/* Do not call nexthop_detach() for 'dup' now, as we can remove only the current
* entry in the loop. We will drop the nexthop from nexthops_by_section later. */
r = set_ensure_put(&duplicated_nexthops, &nexthop_hash_ops, dup);
if (r < 0)
return log_oom();
assert(r > 0);
}
r = hashmap_ensure_put(&nexthops, NULL, UINT32_TO_PTR(nh->id), nh);