From 118800c1090b0c3fe99cf674c80d3233f0b55731 Mon Sep 17 00:00:00 2001
From: "Mohamed S. Mahmoud"
Date: Fri, 13 Sep 2024 12:13:37 -0400
Subject: [PATCH] NETOBSERV-1675: network events agent packages update (#712)

Signed-off-by: Mohamed Mahmoud
---
 go.mod | 32 +-
 go.sum | 107 +-
 vendor/github.com/cenkalti/hub/.gitignore | 22 +
 vendor/github.com/cenkalti/hub/.travis.yml | 3 +
 vendor/github.com/cenkalti/hub/LICENSE | 20 +
 vendor/github.com/cenkalti/hub/README.md | 5 +
 vendor/github.com/cenkalti/hub/hub.go | 82 +
 vendor/github.com/cenkalti/rpc2/.gitignore | 23 +
 vendor/github.com/cenkalti/rpc2/.travis.yml | 9 +
 vendor/github.com/cenkalti/rpc2/LICENSE | 21 +
 vendor/github.com/cenkalti/rpc2/README.md | 82 +
 vendor/github.com/cenkalti/rpc2/client.go | 364 +++
 vendor/github.com/cenkalti/rpc2/codec.go | 125 +
 vendor/github.com/cenkalti/rpc2/debug.go | 12 +
 .../cenkalti/rpc2/jsonrpc/jsonrpc.go | 226 ++
 vendor/github.com/cenkalti/rpc2/server.go | 181 ++
 vendor/github.com/cenkalti/rpc2/state.go | 25 +
 .../containernetworking/cni/LICENSE | 202 ++
 .../containernetworking/cni/libcni/api.go | 679 +++++
 .../containernetworking/cni/libcni/conf.go | 270 ++
 .../cni/pkg/invoke/args.go | 128 +
 .../cni/pkg/invoke/delegate.go | 80 +
 .../cni/pkg/invoke/exec.go | 187 ++
 .../cni/pkg/invoke/find.go | 48 +
 .../cni/pkg/invoke/os_unix.go | 20 +
 .../cni/pkg/invoke/os_windows.go | 18 +
 .../cni/pkg/invoke/raw_exec.go | 88 +
 .../cni/pkg/types/020/types.go | 189 ++
 .../cni/pkg/types/040/types.go | 306 ++
 .../cni/pkg/types/100/types.go | 307 ++
 .../containernetworking/cni/pkg/types/args.go | 122 +
 .../cni/pkg/types/create/create.go | 56 +
 .../cni/pkg/types/internal/convert.go | 92 +
 .../cni/pkg/types/internal/create.go | 66 +
 .../cni/pkg/types/types.go | 234 ++
 .../cni/pkg/utils/utils.go | 84 +
 .../cni/pkg/version/conf.go | 26 +
 .../cni/pkg/version/plugin.go | 144 +
 .../cni/pkg/version/reconcile.go | 49 +
 .../cni/pkg/version/version.go | 89 +
 .../containernetworking/plugins/LICENSE | 201 ++
 .../plugins/pkg/ip/addr_linux.go | 68 +
 .../plugins/pkg/ip/cidr.go | 105 +
 .../containernetworking/plugins/pkg/ip/ip.go | 105 +
 .../plugins/pkg/ip/ipforward_linux.go | 62 +
 .../plugins/pkg/ip/ipmasq_linux.go | 126 +
 .../plugins/pkg/ip/link_linux.go | 261 ++
 .../plugins/pkg/ip/route_linux.go | 52 +
 .../plugins/pkg/ip/utils_linux.go | 116 +
 .../plugins/pkg/ns/README.md | 41 +
 .../plugins/pkg/ns/ns_linux.go | 234 +
 .../plugins/pkg/utils/sysctl/sysctl_linux.go | 78 +
 vendor/github.com/coreos/go-iptables/LICENSE | 191 ++
 vendor/github.com/coreos/go-iptables/NOTICE | 5 +
 .../coreos/go-iptables/iptables/iptables.go | 680 +++++
 .../coreos/go-iptables/iptables/lock.go | 84 +
 .../cpuguy83/go-md2man/v2/LICENSE.md | 21 +
 .../cpuguy83/go-md2man/v2/md2man/md2man.go | 16 +
 .../cpuguy83/go-md2man/v2/md2man/roff.go | 348 +++
 .../pkg/decode/decode_protobuf.go | 11 +
 .../pkg/ebpf/bpf_arm64_bpfel.go | 83 +-
 .../pkg/ebpf/bpf_arm64_bpfel.o | Bin 126648 -> 150336 bytes
 .../pkg/ebpf/bpf_powerpc_bpfel.go | 83 +-
 .../pkg/ebpf/bpf_powerpc_bpfel.o | Bin 125880 -> 149528 bytes
 .../pkg/ebpf/bpf_s390_bpfeb.go | 83 +-
 .../pkg/ebpf/bpf_s390_bpfeb.o | Bin 141768 -> 165184 bytes
 .../pkg/ebpf/bpf_x86_bpfel.go | 83 +-
 .../pkg/ebpf/bpf_x86_bpfel.o | Bin 126448 -> 150088 bytes
 .../netobserv-ebpf-agent/pkg/ebpf/tracer.go | 596 ++--
 .../netobserv-ebpf-agent/pkg/flow/deduper.go | 30 +-
 .../netobserv-ebpf-agent/pkg/flow/record.go | 38 +-
 .../pkg/flow/tracer_map.go | 6 +-
 .../pkg/ifaces/informer.go | 2 +-
 .../pkg/ifaces/watcher.go | 3 +
 .../pkg/metrics/metrics.go | 9 +
.../pkg/pbflow/flow.pb.go | 73 +- .../netobserv-ebpf-agent/pkg/pbflow/proto.go | 33 +- vendor/github.com/ovn-org/libovsdb/LICENSE | 202 ++ vendor/github.com/ovn-org/libovsdb/NOTICE | 13 + .../ovn-org/libovsdb/cache/cache.go | 1284 ++++++++ .../github.com/ovn-org/libovsdb/cache/doc.go | 16 + .../ovn-org/libovsdb/cache/uuidset.go | 101 + .../github.com/ovn-org/libovsdb/client/api.go | 593 ++++ .../ovn-org/libovsdb/client/api_test_model.go | 167 ++ .../ovn-org/libovsdb/client/client.go | 1480 +++++++++ .../ovn-org/libovsdb/client/condition.go | 248 ++ .../ovn-org/libovsdb/client/config.go | 27 + .../github.com/ovn-org/libovsdb/client/doc.go | 164 + .../ovn-org/libovsdb/client/metrics.go | 88 + .../ovn-org/libovsdb/client/monitor.go | 136 + .../ovn-org/libovsdb/client/options.go | 164 + .../ovn-org/libovsdb/database/database.go | 33 + .../ovn-org/libovsdb/database/doc.go | 5 + .../ovn-org/libovsdb/database/references.go | 71 + .../ovn-org/libovsdb/mapper/info.go | 179 ++ .../ovn-org/libovsdb/mapper/mapper.go | 317 ++ .../ovn-org/libovsdb/model/client.go | 171 ++ .../ovn-org/libovsdb/model/database.go | 118 + .../ovn-org/libovsdb/model/model.go | 130 + .../ovn-org/libovsdb/ovsdb/bindings.go | 427 +++ .../ovn-org/libovsdb/ovsdb/condition.go | 223 ++ .../ovn-org/libovsdb/ovsdb/error.go | 373 +++ .../github.com/ovn-org/libovsdb/ovsdb/map.go | 92 + .../ovn-org/libovsdb/ovsdb/monitor_select.go | 88 + .../ovn-org/libovsdb/ovsdb/mutation.go | 87 + .../ovn-org/libovsdb/ovsdb/named_uuid.go | 165 + .../ovn-org/libovsdb/ovsdb/notation.go | 129 + .../github.com/ovn-org/libovsdb/ovsdb/row.go | 26 + .../github.com/ovn-org/libovsdb/ovsdb/rpc.go | 79 + .../ovn-org/libovsdb/ovsdb/schema.go | 641 ++++ .../libovsdb/ovsdb/serverdb/.gitignore | 1 + .../libovsdb/ovsdb/serverdb/database.go | 182 ++ .../ovn-org/libovsdb/ovsdb/serverdb/gen.go | 6 + .../ovn-org/libovsdb/ovsdb/serverdb/model.go | 99 + .../github.com/ovn-org/libovsdb/ovsdb/set.go | 109 + .../ovn-org/libovsdb/ovsdb/update3.go | 51 + .../ovn-org/libovsdb/ovsdb/updates.go | 35 + .../ovn-org/libovsdb/ovsdb/updates2.go | 19 + .../github.com/ovn-org/libovsdb/ovsdb/uuid.go | 59 + .../ovn-org/libovsdb/updates/difference.go | 209 ++ .../ovn-org/libovsdb/updates/doc.go | 15 + .../ovn-org/libovsdb/updates/merge.go | 160 + .../ovn-org/libovsdb/updates/mutate.go | 297 ++ .../ovn-org/libovsdb/updates/references.go | 797 +++++ .../ovn-org/libovsdb/updates/updates.go | 528 ++++ .../ovn-kubernetes/go-controller/LICENSE | 202 ++ .../observability-lib/ovsdb/.gitignore | 1 + .../observability-lib/ovsdb/bridge.go | 570 ++++ .../ovsdb/flow_sample_collector_set.go | 143 + .../observability-lib/ovsdb/gen.go | 3 + .../observability-lib/ovsdb/observ_model.go | 11 + .../sampledecoder/db_client.go | 118 + .../sampledecoder/sample_decoder.go | 290 ++ .../go-controller/pkg/cni/types/types.go | 89 + .../go-controller/pkg/config/cni.go | 173 ++ .../go-controller/pkg/config/config.go | 2662 +++++++++++++++++ .../go-controller/pkg/config/utils.go | 345 +++ .../pkg/cryptorand/cryptorand.go | 47 + .../go-controller/pkg/libovsdb/ops/acl.go | 181 ++ .../pkg/libovsdb/ops/address_set.go | 231 ++ .../go-controller/pkg/libovsdb/ops/chassis.go | 166 + .../go-controller/pkg/libovsdb/ops/copp.go | 47 + .../pkg/libovsdb/ops/db_object_ids.go | 332 ++ .../pkg/libovsdb/ops/db_object_types.go | 328 ++ .../go-controller/pkg/libovsdb/ops/dhcp.go | 84 + .../go-controller/pkg/libovsdb/ops/lbgroup.go | 88 + .../pkg/libovsdb/ops/loadbalancer.go | 151 + .../pkg/libovsdb/ops/mac_binding.go | 61 + 
.../go-controller/pkg/libovsdb/ops/meter.go | 66 + .../go-controller/pkg/libovsdb/ops/model.go | 516 ++++ .../pkg/libovsdb/ops/model_client.go | 511 ++++ .../pkg/libovsdb/ops/named_uuid.go | 28 + .../pkg/libovsdb/ops/nb_global.go | 64 + .../pkg/libovsdb/ops/portbinding.go | 53 + .../pkg/libovsdb/ops/portgroup.go | 329 ++ .../go-controller/pkg/libovsdb/ops/qos.go | 119 + .../go-controller/pkg/libovsdb/ops/router.go | 1189 ++++++++ .../go-controller/pkg/libovsdb/ops/sample.go | 221 ++ .../pkg/libovsdb/ops/sb_global.go | 27 + .../go-controller/pkg/libovsdb/ops/switch.go | 481 +++ .../pkg/libovsdb/ops/template_var.go | 112 + .../pkg/libovsdb/ops/transact.go | 98 + .../go-controller/pkg/nbdb/.gitignore | 1 + .../go-controller/pkg/nbdb/acl.go | 303 ++ .../go-controller/pkg/nbdb/address_set.go | 118 + .../go-controller/pkg/nbdb/bfd.go | 237 ++ .../pkg/nbdb/chassis_template_var.go | 120 + .../go-controller/pkg/nbdb/connection.go | 209 ++ .../go-controller/pkg/nbdb/copp.go | 120 + .../go-controller/pkg/nbdb/dhcp_options.go | 120 + .../go-controller/pkg/nbdb/dhcp_relay.go | 145 + .../go-controller/pkg/nbdb/dns.go | 147 + .../pkg/nbdb/forwarding_group.go | 136 + .../go-controller/pkg/nbdb/gateway_chassis.go | 132 + .../go-controller/pkg/nbdb/gen.go | 3 + .../go-controller/pkg/nbdb/ha_chassis.go | 93 + .../pkg/nbdb/ha_chassis_group.go | 118 + .../go-controller/pkg/nbdb/load_balancer.go | 290 ++ .../pkg/nbdb/load_balancer_group.go | 85 + .../pkg/nbdb/load_balancer_health_check.go | 120 + .../go-controller/pkg/nbdb/logical_router.go | 356 +++ .../pkg/nbdb/logical_router_policy.go | 229 ++ .../pkg/nbdb/logical_router_port.go | 385 +++ .../pkg/nbdb/logical_router_static_route.go | 216 ++ .../go-controller/pkg/nbdb/logical_switch.go | 362 +++ .../pkg/nbdb/logical_switch_port.go | 444 +++ .../go-controller/pkg/nbdb/meter.go | 158 + .../go-controller/pkg/nbdb/meter_band.go | 107 + .../go-controller/pkg/nbdb/mirror.go | 125 + .../go-controller/pkg/nbdb/model.go | 2262 ++++++++++++++ .../go-controller/pkg/nbdb/nat.go | 285 ++ .../go-controller/pkg/nbdb/nb_global.go | 218 ++ .../go-controller/pkg/nbdb/port_group.go | 149 + .../go-controller/pkg/nbdb/qos.go | 180 ++ .../go-controller/pkg/nbdb/sample.go | 85 + .../pkg/nbdb/sample_collector.go | 105 + .../go-controller/pkg/nbdb/sampling_app.go | 103 + .../go-controller/pkg/nbdb/ssl.go | 117 + .../pkg/nbdb/static_mac_binding.go | 72 + .../pkg/observability/observability.go | 303 ++ .../go-controller/pkg/sbdb/.gitignore | 1 + .../go-controller/pkg/sbdb/address_set.go | 85 + .../go-controller/pkg/sbdb/bfd.go | 179 ++ .../go-controller/pkg/sbdb/chassis.go | 225 ++ .../go-controller/pkg/sbdb/chassis_private.go | 124 + .../pkg/sbdb/chassis_template_var.go | 87 + .../go-controller/pkg/sbdb/connection.go | 221 ++ .../pkg/sbdb/controller_event.go | 126 + .../pkg/sbdb/datapath_binding.go | 118 + .../go-controller/pkg/sbdb/dhcp_options.go | 82 + .../go-controller/pkg/sbdb/dhcpv6_options.go | 77 + .../go-controller/pkg/sbdb/dns.go | 178 ++ .../go-controller/pkg/sbdb/encap.go | 109 + .../go-controller/pkg/sbdb/fdb.go | 72 + .../go-controller/pkg/sbdb/gateway_chassis.go | 151 + .../go-controller/pkg/sbdb/gen.go | 3 + .../go-controller/pkg/sbdb/ha_chassis.go | 112 + .../pkg/sbdb/ha_chassis_group.go | 149 + .../go-controller/pkg/sbdb/igmp_group.go | 147 + .../go-controller/pkg/sbdb/ip_multicast.go | 228 ++ .../go-controller/pkg/sbdb/load_balancer.go | 294 ++ .../pkg/sbdb/logical_dp_group.go | 79 + .../go-controller/pkg/sbdb/logical_flow.go | 253 ++ 
.../go-controller/pkg/sbdb/mac_binding.go | 78 + .../go-controller/pkg/sbdb/meter.go | 100 + .../go-controller/pkg/sbdb/meter_band.go | 74 + .../go-controller/pkg/sbdb/mirror.go | 125 + .../go-controller/pkg/sbdb/model.go | 1884 ++++++++++++ .../go-controller/pkg/sbdb/multicast_group.go | 97 + .../go-controller/pkg/sbdb/port_binding.go | 586 ++++ .../go-controller/pkg/sbdb/port_group.go | 85 + .../go-controller/pkg/sbdb/rbac_permission.go | 122 + .../go-controller/pkg/sbdb/rbac_role.go | 87 + .../go-controller/pkg/sbdb/sb_global.go | 182 ++ .../go-controller/pkg/sbdb/service_monitor.go | 213 ++ .../go-controller/pkg/sbdb/ssl.go | 117 + .../pkg/sbdb/static_mac_binding.go | 78 + .../go-controller/pkg/types/const.go | 245 ++ .../go-controller/pkg/types/errors.go | 44 + .../pkg/types/resource_status.go | 21 + .../russross/blackfriday/v2/.gitignore | 8 + .../russross/blackfriday/v2/.travis.yml | 17 + .../russross/blackfriday/v2/LICENSE.txt | 29 + .../russross/blackfriday/v2/README.md | 335 +++ .../russross/blackfriday/v2/block.go | 1612 ++++++++++ .../github.com/russross/blackfriday/v2/doc.go | 46 + .../russross/blackfriday/v2/entities.go | 2236 ++++++++++++++ .../github.com/russross/blackfriday/v2/esc.go | 70 + .../russross/blackfriday/v2/html.go | 952 ++++++ .../russross/blackfriday/v2/inline.go | 1228 ++++++++ .../russross/blackfriday/v2/markdown.go | 950 ++++++ .../russross/blackfriday/v2/node.go | 360 +++ .../russross/blackfriday/v2/smartypants.go | 457 +++ vendor/github.com/safchain/ethtool/.gitignore | 27 + .../github.com/safchain/ethtool/.golangci.yml | 14 + vendor/github.com/safchain/ethtool/.yamllint | 7 + vendor/github.com/safchain/ethtool/LICENSE | 202 ++ vendor/github.com/safchain/ethtool/Makefile | 4 + vendor/github.com/safchain/ethtool/README.md | 55 + vendor/github.com/safchain/ethtool/ethtool.go | 1012 +++++++ .../safchain/ethtool/ethtool_cmd.go | 208 ++ .../safchain/ethtool/ethtool_msglvl.go | 114 + vendor/github.com/urfave/cli/v2/.flake8 | 2 + vendor/github.com/urfave/cli/v2/.gitignore | 7 + .../urfave/cli/v2/CODE_OF_CONDUCT.md | 74 + vendor/github.com/urfave/cli/v2/LICENSE | 21 + vendor/github.com/urfave/cli/v2/README.md | 66 + vendor/github.com/urfave/cli/v2/app.go | 542 ++++ vendor/github.com/urfave/cli/v2/args.go | 54 + vendor/github.com/urfave/cli/v2/category.go | 79 + vendor/github.com/urfave/cli/v2/cli.go | 23 + vendor/github.com/urfave/cli/v2/command.go | 301 ++ vendor/github.com/urfave/cli/v2/context.go | 273 ++ vendor/github.com/urfave/cli/v2/docs.go | 148 + vendor/github.com/urfave/cli/v2/errors.go | 131 + vendor/github.com/urfave/cli/v2/fish.go | 192 ++ vendor/github.com/urfave/cli/v2/flag.go | 388 +++ vendor/github.com/urfave/cli/v2/flag_bool.go | 106 + .../github.com/urfave/cli/v2/flag_duration.go | 105 + .../github.com/urfave/cli/v2/flag_float64.go | 106 + .../urfave/cli/v2/flag_float64_slice.go | 163 + .../github.com/urfave/cli/v2/flag_generic.go | 108 + vendor/github.com/urfave/cli/v2/flag_int.go | 106 + vendor/github.com/urfave/cli/v2/flag_int64.go | 105 + .../urfave/cli/v2/flag_int64_slice.go | 159 + .../urfave/cli/v2/flag_int_slice.go | 173 ++ vendor/github.com/urfave/cli/v2/flag_path.go | 95 + .../github.com/urfave/cli/v2/flag_string.go | 95 + .../urfave/cli/v2/flag_string_slice.go | 171 ++ .../urfave/cli/v2/flag_timestamp.go | 152 + vendor/github.com/urfave/cli/v2/flag_uint.go | 105 + .../github.com/urfave/cli/v2/flag_uint64.go | 105 + vendor/github.com/urfave/cli/v2/funcs.go | 44 + vendor/github.com/urfave/cli/v2/help.go | 368 +++ 
vendor/github.com/urfave/cli/v2/parse.go | 94 + vendor/github.com/urfave/cli/v2/sort.go | 29 + vendor/github.com/urfave/cli/v2/template.go | 120 + .../github.com/vishvananda/netlink/.gitignore | 1 + .../vishvananda/netlink/.travis.yml | 19 - .../github.com/vishvananda/netlink/README.md | 2 +- vendor/github.com/vishvananda/netlink/addr.go | 1 + .../vishvananda/netlink/addr_linux.go | 98 +- .../vishvananda/netlink/bpf_linux.go | 24 + .../vishvananda/netlink/bridge_linux.go | 45 +- .../github.com/vishvananda/netlink/chain.go | 22 + .../vishvananda/netlink/chain_linux.go | 112 + .../github.com/vishvananda/netlink/class.go | 58 +- .../vishvananda/netlink/class_linux.go | 40 +- .../vishvananda/netlink/conntrack_linux.go | 612 +++- .../netlink/conntrack_unspecified.go | 19 + .../vishvananda/netlink/devlink_linux.go | 897 +++++- .../github.com/vishvananda/netlink/filter.go | 184 +- .../vishvananda/netlink/filter_linux.go | 535 +++- .../vishvananda/netlink/handle_linux.go | 45 +- .../vishvananda/netlink/handle_unspecified.go | 32 +- .../vishvananda/netlink/inet_diag.go | 40 + .../vishvananda/netlink/ipset_linux.go | 581 ++++ vendor/github.com/vishvananda/netlink/link.go | 439 ++- .../vishvananda/netlink/link_linux.go | 1087 ++++++- .../github.com/vishvananda/netlink/neigh.go | 1 + .../vishvananda/netlink/neigh_linux.go | 85 +- .../netlink/netlink_unspecified.go | 54 +- .../vishvananda/netlink/netns_linux.go | 6 +- .../vishvananda/netlink/nl/addr_linux.go | 14 +- .../vishvananda/netlink/nl/conntrack_linux.go | 45 +- .../vishvananda/netlink/nl/devlink_linux.go | 118 +- .../vishvananda/netlink/nl/ip6tnl_linux.go | 21 + .../vishvananda/netlink/nl/ipset_linux.go | 227 ++ .../vishvananda/netlink/nl/link_linux.go | 216 +- .../vishvananda/netlink/nl/lwt_linux.go | 29 + .../vishvananda/netlink/nl/nl_linux.go | 346 ++- .../netlink/nl/parse_attr_linux.go | 79 + .../vishvananda/netlink/nl/rdma_link_linux.go | 4 + .../vishvananda/netlink/nl/route_linux.go | 4 +- .../vishvananda/netlink/nl/seg6_linux.go | 4 +- .../vishvananda/netlink/nl/seg6local_linux.go | 4 + .../vishvananda/netlink/nl/syscall.go | 10 +- .../vishvananda/netlink/nl/tc_linux.go | 747 ++++- .../vishvananda/netlink/nl/vdpa_linux.go | 41 + .../vishvananda/netlink/nl/xfrm_linux.go | 10 +- .../netlink/nl/xfrm_state_linux.go | 29 +- .../vishvananda/netlink/proc_event_linux.go | 208 ++ .../vishvananda/netlink/protinfo.go | 24 +- .../vishvananda/netlink/protinfo_linux.go | 4 + .../github.com/vishvananda/netlink/qdisc.go | 92 +- .../vishvananda/netlink/qdisc_linux.go | 148 +- .../vishvananda/netlink/rdma_link_linux.go | 97 +- .../github.com/vishvananda/netlink/route.go | 94 +- .../vishvananda/netlink/route_linux.go | 946 +++++- .../vishvananda/netlink/route_unspecified.go | 10 + vendor/github.com/vishvananda/netlink/rule.go | 50 +- .../vishvananda/netlink/rule_linux.go | 159 +- .../vishvananda/netlink/rule_nonlinux.go | 8 + .../github.com/vishvananda/netlink/socket.go | 77 + .../vishvananda/netlink/socket_linux.go | 484 ++- .../vishvananda/netlink/socket_xdp_linux.go | 195 ++ vendor/github.com/vishvananda/netlink/tcp.go | 92 + .../vishvananda/netlink/tcp_linux.go | 368 +++ .../vishvananda/netlink/unix_diag.go | 27 + .../vishvananda/netlink/vdpa_linux.go | 463 +++ .../github.com/vishvananda/netlink/virtio.go | 132 + .../vishvananda/netlink/xdp_diag.go | 34 + .../vishvananda/netlink/xdp_linux.go | 46 + .../netlink/{xfrm.go => xfrm_linux.go} | 2 +- .../vishvananda/netlink/xfrm_policy.go | 96 - .../vishvananda/netlink/xfrm_policy_linux.go | 113 +- 
.../vishvananda/netlink/xfrm_state.go | 131 - .../vishvananda/netlink/xfrm_state_linux.go | 236 +- .../vishvananda/netlink/xfrm_unspecified.go | 7 + vendor/golang.org/x/exp/slices/slices.go | 44 +- vendor/golang.org/x/exp/slog/handler.go | 18 + vendor/golang.org/x/sys/cpu/cpu.go | 19 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 2 +- .../golang.org/x/sys/cpu/cpu_linux_riscv64.go | 137 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + .../golang.org/x/sys/unix/syscall_darwin.go | 37 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 + .../x/sys/unix/zerrors_darwin_amd64.go | 7 + .../x/sys/unix/zerrors_darwin_arm64.go | 7 + .../x/sys/unix/zerrors_zos_s390x.go | 2 + .../x/sys/unix/zsyscall_darwin_amd64.go | 20 + .../x/sys/unix/zsyscall_darwin_amd64.s | 5 + .../x/sys/unix/zsyscall_darwin_arm64.go | 20 + .../x/sys/unix/zsyscall_darwin_arm64.s | 5 + .../x/sys/unix/ztypes_darwin_amd64.go | 13 + .../x/sys/unix/ztypes_darwin_arm64.go | 13 + .../x/sys/unix/ztypes_freebsd_386.go | 1 + .../x/sys/unix/ztypes_freebsd_amd64.go | 1 + .../x/sys/unix/ztypes_freebsd_arm.go | 1 + .../x/sys/unix/ztypes_freebsd_arm64.go | 1 + .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 2 +- .../x/sys/unix/ztypes_linux_riscv64.go | 33 + .../x/sys/windows/syscall_windows.go | 4 + .../golang.org/x/sys/windows/types_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 38 + vendor/gopkg.in/gcfg.v1/LICENSE | 28 + vendor/gopkg.in/gcfg.v1/README | 4 + vendor/gopkg.in/gcfg.v1/doc.go | 145 + vendor/gopkg.in/gcfg.v1/errors.go | 57 + vendor/gopkg.in/gcfg.v1/read.go | 257 ++ vendor/gopkg.in/gcfg.v1/scanner/errors.go | 121 + vendor/gopkg.in/gcfg.v1/scanner/scanner.go | 342 +++ vendor/gopkg.in/gcfg.v1/set.go | 329 ++ vendor/gopkg.in/gcfg.v1/token/position.go | 435 +++ vendor/gopkg.in/gcfg.v1/token/serialize.go | 56 + vendor/gopkg.in/gcfg.v1/token/token.go | 83 + vendor/gopkg.in/gcfg.v1/types/bool.go | 23 + vendor/gopkg.in/gcfg.v1/types/doc.go | 4 + vendor/gopkg.in/gcfg.v1/types/enum.go | 44 + vendor/gopkg.in/gcfg.v1/types/int.go | 86 + vendor/gopkg.in/gcfg.v1/types/scan.go | 23 + .../natefinch/lumberjack.v2/.gitignore | 23 + .../natefinch/lumberjack.v2/.travis.yml | 11 + .../gopkg.in/natefinch/lumberjack.v2/LICENSE | 21 + .../natefinch/lumberjack.v2/README.md | 179 ++ .../gopkg.in/natefinch/lumberjack.v2/chown.go | 11 + .../natefinch/lumberjack.v2/chown_linux.go | 19 + .../natefinch/lumberjack.v2/lumberjack.go | 541 ++++ vendor/gopkg.in/warnings.v0/LICENSE | 24 + vendor/gopkg.in/warnings.v0/README | 77 + vendor/gopkg.in/warnings.v0/warnings.go | 194 ++ .../api/resource/v1alpha3/generated.pb.go | 258 +- .../api/resource/v1alpha3/generated.proto | 2 +- vendor/k8s.io/api/resource/v1alpha3/types.go | 2 +- .../v1alpha3/types_swagger_doc_generated.go | 2 +- .../klog/v2/internal/verbosity/verbosity.go | 303 ++ vendor/k8s.io/klog/v2/textlogger/options.go | 154 + .../k8s.io/klog/v2/textlogger/textlogger.go | 187 ++ .../klog/v2/textlogger/textlogger_slog.go | 52 + vendor/k8s.io/utils/exec/README.md | 5 + vendor/k8s.io/utils/exec/doc.go | 18 + vendor/k8s.io/utils/exec/exec.go | 256 ++ vendor/k8s.io/utils/exec/fixup_go118.go | 32 + vendor/k8s.io/utils/exec/fixup_go119.go | 40 + vendor/modules.txt | 94 +- .../controllerutil/controllerutil.go | 20 +- 438 files changed, 75020 insertions(+), 1557 deletions(-) create mode 100644 vendor/github.com/cenkalti/hub/.gitignore create mode 100644 vendor/github.com/cenkalti/hub/.travis.yml create mode 100644 
vendor/github.com/cenkalti/hub/LICENSE create mode 100644 vendor/github.com/cenkalti/hub/README.md create mode 100644 vendor/github.com/cenkalti/hub/hub.go create mode 100644 vendor/github.com/cenkalti/rpc2/.gitignore create mode 100644 vendor/github.com/cenkalti/rpc2/.travis.yml create mode 100644 vendor/github.com/cenkalti/rpc2/LICENSE create mode 100644 vendor/github.com/cenkalti/rpc2/README.md create mode 100644 vendor/github.com/cenkalti/rpc2/client.go create mode 100644 vendor/github.com/cenkalti/rpc2/codec.go create mode 100644 vendor/github.com/cenkalti/rpc2/debug.go create mode 100644 vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go create mode 100644 vendor/github.com/cenkalti/rpc2/server.go create mode 100644 vendor/github.com/cenkalti/rpc2/state.go create mode 100644 vendor/github.com/containernetworking/cni/LICENSE create mode 100644 vendor/github.com/containernetworking/cni/libcni/api.go create mode 100644 vendor/github.com/containernetworking/cni/libcni/conf.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/args.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/exec.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/find.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/020/types.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/040/types.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/100/types.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/args.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/create/create.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/internal/create.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/types.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/utils/utils.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/conf.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/plugin.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/reconcile.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/version.go create mode 100644 vendor/github.com/containernetworking/plugins/LICENSE create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ip/ip.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/ns/README.md create mode 100644 
vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go create mode 100644 vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go create mode 100644 vendor/github.com/coreos/go-iptables/LICENSE create mode 100644 vendor/github.com/coreos/go-iptables/NOTICE create mode 100644 vendor/github.com/coreos/go-iptables/iptables/iptables.go create mode 100644 vendor/github.com/coreos/go-iptables/iptables/lock.go create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go create mode 100644 vendor/github.com/ovn-org/libovsdb/LICENSE create mode 100644 vendor/github.com/ovn-org/libovsdb/NOTICE create mode 100644 vendor/github.com/ovn-org/libovsdb/cache/cache.go create mode 100644 vendor/github.com/ovn-org/libovsdb/cache/doc.go create mode 100644 vendor/github.com/ovn-org/libovsdb/cache/uuidset.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/api.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/api_test_model.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/client.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/condition.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/config.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/doc.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/metrics.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/monitor.go create mode 100644 vendor/github.com/ovn-org/libovsdb/client/options.go create mode 100644 vendor/github.com/ovn-org/libovsdb/database/database.go create mode 100644 vendor/github.com/ovn-org/libovsdb/database/doc.go create mode 100644 vendor/github.com/ovn-org/libovsdb/database/references.go create mode 100644 vendor/github.com/ovn-org/libovsdb/mapper/info.go create mode 100644 vendor/github.com/ovn-org/libovsdb/mapper/mapper.go create mode 100644 vendor/github.com/ovn-org/libovsdb/model/client.go create mode 100644 vendor/github.com/ovn-org/libovsdb/model/database.go create mode 100644 vendor/github.com/ovn-org/libovsdb/model/model.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/error.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/map.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/row.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/set.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go create mode 100644 
vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go create mode 100644 vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go create mode 100644 vendor/github.com/ovn-org/libovsdb/updates/difference.go create mode 100644 vendor/github.com/ovn-org/libovsdb/updates/doc.go create mode 100644 vendor/github.com/ovn-org/libovsdb/updates/merge.go create mode 100644 vendor/github.com/ovn-org/libovsdb/updates/mutate.go create mode 100644 vendor/github.com/ovn-org/libovsdb/updates/references.go create mode 100644 vendor/github.com/ovn-org/libovsdb/updates/updates.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go create mode 100644 
vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go create mode 100644 
vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go create mode 100644 
vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go create mode 100644 vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go create mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore create mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml create mode 100644 vendor/github.com/russross/blackfriday/v2/LICENSE.txt create mode 100644 vendor/github.com/russross/blackfriday/v2/README.md create mode 100644 vendor/github.com/russross/blackfriday/v2/block.go create mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go create mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go create mode 100644 vendor/github.com/russross/blackfriday/v2/html.go create mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go create mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go create mode 100644 vendor/github.com/russross/blackfriday/v2/node.go create mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go create mode 100644 vendor/github.com/safchain/ethtool/.gitignore create mode 100644 vendor/github.com/safchain/ethtool/.golangci.yml create mode 100644 vendor/github.com/safchain/ethtool/.yamllint create mode 100644 vendor/github.com/safchain/ethtool/LICENSE create mode 100644 vendor/github.com/safchain/ethtool/Makefile create mode 100644 vendor/github.com/safchain/ethtool/README.md create mode 100644 vendor/github.com/safchain/ethtool/ethtool.go create mode 100644 vendor/github.com/safchain/ethtool/ethtool_cmd.go create mode 100644 vendor/github.com/safchain/ethtool/ethtool_msglvl.go create mode 100644 vendor/github.com/urfave/cli/v2/.flake8 create mode 100644 vendor/github.com/urfave/cli/v2/.gitignore create mode 100644 vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/urfave/cli/v2/LICENSE create mode 100644 vendor/github.com/urfave/cli/v2/README.md create mode 100644 vendor/github.com/urfave/cli/v2/app.go create mode 100644 vendor/github.com/urfave/cli/v2/args.go create mode 100644 vendor/github.com/urfave/cli/v2/category.go create mode 
100644 vendor/github.com/urfave/cli/v2/cli.go create mode 100644 vendor/github.com/urfave/cli/v2/command.go create mode 100644 vendor/github.com/urfave/cli/v2/context.go create mode 100644 vendor/github.com/urfave/cli/v2/docs.go create mode 100644 vendor/github.com/urfave/cli/v2/errors.go create mode 100644 vendor/github.com/urfave/cli/v2/fish.go create mode 100644 vendor/github.com/urfave/cli/v2/flag.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_bool.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_duration.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_float64.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_float64_slice.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_generic.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_int.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_int64.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_int64_slice.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_int_slice.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_path.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_string.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_string_slice.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_timestamp.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_uint.go create mode 100644 vendor/github.com/urfave/cli/v2/flag_uint64.go create mode 100644 vendor/github.com/urfave/cli/v2/funcs.go create mode 100644 vendor/github.com/urfave/cli/v2/help.go create mode 100644 vendor/github.com/urfave/cli/v2/parse.go create mode 100644 vendor/github.com/urfave/cli/v2/sort.go create mode 100644 vendor/github.com/urfave/cli/v2/template.go delete mode 100644 vendor/github.com/vishvananda/netlink/.travis.yml create mode 100644 vendor/github.com/vishvananda/netlink/chain.go create mode 100644 vendor/github.com/vishvananda/netlink/chain_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/inet_diag.go create mode 100644 vendor/github.com/vishvananda/netlink/ipset_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/ipset_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/lwt_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/proc_event_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/rule_nonlinux.go create mode 100644 vendor/github.com/vishvananda/netlink/socket_xdp_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/tcp.go create mode 100644 vendor/github.com/vishvananda/netlink/tcp_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/unix_diag.go create mode 100644 vendor/github.com/vishvananda/netlink/vdpa_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/virtio.go create mode 100644 vendor/github.com/vishvananda/netlink/xdp_diag.go create mode 100644 vendor/github.com/vishvananda/netlink/xdp_linux.go rename vendor/github.com/vishvananda/netlink/{xfrm.go => xfrm_linux.go} (95%) delete mode 100644 vendor/github.com/vishvananda/netlink/xfrm_policy.go delete mode 100644 vendor/github.com/vishvananda/netlink/xfrm_state.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_unspecified.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go create mode 100644 
vendor/gopkg.in/gcfg.v1/LICENSE create mode 100644 vendor/gopkg.in/gcfg.v1/README create mode 100644 vendor/gopkg.in/gcfg.v1/doc.go create mode 100644 vendor/gopkg.in/gcfg.v1/errors.go create mode 100644 vendor/gopkg.in/gcfg.v1/read.go create mode 100644 vendor/gopkg.in/gcfg.v1/scanner/errors.go create mode 100644 vendor/gopkg.in/gcfg.v1/scanner/scanner.go create mode 100644 vendor/gopkg.in/gcfg.v1/set.go create mode 100644 vendor/gopkg.in/gcfg.v1/token/position.go create mode 100644 vendor/gopkg.in/gcfg.v1/token/serialize.go create mode 100644 vendor/gopkg.in/gcfg.v1/token/token.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/bool.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/doc.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/enum.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/int.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/scan.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/README.md create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go create mode 100644 vendor/gopkg.in/warnings.v0/LICENSE create mode 100644 vendor/gopkg.in/warnings.v0/README create mode 100644 vendor/gopkg.in/warnings.v0/warnings.go create mode 100644 vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go create mode 100644 vendor/k8s.io/klog/v2/textlogger/options.go create mode 100644 vendor/k8s.io/klog/v2/textlogger/textlogger.go create mode 100644 vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go create mode 100644 vendor/k8s.io/utils/exec/README.md create mode 100644 vendor/k8s.io/utils/exec/doc.go create mode 100644 vendor/k8s.io/utils/exec/exec.go create mode 100644 vendor/k8s.io/utils/exec/fixup_go118.go create mode 100644 vendor/k8s.io/utils/exec/fixup_go119.go diff --git a/go.mod b/go.mod index e89ea3109..e21da9379 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/netobserv/gopipes v0.3.0 github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 - github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20240905095613-6adad95c9d84 + github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20240913155426-6ac7c5ccbf59 github.com/netsampler/goflow2 v1.3.7 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.20.3 @@ -45,9 +45,9 @@ require ( google.golang.org/grpc v1.66.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.31.0 - k8s.io/apimachinery v0.31.0 - k8s.io/client-go v0.31.0 + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + k8s.io/client-go v0.31.1 sigs.k8s.io/e2e-framework v0.4.0 ) @@ -55,8 +55,14 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/hub v1.0.1 // indirect + github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cilium/ebpf v0.16.0 // indirect + github.com/containernetworking/cni v1.1.2 // indirect + github.com/containernetworking/plugins v1.2.0 // indirect + github.com/coreos/go-iptables v0.6.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -98,6 +104,8 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 // indirect + github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20240902083137-5d2310e77f87 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pion/dtls/v2 v2.2.4 // indirect @@ -108,6 +116,8 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 // indirect github.com/rs/xid v1.5.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect @@ -115,7 +125,8 @@ require ( github.com/spf13/cast v1.6.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/vishvananda/netlink v1.1.0 // indirect + github.com/urfave/cli/v2 v2.2.0 // indirect + github.com/vishvananda/netlink v1.3.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect @@ -126,23 +137,26 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect golang.org/x/oauth2 v0.22.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + gopkg.in/gcfg.v1 v1.2.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.30.1 // indirect + k8s.io/component-base v0.30.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect lukechampine.com/uint128 v1.2.0 // indirect - sigs.k8s.io/controller-runtime v0.18.2 // indirect + sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index 75394b8b0..abf6337b7 100644 --- a/go.sum +++ b/go.sum @@ -85,6 +85,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v1.2.0 h1:1v0TJPDtlhgpW4nJ+GvxCLSlUDC3+gW0CQQvlmfDR/s= +github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= @@ -120,10 +122,16 @@ github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+Wji github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA= +github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg= +github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -142,10 +150,17 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= +github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= +github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod 
h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -183,8 +198,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -304,6 +319,7 @@ github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZs github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -367,6 +383,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -413,6 +431,7 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -480,6 +499,7 @@ github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwI github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -528,6 +548,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0/go.mod h1:wxt2YWRVItDtaQmVSmaN5ubE2L1c9CiNoHQwSJnM8Ko= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -615,6 +637,8 @@ github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dz github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -626,6 +650,8 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -657,10 +683,13 @@ github.com/netobserv/gopipes v0.3.0 h1:IYmPnnAVCdSK7VmHmpFhrVBOEm45qpgbZmJz1sSW+ github.com/netobserv/gopipes v0.3.0/go.mod h1:N7/Gz05EOF0CQQSKWsv3eof22Cj2PB08Pbttw98YFYU= github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 h1:RmnoJe/ci5q+QdM7upFdxiU+D8F3L3qTd5wXCwwHefw= github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500/go.mod h1:LHXpc5tjKvsfZn0pwLKrvlgEhZcCaw3Di9mUEZGAI4E= -github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20240905095613-6adad95c9d84 h1:/Rb4zn+aAvqfYVC7/k0Xzms0iS22VBlm/oTCq9GVFvU= -github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20240905095613-6adad95c9d84/go.mod h1:FFxuXUCPjWgqGwzLQjdCdJMYp13gomo+z1RaCqOSIHg= +github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20240913155426-6ac7c5ccbf59 h1:pwApWoWIGyA+ZVaEFITOk0v4Wpyf9gV9dFw4bIUl9IM= +github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20240913155426-6ac7c5ccbf59/go.mod h1:1LTwG/8SxgJ7VkDZ4+WSYNY/2F2ZNmkpcYEmqQwGjos= github.com/netsampler/goflow2 v1.3.7 h1:XZaTy8kkMnGXpJ9hS3KbO1McyrFTpVNhVFEx9rNhMmc= github.com/netsampler/goflow2 v1.3.7/go.mod h1:4UZsVGVAs//iMCptUHn3WNScztJeUhZH7kDW2+/vDdQ= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -670,13 +699,20 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= @@ -694,6 +730,10 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 h1:OoDvzyaK7F/ZANIIFOgb4Haj7mye3Hle0fYZZNdidSs= +github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20/go.mod h1:dJbxEaalQl83nn904K32FaMjlH/qOObZ0bj4ejQ78AI= +github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20240902083137-5d2310e77f87 h1:NHexPw6RbB7bf9YpoK+Cz3HTFYQc9jM60ai0EANquCU= +github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20240902083137-5d2310e77f87/go.mod h1:bLkGnzUmNsq1ubG2kbXWXbIP7mMnXLaCxOXGyN6NPWQ= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -784,9 +824,12 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 h1:q815fjV3G+4JvXNo2VwT2m+/msMU0sUkCK68CgHV9Y8= +github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91/go.mod h1:qIWCTaK0xQlXNlNlIVoZjKMZFopqfMZcg4JcRqGoYc0= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= @@ -869,10 +912,11 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4= 
+github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= +github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vladimirvivien/gexe v0.3.0 h1:4xwiOwGrDob5OMR6E92B9olDXYDglXdHhzR1ggYtWJM= @@ -987,8 +1031,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1047,6 +1091,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1054,6 +1099,7 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= @@ -1109,17 +1155,18 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1149,6 +1196,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1163,9 +1211,10 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1262,6 +1311,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= @@ -1372,6 +1422,7 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY= @@ -1387,14 +1438,20 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= 
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1420,18 +1477,18 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= -k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= -k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE= +k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= -k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= -k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII= +k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -1451,8 +1508,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLqlNpx+Q= -sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw= 
+sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/e2e-framework v0.4.0 h1:4yYmFDNNoTnazqmZJXQ6dlQF1vrnDbutmxlyvBpC5rY= sigs.k8s.io/e2e-framework v0.4.0/go.mod h1:JilFQPF1OL1728ABhMlf9huse7h+uBJDXl9YeTs49A8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/github.com/cenkalti/hub/.gitignore b/vendor/github.com/cenkalti/hub/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/hub/.travis.yml b/vendor/github.com/cenkalti/hub/.travis.yml new file mode 100644 index 000000000..b05e4c53f --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: 1.2 + diff --git a/vendor/github.com/cenkalti/hub/LICENSE b/vendor/github.com/cenkalti/hub/LICENSE new file mode 100644 index 000000000..89b817996 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/hub/README.md b/vendor/github.com/cenkalti/hub/README.md new file mode 100644 index 000000000..d3f211818 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/README.md @@ -0,0 +1,5 @@ +hub +=== + +[![GoDoc](https://godoc.org/github.com/cenkalti/hub?status.png)](https://godoc.org/github.com/cenkalti/hub) +[![Build Status](https://travis-ci.org/cenkalti/hub.png)](https://travis-ci.org/cenkalti/hub) diff --git a/vendor/github.com/cenkalti/hub/hub.go b/vendor/github.com/cenkalti/hub/hub.go new file mode 100644 index 000000000..24c5efa86 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/hub.go @@ -0,0 +1,82 @@ +// Package hub provides a simple event dispatcher for publish/subscribe pattern. +package hub + +import "sync" + +type Kind int + +// Event is an interface for published events. +type Event interface { + Kind() Kind +} + +// Hub is an event dispatcher, publishes events to the subscribers +// which are subscribed for a specific event type. +// Optimized for publish calls. 
+// The handlers may be called in order different than they are registered. +type Hub struct { + subscribers map[Kind][]handler + m sync.RWMutex + seq uint64 +} + +type handler struct { + f func(Event) + id uint64 +} + +// Subscribe registers f for the event of a specific kind. +func (h *Hub) Subscribe(kind Kind, f func(Event)) (cancel func()) { + var cancelled bool + h.m.Lock() + h.seq++ + id := h.seq + if h.subscribers == nil { + h.subscribers = make(map[Kind][]handler) + } + h.subscribers[kind] = append(h.subscribers[kind], handler{id: id, f: f}) + h.m.Unlock() + return func() { + h.m.Lock() + if cancelled { + h.m.Unlock() + return + } + cancelled = true + a := h.subscribers[kind] + for i, f := range a { + if f.id == id { + a[i], h.subscribers[kind] = a[len(a)-1], a[:len(a)-1] + break + } + } + if len(a) == 0 { + delete(h.subscribers, kind) + } + h.m.Unlock() + } +} + +// Publish an event to the subscribers. +func (h *Hub) Publish(e Event) { + h.m.RLock() + if handlers, ok := h.subscribers[e.Kind()]; ok { + for _, h := range handlers { + h.f(e) + } + } + h.m.RUnlock() +} + +// DefaultHub is the default Hub used by Publish and Subscribe. +var DefaultHub Hub + +// Subscribe registers f for the event of a specific kind in the DefaultHub. +func Subscribe(kind Kind, f func(Event)) (cancel func()) { + return DefaultHub.Subscribe(kind, f) +} + +// Publish an event to the subscribers in DefaultHub. +func Publish(e Event) { + DefaultHub.Publish(e) +} diff --git a/vendor/github.com/cenkalti/rpc2/.gitignore b/vendor/github.com/cenkalti/rpc2/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/cenkalti/rpc2/.travis.yml b/vendor/github.com/cenkalti/rpc2/.travis.yml new file mode 100644 index 000000000..ae8233c2b --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.15 + - tip + +arch: + - amd64 + - ppc64le diff --git a/vendor/github.com/cenkalti/rpc2/LICENSE b/vendor/github.com/cenkalti/rpc2/LICENSE new file mode 100644 index 000000000..d565b1b1f --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/cenkalti/rpc2/README.md b/vendor/github.com/cenkalti/rpc2/README.md new file mode 100644 index 000000000..3dffd26e4 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/README.md @@ -0,0 +1,82 @@ +rpc2 +==== + +[![GoDoc](https://godoc.org/github.com/cenkalti/rpc2?status.png)](https://godoc.org/github.com/cenkalti/rpc2) +[![Build Status](https://travis-ci.org/cenkalti/rpc2.png)](https://travis-ci.org/cenkalti/rpc2) + +rpc2 is a fork of net/rpc package in the standard library. +The main goal is to add bi-directional support to calls. +That means server can call the methods of client. +This is not possible with net/rpc package. +In order to do this it adds a `*Client` argument to method signatures. + +Install +-------- + + go get github.com/cenkalti/rpc2 + +Example server +--------------- + +```go +package main + +import ( + "fmt" + "net" + + "github.com/cenkalti/rpc2" +) + +type Args struct{ A, B int } +type Reply int + +func main() { + srv := rpc2.NewServer() + srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error { + + // Reversed call (server to client) + var rep Reply + client.Call("mult", Args{2, 3}, &rep) + fmt.Println("mult result:", rep) + + *reply = Reply(args.A + args.B) + return nil + }) + + lis, _ := net.Listen("tcp", "127.0.0.1:5000") + srv.Accept(lis) +} +``` + +Example Client +--------------- + +```go +package main + +import ( + "fmt" + "net" + + "github.com/cenkalti/rpc2" +) + +type Args struct{ A, B int } +type Reply int + +func main() { + conn, _ := net.Dial("tcp", "127.0.0.1:5000") + + clt := rpc2.NewClient(conn) + clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error { + *reply = Reply(args.A * args.B) + return nil + }) + go clt.Run() + + var rep Reply + clt.Call("add", Args{1, 2}, &rep) + fmt.Println("add result:", rep) +} +``` diff --git a/vendor/github.com/cenkalti/rpc2/client.go b/vendor/github.com/cenkalti/rpc2/client.go new file mode 100644 index 000000000..cc9956976 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/client.go @@ -0,0 +1,364 @@ +// Package rpc2 provides bi-directional RPC client and server similar to net/rpc. +package rpc2 + +import ( + "context" + "errors" + "io" + "log" + "reflect" + "sync" +) + +// Client represents an RPC Client. +// There may be multiple outstanding Calls associated +// with a single Client, and a Client may be used by +// multiple goroutines simultaneously. +type Client struct { + mutex sync.Mutex // protects pending, seq, request + sending sync.Mutex + request Request // temp area used in send() + seq uint64 + pending map[uint64]*Call + closing bool + shutdown bool + server bool + codec Codec + handlers map[string]*handler + disconnect chan struct{} + State *State // additional information to associate with client + blocking bool // whether to block request handling +} + +// NewClient returns a new Client to handle requests to the +// set of services at the other end of the connection. +// It adds a buffer to the write side of the connection so +// the header and payload are sent as a unit. 
+func NewClient(conn io.ReadWriteCloser) *Client { + return NewClientWithCodec(NewGobCodec(conn)) +} + +// NewClientWithCodec is like NewClient but uses the specified +// codec to encode requests and decode responses. +func NewClientWithCodec(codec Codec) *Client { + return &Client{ + codec: codec, + pending: make(map[uint64]*Call), + handlers: make(map[string]*handler), + disconnect: make(chan struct{}), + seq: 1, // 0 means notification. + } +} + +// SetBlocking puts the client in blocking mode. +// In blocking mode, received requests are processes synchronously. +// If you have methods that may take a long time, other subsequent requests may time out. +func (c *Client) SetBlocking(blocking bool) { + c.blocking = blocking +} + +// Run the client's read loop. +// You must run this method before calling any methods on the server. +func (c *Client) Run() { + c.readLoop() +} + +// DisconnectNotify returns a channel that is closed +// when the client connection has gone away. +func (c *Client) DisconnectNotify() chan struct{} { + return c.disconnect +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. +func (c *Client) Handle(method string, handlerFunc interface{}) { + addHandler(c.handlers, method, handlerFunc) +} + +// readLoop reads messages from codec. +// It reads a reqeust or a response to the previous request. +// If the message is request, calls the handler function. +// If the message is response, sends the reply to the associated call. +func (c *Client) readLoop() { + var err error + var req Request + var resp Response + for err == nil { + req = Request{} + resp = Response{} + if err = c.codec.ReadHeader(&req, &resp); err != nil { + break + } + + if req.Method != "" { + // request comes to server + if err = c.readRequest(&req); err != nil { + debugln("rpc2: error reading request:", err.Error()) + } + } else { + // response comes to client + if err = c.readResponse(&resp); err != nil { + debugln("rpc2: error reading response:", err.Error()) + } + } + } + // Terminate pending calls. + c.sending.Lock() + c.mutex.Lock() + c.shutdown = true + closing := c.closing + if err == io.EOF { + if closing { + err = ErrShutdown + } else { + err = io.ErrUnexpectedEOF + } + } + for _, call := range c.pending { + call.Error = err + call.done() + } + c.mutex.Unlock() + c.sending.Unlock() + if err != io.EOF && !closing && !c.server { + debugln("rpc2: client protocol error:", err) + } + close(c.disconnect) + if !closing { + c.codec.Close() + } +} + +func (c *Client) handleRequest(req Request, method *handler, argv reflect.Value) { + // Invoke the method, providing a new value for the reply. + replyv := reflect.New(method.replyType.Elem()) + + returnValues := method.fn.Call([]reflect.Value{reflect.ValueOf(c), argv, replyv}) + + // Do not send response if request is a notification. + if req.Seq == 0 { + return + } + + // The return value for the method is an error. 
+ errInter := returnValues[0].Interface() + errmsg := "" + if errInter != nil { + errmsg = errInter.(error).Error() + } + resp := &Response{ + Seq: req.Seq, + Error: errmsg, + } + if err := c.codec.WriteResponse(resp, replyv.Interface()); err != nil { + debugln("rpc2: error writing response:", err.Error()) + } +} + +func (c *Client) readRequest(req *Request) error { + method, ok := c.handlers[req.Method] + if !ok { + resp := &Response{ + Seq: req.Seq, + Error: "rpc2: can't find method " + req.Method, + } + return c.codec.WriteResponse(resp, resp) + } + + // Decode the argument value. + var argv reflect.Value + argIsValue := false // if true, need to indirect before calling. + if method.argType.Kind() == reflect.Ptr { + argv = reflect.New(method.argType.Elem()) + } else { + argv = reflect.New(method.argType) + argIsValue = true + } + // argv guaranteed to be a pointer now. + if err := c.codec.ReadRequestBody(argv.Interface()); err != nil { + return err + } + if argIsValue { + argv = argv.Elem() + } + + if c.blocking { + c.handleRequest(*req, method, argv) + } else { + go c.handleRequest(*req, method, argv) + } + + return nil +} + +func (c *Client) readResponse(resp *Response) error { + seq := resp.Seq + c.mutex.Lock() + call := c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + + var err error + switch { + case call == nil: + // We've got no pending call. That usually means that + // WriteRequest partially failed, and call was already + // removed; response is a server telling us about an + // error reading request body. We should still attempt + // to read error body, but there's no one to give it to. + err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + case resp.Error != "": + // We've got an error response. Give this to the request; + // any subsequent requests will get the ReadResponseBody + // error if there is one. + call.Error = ServerError(resp.Error) + err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + call.done() + default: + err = c.codec.ReadResponseBody(call.Reply) + if err != nil { + call.Error = errors.New("reading body " + err.Error()) + } + call.done() + } + + return err +} + +// Close waits for active calls to finish and closes the codec. +func (c *Client) Close() error { + c.mutex.Lock() + if c.shutdown || c.closing { + c.mutex.Unlock() + return ErrShutdown + } + c.closing = true + c.mutex.Unlock() + return c.codec.Close() +} + +// Go invokes the function asynchronously. It returns the Call structure representing +// the invocation. The done channel will signal when the call is complete by returning +// the same Call object. If done is nil, Go will allocate a new channel. +// If non-nil, done must be buffered or Go will deliberately crash. +func (c *Client) Go(method string, args interface{}, reply interface{}, done chan *Call) *Call { + call := new(Call) + call.Method = method + call.Args = args + call.Reply = reply + if done == nil { + done = make(chan *Call, 10) // buffered. + } else { + // If caller passes done != nil, it must arrange that + // done has enough buffer for the number of simultaneous + // RPCs that will be using that channel. If the channel + // is totally unbuffered, it's best not to run at all. 
+ if cap(done) == 0 { + log.Panic("rpc2: done channel is unbuffered") + } + } + call.Done = done + c.send(call) + return call +} + +// CallWithContext invokes the named function, waits for it to complete, and +// returns its error status, or an error from Context timeout. +func (c *Client) CallWithContext(ctx context.Context, method string, args interface{}, reply interface{}) error { + call := c.Go(method, args, reply, make(chan *Call, 1)) + select { + case <-call.Done: + return call.Error + case <-ctx.Done(): + return ctx.Err() + } + return nil +} + +// Call invokes the named function, waits for it to complete, and returns its error status. +func (c *Client) Call(method string, args interface{}, reply interface{}) error { + return c.CallWithContext(context.Background(), method, args, reply) +} + +func (call *Call) done() { + select { + case call.Done <- call: + // ok + default: + // We don't want to block here. It is the caller's responsibility to make + // sure the channel has enough buffer space. See comment in Go(). + debugln("rpc2: discarding Call reply due to insufficient Done chan capacity") + } +} + +// ServerError represents an error that has been returned from +// the remote side of the RPC connection. +type ServerError string + +func (e ServerError) Error() string { + return string(e) +} + +// ErrShutdown is returned when the connection is closing or closed. +var ErrShutdown = errors.New("connection is shut down") + +// Call represents an active RPC. +type Call struct { + Method string // The name of the service and method to call. + Args interface{} // The argument to the function (*struct). + Reply interface{} // The reply from the function (*struct). + Error error // After completion, the error status. + Done chan *Call // Strobes when call is complete. +} + +func (c *Client) send(call *Call) { + c.sending.Lock() + defer c.sending.Unlock() + + // Register this call. + c.mutex.Lock() + if c.shutdown || c.closing { + call.Error = ErrShutdown + c.mutex.Unlock() + call.done() + return + } + seq := c.seq + c.seq++ + c.pending[seq] = call + c.mutex.Unlock() + + // Encode and send the request. + c.request.Seq = seq + c.request.Method = call.Method + err := c.codec.WriteRequest(&c.request, call.Args) + if err != nil { + c.mutex.Lock() + call = c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + if call != nil { + call.Error = err + call.done() + } + } +} + +// Notify sends a request to the receiver but does not wait for a return value. +func (c *Client) Notify(method string, args interface{}) error { + c.sending.Lock() + defer c.sending.Unlock() + + if c.shutdown || c.closing { + return ErrShutdown + } + + c.request.Seq = 0 + c.request.Method = method + return c.codec.WriteRequest(&c.request, args) +} diff --git a/vendor/github.com/cenkalti/rpc2/codec.go b/vendor/github.com/cenkalti/rpc2/codec.go new file mode 100644 index 000000000..b097d9aaa --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/codec.go @@ -0,0 +1,125 @@ +package rpc2 + +import ( + "bufio" + "encoding/gob" + "io" + "sync" +) + +// A Codec implements reading and writing of RPC requests and responses. +// The client calls ReadHeader to read a message header. +// The implementation must populate either Request or Response argument. +// Depending on which argument is populated, ReadRequestBody or +// ReadResponseBody is called right after ReadHeader. +// ReadRequestBody and ReadResponseBody may be called with a nil +// argument to force the body to be read and then discarded. 
+type Codec interface { + // ReadHeader must read a message and populate either the request + // or the response by inspecting the incoming message. + ReadHeader(*Request, *Response) error + + // ReadRequestBody into args argument of handler function. + ReadRequestBody(interface{}) error + + // ReadResponseBody into reply argument of handler function. + ReadResponseBody(interface{}) error + + // WriteRequest must be safe for concurrent use by multiple goroutines. + WriteRequest(*Request, interface{}) error + + // WriteResponse must be safe for concurrent use by multiple goroutines. + WriteResponse(*Response, interface{}) error + + // Close is called when client/server finished with the connection. + Close() error +} + +// Request is a header written before every RPC call. +type Request struct { + Seq uint64 // sequence number chosen by client + Method string +} + +// Response is a header written before every RPC return. +type Response struct { + Seq uint64 // echoes that of the request + Error string // error, if any. +} + +type gobCodec struct { + rwc io.ReadWriteCloser + dec *gob.Decoder + enc *gob.Encoder + encBuf *bufio.Writer + mutex sync.Mutex +} + +type message struct { + Seq uint64 + Method string + Error string +} + +// NewGobCodec returns a new rpc2.Codec using gob encoding/decoding on conn. +func NewGobCodec(conn io.ReadWriteCloser) Codec { + buf := bufio.NewWriter(conn) + return &gobCodec{ + rwc: conn, + dec: gob.NewDecoder(conn), + enc: gob.NewEncoder(buf), + encBuf: buf, + } +} + +func (c *gobCodec) ReadHeader(req *Request, resp *Response) error { + var msg message + if err := c.dec.Decode(&msg); err != nil { + return err + } + + if msg.Method != "" { + req.Seq = msg.Seq + req.Method = msg.Method + } else { + resp.Seq = msg.Seq + resp.Error = msg.Error + } + return nil +} + +func (c *gobCodec) ReadRequestBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) ReadResponseBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) WriteRequest(r *Request, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) WriteResponse(r *Response, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) Close() error { + return c.rwc.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/debug.go b/vendor/github.com/cenkalti/rpc2/debug.go new file mode 100644 index 000000000..ec1b62521 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/debug.go @@ -0,0 +1,12 @@ +package rpc2 + +import "log" + +// DebugLog controls the printing of internal and I/O errors. +var DebugLog = false + +func debugln(v ...interface{}) { + if DebugLog { + log.Println(v...) + } +} diff --git a/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go new file mode 100644 index 000000000..87e116887 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go @@ -0,0 +1,226 @@ +// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec for the rpc2 package. +// +// Beside struct types, JSONCodec allows using positional arguments. +// Use []interface{} as the type of argument when sending and receiving methods. 
+// +// Positional arguments example: +// server.Handle("add", func(client *rpc2.Client, args []interface{}, result *float64) error { +// *result = args[0].(float64) + args[1].(float64) +// return nil +// }) +// +// var result float64 +// client.Call("add", []interface{}{1, 2}, &result) +// +package jsonrpc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "sync" + + "github.com/cenkalti/rpc2" +) + +type jsonCodec struct { + dec *json.Decoder // for reading JSON values + enc *json.Encoder // for writing JSON values + c io.Closer + + // temporary work space + msg message + serverRequest serverRequest + clientResponse clientResponse + + // JSON-RPC clients can use arbitrary json values as request IDs. + // Package rpc expects uint64 request IDs. + // We assign uint64 sequence numbers to incoming requests + // but save the original request ID in the pending map. + // When rpc responds, we use the sequence number in + // the response to find the original request ID. + mutex sync.Mutex // protects seq, pending + pending map[uint64]*json.RawMessage + seq uint64 +} + +// NewJSONCodec returns a new rpc2.Codec using JSON-RPC on conn. +func NewJSONCodec(conn io.ReadWriteCloser) rpc2.Codec { + return &jsonCodec{ + dec: json.NewDecoder(conn), + enc: json.NewEncoder(conn), + c: conn, + pending: make(map[uint64]*json.RawMessage), + } +} + +// serverRequest and clientResponse combined +type message struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// Unmarshal to +type serverRequest struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` +} +type clientResponse struct { + Id uint64 `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// to Marshal +type serverResponse struct { + Id *json.RawMessage `json:"id"` + Result interface{} `json:"result"` + Error interface{} `json:"error"` +} +type clientRequest struct { + Method string `json:"method"` + Params interface{} `json:"params"` + Id *uint64 `json:"id"` +} + +func (c *jsonCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error { + c.msg = message{} + if err := c.dec.Decode(&c.msg); err != nil { + return err + } + + if c.msg.Method != "" { + // request comes to server + c.serverRequest.Id = c.msg.Id + c.serverRequest.Method = c.msg.Method + c.serverRequest.Params = c.msg.Params + + req.Method = c.serverRequest.Method + + // JSON request id can be any JSON value; + // RPC package expects uint64. Translate to + // internal uint64 and save JSON on the side. 
+ if c.serverRequest.Id == nil { + // Notification + } else { + c.mutex.Lock() + c.seq++ + c.pending[c.seq] = c.serverRequest.Id + c.serverRequest.Id = nil + req.Seq = c.seq + c.mutex.Unlock() + } + } else { + // response comes to client + err := json.Unmarshal(*c.msg.Id, &c.clientResponse.Id) + if err != nil { + return err + } + c.clientResponse.Result = c.msg.Result + c.clientResponse.Error = c.msg.Error + + resp.Error = "" + resp.Seq = c.clientResponse.Id + if c.clientResponse.Error != nil || c.clientResponse.Result == nil { + x, ok := c.clientResponse.Error.(string) + if !ok { + return fmt.Errorf("invalid error %v", c.clientResponse.Error) + } + if x == "" { + x = "unspecified error" + } + resp.Error = x + } + } + return nil +} + +var errMissingParams = errors.New("jsonrpc: request body missing params") + +func (c *jsonCodec) ReadRequestBody(x interface{}) error { + if x == nil { + return nil + } + if c.serverRequest.Params == nil { + return errMissingParams + } + + var err error + + // Check if x points to a slice of any kind + rt := reflect.TypeOf(x) + if rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Slice { + // If it's a slice, unmarshal as is + err = json.Unmarshal(*c.serverRequest.Params, x) + } else { + // Anything else unmarshal into a slice containing x + params := &[]interface{}{x} + err = json.Unmarshal(*c.serverRequest.Params, params) + } + + return err +} + +func (c *jsonCodec) ReadResponseBody(x interface{}) error { + if x == nil { + return nil + } + return json.Unmarshal(*c.clientResponse.Result, x) +} + +func (c *jsonCodec) WriteRequest(r *rpc2.Request, param interface{}) error { + req := &clientRequest{Method: r.Method} + + // Check if param is a slice of any kind + if param != nil && reflect.TypeOf(param).Kind() == reflect.Slice { + // If it's a slice, leave as is + req.Params = param + } else { + // Put anything else into a slice + req.Params = []interface{}{param} + } + + if r.Seq == 0 { + // Notification + req.Id = nil + } else { + seq := r.Seq + req.Id = &seq + } + return c.enc.Encode(req) +} + +var null = json.RawMessage([]byte("null")) + +func (c *jsonCodec) WriteResponse(r *rpc2.Response, x interface{}) error { + c.mutex.Lock() + b, ok := c.pending[r.Seq] + if !ok { + c.mutex.Unlock() + return errors.New("invalid sequence number in response") + } + delete(c.pending, r.Seq) + c.mutex.Unlock() + + if b == nil { + // Invalid request so no id. Use JSON null. + b = &null + } + resp := serverResponse{Id: b} + if r.Error == "" { + resp.Result = x + } else { + resp.Error = r.Error + } + return c.enc.Encode(resp) +} + +func (c *jsonCodec) Close() error { + return c.c.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/server.go b/vendor/github.com/cenkalti/rpc2/server.go new file mode 100644 index 000000000..2a5be7ed6 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/server.go @@ -0,0 +1,181 @@ +package rpc2 + +import ( + "io" + "log" + "net" + "reflect" + "unicode" + "unicode/utf8" + + "github.com/cenkalti/hub" +) + +// Precompute the reflect type for error. Can't use error directly +// because Typeof takes an empty interface value. This is annoying. +var typeOfError = reflect.TypeOf((*error)(nil)).Elem() +var typeOfClient = reflect.TypeOf((*Client)(nil)) + +const ( + clientConnected hub.Kind = iota + clientDisconnected +) + +// Server responds to RPC requests made by Client. 
+type Server struct { + handlers map[string]*handler + eventHub *hub.Hub +} + +type handler struct { + fn reflect.Value + argType reflect.Type + replyType reflect.Type +} + +type connectionEvent struct { + Client *Client +} + +type disconnectionEvent struct { + Client *Client +} + +func (connectionEvent) Kind() hub.Kind { return clientConnected } +func (disconnectionEvent) Kind() hub.Kind { return clientDisconnected } + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + handlers: make(map[string]*handler), + eventHub: &hub.Hub{}, + } +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. +func (s *Server) Handle(method string, handlerFunc interface{}) { + addHandler(s.handlers, method, handlerFunc) +} + +func addHandler(handlers map[string]*handler, mname string, handlerFunc interface{}) { + if _, ok := handlers[mname]; ok { + panic("rpc2: multiple registrations for " + mname) + } + + method := reflect.ValueOf(handlerFunc) + mtype := method.Type() + // Method needs three ins: *client, *args, *reply. + if mtype.NumIn() != 3 { + log.Panicln("method", mname, "has wrong number of ins:", mtype.NumIn()) + } + // First arg must be a pointer to rpc2.Client. + clientType := mtype.In(0) + if clientType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "client type not a pointer:", clientType) + } + if clientType != typeOfClient { + log.Panicln("method", mname, "first argument", clientType.String(), "not *rpc2.Client") + } + // Second arg need not be a pointer. + argType := mtype.In(1) + if !isExportedOrBuiltinType(argType) { + log.Panicln(mname, "argument type not exported:", argType) + } + // Third arg must be a pointer. + replyType := mtype.In(2) + if replyType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "reply type not a pointer:", replyType) + } + // Reply type must be exported. + if !isExportedOrBuiltinType(replyType) { + log.Panicln("method", mname, "reply type not exported:", replyType) + } + // Method needs one out. + if mtype.NumOut() != 1 { + log.Panicln("method", mname, "has wrong number of outs:", mtype.NumOut()) + } + // The return type of the method must be error. + if returnType := mtype.Out(0); returnType != typeOfError { + log.Panicln("method", mname, "returns", returnType.String(), "not error") + } + handlers[mname] = &handler{ + fn: method, + argType: argType, + replyType: replyType, + } +} + +// Is this type exported or a builtin? +func isExportedOrBuiltinType(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. + return isExported(t.Name()) || t.PkgPath() == "" +} + +// Is this an exported - upper case - name? +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// OnConnect registers a function to run when a client connects. +func (s *Server) OnConnect(f func(*Client)) { + s.eventHub.Subscribe(clientConnected, func(e hub.Event) { + go f(e.(connectionEvent).Client) + }) +} + +// OnDisconnect registers a function to run when a client disconnects. +func (s *Server) OnDisconnect(f func(*Client)) { + s.eventHub.Subscribe(clientDisconnected, func(e hub.Event) { + go f(e.(disconnectionEvent).Client) + }) +} + +// Accept accepts connections on the listener and serves requests +// for each incoming connection. Accept blocks; the caller typically +// invokes it in a go statement. 
+func (s *Server) Accept(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Print("rpc.Serve: accept:", err.Error()) + return + } + go s.ServeConn(conn) + } +} + +// ServeConn runs the server on a single connection. +// ServeConn blocks, serving the connection until the client hangs up. +// The caller typically invokes ServeConn in a go statement. +// ServeConn uses the gob wire format (see package gob) on the +// connection. To use an alternate codec, use ServeCodec. +func (s *Server) ServeConn(conn io.ReadWriteCloser) { + s.ServeCodec(NewGobCodec(conn)) +} + +// ServeCodec is like ServeConn but uses the specified codec to +// decode requests and encode responses. +func (s *Server) ServeCodec(codec Codec) { + s.ServeCodecWithState(codec, NewState()) +} + +// ServeCodecWithState is like ServeCodec but also gives the ability to +// associate a state variable with the client that persists across RPC calls. +func (s *Server) ServeCodecWithState(codec Codec, state *State) { + defer codec.Close() + + // Client also handles the incoming connections. + c := NewClientWithCodec(codec) + c.server = true + c.handlers = s.handlers + c.State = state + + s.eventHub.Publish(connectionEvent{c}) + c.Run() + s.eventHub.Publish(disconnectionEvent{c}) +} diff --git a/vendor/github.com/cenkalti/rpc2/state.go b/vendor/github.com/cenkalti/rpc2/state.go new file mode 100644 index 000000000..7a4f23e6d --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/state.go @@ -0,0 +1,25 @@ +package rpc2 + +import "sync" + +type State struct { + store map[string]interface{} + m sync.RWMutex +} + +func NewState() *State { + return &State{store: make(map[string]interface{})} +} + +func (s *State) Get(key string) (value interface{}, ok bool) { + s.m.RLock() + value, ok = s.store[key] + s.m.RUnlock() + return +} + +func (s *State) Set(key string, value interface{}) { + s.m.Lock() + s.store[key] = value + s.m.Unlock() +} diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/vendor/github.com/containernetworking/cni/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go new file mode 100644 index 000000000..0d82a2dd3 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -0,0 +1,679 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package libcni
+
+// Note this is the actual implementation of the CNI specification, which
+// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file.
+// It is typically bundled into runtime providers (e.g. containerd or cri-o would use this
+// before calling runc or hcsshim). It is also bundled into CNI providers, for example,
+// to add an IP to a container, to parse the CNI configuration, and so on.
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containernetworking/cni/pkg/invoke"
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/types/create"
+ "github.com/containernetworking/cni/pkg/utils"
+ "github.com/containernetworking/cni/pkg/version"
+)
+
+var (
+ CacheDir = "/var/lib/cni"
+)
+
+const (
+ CNICacheV1 = "cniCacheV1"
+)
+
+// A RuntimeConf holds the arguments to one invocation of a CNI plugin
+// excepting the network configuration, with the nested exception that
+// the `runtimeConfig` from the network configuration is included
+// here.
+type RuntimeConf struct {
+ ContainerID string
+ NetNS string
+ IfName string
+ Args [][2]string
+ // A dictionary of capability-specific data passed by the runtime
+ // to plugins as top-level keys in the 'runtimeConfig' dictionary
+ // of the plugin's stdin data. libcni will ensure that only keys
+ // in this map which match the capabilities of the plugin are passed
+ // to the plugin
+ CapabilityArgs map[string]interface{}
+
+ // DEPRECATED. Will be removed in a future release.
+ CacheDir string
+}
+
+type NetworkConfig struct {
+ Network *types.NetConf
+ Bytes []byte
+}
+
+type NetworkConfigList struct {
+ Name string
+ CNIVersion string
+ DisableCheck bool
+ Plugins []*NetworkConfig
+ Bytes []byte
+}
+
+type CNI interface {
+ AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+ CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+ DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+ GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+ GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
+
+ AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+ CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+ DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+ GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+ GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
+
+ ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
+ ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
+}
+
+type CNIConfig struct {
+ Path []string
+ exec invoke.Exec
+ cacheDir string
+}
+
+// CNIConfig implements the CNI interface
+var _ CNI = &CNIConfig{}
+
+// NewCNIConfig returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig {
+ return NewCNIConfigWithCacheDir(path, "", exec)
+}
+
+// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+// The given cache directory will be used for temporary data storage when needed.
+func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig {
+ return &CNIConfig{
+ Path: path,
+ cacheDir: cacheDir,
+ exec: exec,
+ }
+}
+
+func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) {
+ var err error
+
+ inject := map[string]interface{}{
+ "name": name,
+ "cniVersion": cniVersion,
+ }
+ // Add previous plugin result
+ if prevResult != nil {
+ inject["prevResult"] = prevResult
+ }
+
+ // Ensure every config uses the same name and version
+ orig, err = InjectConf(orig, inject)
+ if err != nil {
+ return nil, err
+ }
+
+ return injectRuntimeConfig(orig, rt)
+}
+
+// This function takes a libcni RuntimeConf structure and injects values into
+// a "runtimeConfig" dictionary in the CNI network configuration JSON that
+// will be passed to the plugin on stdin.
+//
+// Only "capabilities arguments" passed by the runtime are currently injected.
+// These capabilities arguments are filtered through the plugin's advertised
+// capabilities from its config JSON, and any keys in the CapabilityArgs
+// matching plugin capabilities are added to the "runtimeConfig" dictionary
+// sent to the plugin via JSON on stdin. For example, if the plugin's
+// capabilities include "portMappings", and the CapabilityArgs map includes a
+// "portMappings" key, that key and its value are added to the "runtimeConfig"
+// dictionary to be passed to the plugin's stdin.
+func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + rc := make(map[string]interface{}) + for capability, supported := range orig.Network.Capabilities { + if !supported { + continue + } + if data, ok := rt.CapabilityArgs[capability]; ok { + rc[capability] = data + } + } + + if len(rc) > 0 { + orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) + if err != nil { + return nil, err + } + } + + return orig, nil +} + +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + } + } + return c.exec +} + +type cachedInfo struct { + Kind string `json:"kind"` + ContainerID string `json:"containerId"` + Config []byte `json:"config"` + IfName string `json:"ifName"` + NetworkName string `json:"networkName"` + CniArgs [][2]string `json:"cniArgs,omitempty"` + CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` + RawResult map[string]interface{} `json:"result,omitempty"` + Result types.Result `json:"-"` +} + +// getCacheDir returns the cache directory in this order: +// 1) global cacheDir from CNIConfig object +// 2) deprecated cacheDir from RuntimeConf object +// 3) fall back to default cache directory +func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string { + if c.cacheDir != "" { + return c.cacheDir + } + if rt.CacheDir != "" { + return rt.CacheDir + } + return CacheDir +} + +func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { + if netName == "" || rt.ContainerID == "" || rt.IfName == "" { + return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) + } + return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil +} + +func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { + cached := cachedInfo{ + Kind: CNICacheV1, + ContainerID: rt.ContainerID, + Config: config, + IfName: rt.IfName, + NetworkName: netName, + CniArgs: rt.Args, + CapabilityArgs: rt.CapabilityArgs, + } + + // We need to get type.Result into cachedInfo as JSON map + // Marshal to []byte, then Unmarshal into cached.RawResult + data, err := json.Marshal(result) + if err != nil { + return err + } + + err = json.Unmarshal(data, &cached.RawResult) + if err != nil { + return err + } + + newBytes, err := json.Marshal(&cached) + if err != nil { + return err + } + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return err + } + + return ioutil.WriteFile(fname, newBytes, 0600) +} + +func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + // Ignore error + return nil + } + return os.Remove(fname) +} + +func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + var bytes []byte + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, nil, err + } + bytes, err = ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil, nil + } + + unmarshaled := cachedInfo{} + if err := json.Unmarshal(bytes, &unmarshaled); err 
!= nil { + return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err) + } + if unmarshaled.Kind != CNICacheV1 { + return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) + } + + newRt := *rt + if unmarshaled.CniArgs != nil { + newRt.Args = unmarshaled.CniArgs + } + newRt.CapabilityArgs = unmarshaled.CapabilityArgs + + return unmarshaled.Config, &newRt, nil +} + +func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + data, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Load the cached result + result, err := create.CreateFromBytes(data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + fdata, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + cachedInfo := cachedInfo{} + if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { + return c.getLegacyCachedResult(netName, cniVersion, rt) + } + + newBytes, err := json.Marshal(&cachedInfo.RawResult) + if err != nil { + return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err) + } + + // Load the cached result + result, err := create.CreateFromBytes(newBytes) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +// GetNetworkListCachedResult returns the cached Result of the previous +// AddNetworkList() operation for a network list, or an error. +func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(list.Name, list.CNIVersion, rt) +} + +// GetNetworkCachedResult returns the cached Result of the previous +// AddNetwork() operation for a network, or an error. +func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +// GetNetworkListCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. 
+func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(list.Name, rt) +} + +// GetNetworkCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(net.Network.Name, rt) +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } + if err := utils.ValidateContainerID(rt.ContainerID); err != nil { + return nil, err + } + if err := utils.ValidateNetworkName(name); err != nil { + return nil, err + } + if err := utils.ValidateInterfaceName(rt.IfName); err != nil { + return nil, err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + var result types.Result + for _, net := range list.Plugins { + result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) + if err != nil { + return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err) + } + } + + if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err) + } + + return result, nil +} + +func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) +} + +// CheckNetworkList executes a sequence of plugins with the CHECK command +func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + } + + if list.DisableCheck { + return nil + } + + cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + + for _, net := range list.Plugins { + if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + + return nil +} + +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, 
cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) + } + } + _ = c.cacheDel(list.Name, rt) + + return nil +} + +func pluginDescription(net *types.NetConf) string { + if net == nil { + return "" + } + pluginType := net.Type + out := fmt.Sprintf("type=%q", pluginType) + name := net.Name + if name != "" { + out += fmt.Sprintf(" name=%q", name) + } + return out +} + +// AddNetwork executes the plugin with the ADD command +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + } + + cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + _ = c.cacheDel(net.Network.Name, rt) + return nil +} + +// ValidateNetworkList checks that a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. 
+// +// Returns a list of all capabilities supported by the configuration, or error +func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { + version := list.CNIVersion + + // holding map for seen caps (in case of duplicates) + caps := map[string]interface{}{} + + errs := []error{} + for _, net := range list.Plugins { + if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { + errs = append(errs, err) + } + for c, enabled := range net.Network.Capabilities { + if !enabled { + continue + } + caps[c] = struct{}{} + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("%v", errs) + } + + // make caps list + cc := make([]string, 0, len(caps)) + for c := range caps { + cc = append(cc, c) + } + + return cc, nil +} + +// ValidateNetwork checks that a configuration is reasonably valid. +// It uses the same logic as ValidateNetworkList) +// Returns a list of capabilities +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { + caps := []string{} + for c, ok := range net.Network.Capabilities { + if ok { + caps = append(caps, c) + } + } + if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { + return nil, err + } + return caps, nil +} + +// validatePlugin checks that an individual plugin's configuration is sane +func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginName, c.Path) + if err != nil { + return err + } + if expectedVersion == "" { + expectedVersion = "0.1.0" + } + + vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) + if err != nil { + return err + } + for _, vers := range vi.SupportedVersions() { + if vers == expectedVersion { + return nil + } + } + return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) +} + +// GetVersionInfo reports which versions of the CNI spec are supported by +// the given plugin. +func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginType, c.Path) + if err != nil { + return nil, err + } + + return invoke.GetVersionInfo(ctx, pluginPath, c.exec) +} + +// ===== +func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { + return &invoke.Args{ + Command: action, + ContainerID: rt.ContainerID, + NetNS: rt.NetNS, + PluginArgs: rt.Args, + IfName: rt.IfName, + Path: strings.Join(c.Path, string(os.PathListSeparator)), + } +} diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go new file mode 100644 index 000000000..3cd6a59d1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -0,0 +1,270 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package libcni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + + "github.com/containernetworking/cni/pkg/types" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}} + if err := json.Unmarshal(bytes, conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %w", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %w", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := ioutil.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, 
err + } + + confFiles := []string{} + for _, f := range files { + if f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + return nil, err + case len(files) == 0: + return nil, NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %w", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. +func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. + // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. + + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go new file mode 100644 index 000000000..3cdb4bc8d --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -0,0 +1,128 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "fmt" + "os" + "strings" +) + +type CNIArgs interface { + // For use with os/exec; i.e., return nil to inherit the + // environment from this process + // For use in delegation; inherit the environment from this + // process and allow overrides + AsEnv() []string +} + +type inherited struct{} + +var inheritArgsFromEnv inherited + +func (*inherited) AsEnv() []string { + return nil +} + +func ArgsFromEnv() CNIArgs { + return &inheritArgsFromEnv +} + +type Args struct { + Command string + ContainerID string + NetNS string + PluginArgs [][2]string + PluginArgsStr string + IfName string + Path string +} + +// Args implements the CNIArgs interface +var _ CNIArgs = &Args{} + +func (args *Args) AsEnv() []string { + env := os.Environ() + pluginArgsStr := args.PluginArgsStr + if pluginArgsStr == "" { + pluginArgsStr = stringify(args.PluginArgs) + } + + // Duplicated values which come first will be overridden, so we must put the + // custom values in the end to avoid being overridden by the process environments. + env = append(env, + "CNI_COMMAND="+args.Command, + "CNI_CONTAINERID="+args.ContainerID, + "CNI_NETNS="+args.NetNS, + "CNI_ARGS="+pluginArgsStr, + "CNI_IFNAME="+args.IfName, + "CNI_PATH="+args.Path, + ) + return dedupEnv(env) +} + +// taken from rkt/networking/net_plugin.go +func stringify(pluginArgs [][2]string) string { + entries := make([]string, len(pluginArgs)) + + for i, kv := range pluginArgs { + entries[i] = strings.Join(kv[:], "=") + } + + return strings.Join(entries, ";") +} + +// DelegateArgs implements the CNIArgs interface +// used for delegation to inherit from environments +// and allow some overrides like CNI_COMMAND +var _ CNIArgs = &DelegateArgs{} + +type DelegateArgs struct { + Command string +} + +func (d *DelegateArgs) AsEnv() []string { + env := os.Environ() + + // The custom values should come in the end to override the existing + // process environment of the same key. + env = append(env, + "CNI_COMMAND="+d.Command, + ) + return dedupEnv(env) +} + +// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. +// Items not of the normal environment "key=value" form are preserved unchanged. +func dedupEnv(env []string) []string { + out := make([]string, 0, len(env)) + envMap := map[string]string{} + + for _, kv := range env { + // find the first "=" in environment, if not, just keep it + eq := strings.Index(kv, "=") + if eq < 0 { + out = append(out, kv) + continue + } + envMap[kv[:eq]] = kv[eq+1:] + } + + for k, v := range envMap { + out = append(out, fmt.Sprintf("%s=%s", k, v)) + } + + return out +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go new file mode 100644 index 000000000..8defe4dd3 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -0,0 +1,80 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "os" + "path/filepath" + + "github.com/containernetworking/cni/pkg/types" +) + +func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { + if exec == nil { + exec = defaultExec + } + + paths := filepath.SplitList(os.Getenv("CNI_PATH")) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) + if err != nil { + return "", nil, err + } + + return pluginPath, exec, nil +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return nil, err + } + + // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD + return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) +} + +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and +// JSON configuration +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateCheck will override the original CNI_COMMAND env from process with CHECK + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) +} + +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateDel will override the original CNI_COMMAND env from process with DEL + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +} + +// return CNIArgs used by delegation +func delegateArgs(action string) *DelegateArgs { + return &DelegateArgs{ + Command: action, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go new file mode 100644 index 000000000..3ad07aa8f --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -0,0 +1,187 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package invoke
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/types/create"
+ "github.com/containernetworking/cni/pkg/version"
+)
+
+// Exec is an interface that encapsulates all operations that deal with finding
+// and executing a CNI plugin. Tests may provide a fake implementation
+// to avoid writing fake plugins to temporary directories during the test.
+type Exec interface {
+ ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error)
+ FindInPath(plugin string, paths []string) (string, error)
+ Decode(jsonBytes []byte) (version.PluginInfo, error)
+}
+
+// Plugin must return result in same version as specified in netconf; but
+// for backwards compatibility reasons, if the result version is empty, use
+// the config version (rather than the technically correct 0.1.0).
+// https://github.com/containernetworking/cni/issues/895
+func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
+ versionDecoder := &version.ConfigDecoder{}
+ confVersion, err := versionDecoder.Decode(netconf)
+ if err != nil {
+ return "", nil, err
+ }
+
+ var rawResult map[string]interface{}
+ if err := json.Unmarshal(result, &rawResult); err != nil {
+ return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err)
+ }
+
+ // plugin output of "null" is successfully unmarshalled, but results in a nil
+ // map which causes a panic when the confVersion is assigned below.
+ if rawResult == nil {
+ rawResult = make(map[string]interface{})
+ }
+
+ // Manually decode Result version; we need to know whether its cniVersion
+ // is empty, while built-in decoders (correctly) substitute 0.1.0 for an
+ // empty version per the CNI spec.
+ if resultVerRaw, ok := rawResult["cniVersion"]; ok { + resultVer, ok := resultVerRaw.(string) + if ok && resultVer != "" { + return resultVer, result, nil + } + } + + // If the cniVersion is not present or empty, assume the result is + // the same CNI spec version as the config + rawResult["cniVersion"] = confVersion + newBytes, err := json.Marshal(rawResult) + if err != nil { + return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err) + } + + return confVersion, newBytes, nil +} + +// For example, a testcase could pass an instance of the following fakeExec +// object to ExecPluginWithResult() to verify the incoming stdin and environment +// and provide a tailored response: +// +//import ( +// "encoding/json" +// "path" +// "strings" +//) +// +//type fakeExec struct { +// version.PluginDecoder +//} +// +//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// net := &types.NetConf{} +// err := json.Unmarshal(stdinData, net) +// if err != nil { +// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) +// } +// pluginName := path.Base(pluginPath) +// if pluginName != net.Type { +// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) +// } +// for _, e := range environ { +// // Check environment for forced failure request +// parts := strings.Split(e, "=") +// if len(parts) > 0 && parts[0] == "FAIL" { +// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) +// } +// } +// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil +//} +// +//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// if len(paths) > 0 { +// return path.Join(paths[0], plugin), nil +// } +// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) +//} + +func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec + } + + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + if err != nil { + return nil, err + } + + resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes) + if err != nil { + return nil, err + } + + return create.Create(resultVersion, fixedBytes) +} + +func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { + if exec == nil { + exec = defaultExec + } + _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + return err +} + +// GetVersionInfo returns the version information available about the plugin. +// For recent-enough plugins, it uses the information returned by the VERSION +// command. 
For older plugins which do not recognize that command, it reports +// version 0.1.0 +func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { + if exec == nil { + exec = defaultExec + } + args := &Args{ + Command: "VERSION", + + // set fake values required by plugins built against an older version of skel + NetNS: "dummy", + IfName: "dummy", + Path: "dummy", + } + stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) + if err != nil { + if err.Error() == "unknown CNI_COMMAND: VERSION" { + return version.PluginSupports("0.1.0"), nil + } + return nil, err + } + + return exec.Decode(stdoutBytes) +} + +// DefaultExec is an object that implements the Exec interface which looks +// for and executes plugins from disk. +type DefaultExec struct { + *RawExec + version.PluginDecoder +} + +// DefaultExec implements the Exec interface +var _ Exec = &DefaultExec{} + +var defaultExec = &DefaultExec{ + RawExec: &RawExec{Stderr: os.Stderr}, +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go new file mode 100644 index 000000000..e62029eb7 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go @@ -0,0 +1,48 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// FindInPath returns the full path of the plugin by searching in the provided path +func FindInPath(plugin string, paths []string) (string, error) { + if plugin == "" { + return "", fmt.Errorf("no plugin name provided") + } + + if strings.ContainsRune(plugin, os.PathSeparator) { + return "", fmt.Errorf("invalid plugin name: %s", plugin) + } + + if len(paths) == 0 { + return "", fmt.Errorf("no paths provided") + } + + for _, path := range paths { + for _, fe := range ExecutableFileExtensions { + fullpath := filepath.Join(path, plugin) + fe + if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { + return fullpath, nil + } + } + } + + return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go new file mode 100644 index 000000000..9bcfb4553 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -0,0 +1,20 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go new file mode 100644 index 000000000..7665125b1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go @@ -0,0 +1,18 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{".exe", ""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go new file mode 100644 index 000000000..5ab5cc885 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -0,0 +1,88 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
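A minimal usage sketch for the FindInPath helper above (not part of the vendored sources; the search directories named here are illustrative assumptions only):

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Hypothetical plugin directories; real deployments configure their own.
	searchPaths := []string{"/opt/cni/bin", "/usr/libexec/cni"}

	// FindInPath tries each directory (plus the per-OS executable extensions)
	// and returns the first regular file named "bridge" that it finds.
	pluginPath, err := invoke.FindInPath("bridge", searchPaths)
	if err != nil {
		fmt.Println("plugin not found:", err)
		return
	}
	fmt.Println("resolved plugin:", pluginPath)
}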
+ +package invoke + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os/exec" + "strings" + "time" + + "github.com/containernetworking/cni/pkg/types" +) + +type RawExec struct { + Stderr io.Writer +} + +func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = stderr + + // Retry the command on "text file busy" errors + for i := 0; i <= 5; i++ { + err := c.Run() + + // Command succeeded + if err == nil { + break + } + + // If the plugin is currently about to be written, then we wait a + // second and try it again + if strings.Contains(err.Error(), "text file busy") { + time.Sleep(time.Second) + continue + } + + // All other errors except than the busy text file + return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes()) + } + + // Copy stderr to caller's buffer in case plugin printed to both + // stdout and stderr for some reason. Ignore failures as stderr is + // only informational. + if e.Stderr != nil && stderr.Len() > 0 { + _, _ = stderr.WriteTo(e.Stderr) + } + return stdout.Bytes(), nil +} + +func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error { + emsg := types.Error{} + if len(stdout) == 0 { + if len(stderr) == 0 { + emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err) + } else { + emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr)) + } + } else if perr := json.Unmarshal(stdout, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr) + } + return &emsg +} + +func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { + return FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go new file mode 100644 index 000000000..99b151ff2 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -0,0 +1,189 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types020 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.2.0" + +var supportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + convert.RegisterConverter("0.1.0", []string{ImplementedSpecVersion}, convertFrom010) + convert.RegisterConverter(ImplementedSpecVersion, []string{"0.1.0"}, convertTo010) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +// Compatibility types for CNI version 0.1.0 and 0.2.0 + +// NewResult creates a new Result object from JSON data. The JSON data +// must be compatible with the CNI versions implemented by this type. +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + if result.CNIVersion == "" { + result.CNIVersion = "0.1.0" + } + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +// GetResult converts the given Result object to the ImplementedSpecVersion +// and returns the concrete type or an error +func GetResult(r types.Result) (*Result, error) { + result020, err := convert.Convert(r, ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := result020.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func convertFrom010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.2.0" { + panic("only converts to version 0.2.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: ImplementedSpecVersion, + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +func convertTo010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.1.0" { + panic("only converts to version 0.1.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: "0.1.0", + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + IP4 *IPConfig `json:"ip4,omitempty"` + IP6 *IPConfig `json:"ip6,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// IPConfig contains values necessary to configure an interface +type IPConfig struct { + IP net.IPNet + Gateway net.IP + Routes []types.Route +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + var routes 
[]types.Route + for _, fromRoute := range i.Routes { + routes = append(routes, *fromRoute.Copy()) + } + return &IPConfig{ + IP: i.IP, + Gateway: i.Gateway, + Routes: routes, + } +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type ipConfig struct { + IP types.IPNet `json:"ip"` + Gateway net.IP `json:"gateway,omitempty"` + Routes []types.Route `json:"routes,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + IP: types.IPNet(c.IP), + Gateway: c.Gateway, + Routes: c.Routes, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.IP = net.IPNet(ipc.IP) + c.Gateway = ipc.Gateway + c.Routes = ipc.Routes + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/040/types.go b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go new file mode 100644 index 000000000..3633b0eaa --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go @@ -0,0 +1,306 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types040 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types020 "github.com/containernetworking/cni/pkg/types/020" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.4.0" + +var supportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertInternal) + convert.RegisterConverter("0.3.1", supportedVersions, convertInternal) + + // Down-converters + convert.RegisterConverter("0.4.0", []string{"0.3.0", "0.3.1"}, convertInternal) + convert.RegisterConverter("0.4.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.1", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := 
resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convert020IPConfig(from *types020.IPConfig, ipVersion string) *IPConfig { + return &IPConfig{ + Version: ipVersion, + Address: from.IP, + Gateway: from.Gateway, + } +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types020.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + if fromResult.IP4 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP4, "4")) + for _, fromRoute := range fromResult.IP4.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + if fromResult.IP6 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP6, "6")) + for _, fromRoute := range fromResult.IP6.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + return toResult, nil +} + +func convertInternal(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, fromIntf.Copy()) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, fromIPC.Copy()) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types020.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + } + + for _, fromIP := range fromResult.IPs { + // Only convert the first IP address of each version as 0.2.0 + // and earlier cannot handle multiple IP addresses + if fromIP.Version == "4" && toResult.IP4 == nil { + toResult.IP4 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } else if fromIP.Version == "6" && toResult.IP6 == nil { + toResult.IP6 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } + if toResult.IP4 != nil && toResult.IP6 != nil { + break + } + } + + for _, fromRoute := range fromResult.Routes { + is4 := fromRoute.Dst.IP.To4() != nil + if is4 && toResult.IP4 != nil { + toResult.IP4.Routes = append(toResult.IP4.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } else if !is4 && toResult.IP6 != nil { + toResult.IP6.Routes = append(toResult.IP6.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } + } + + // 0.2.0 and earlier require at least one IP address in the Result + if toResult.IP4 == nil && toResult.IP6 == nil { + return nil, fmt.Errorf("cannot convert: no valid IP addresses") + } + + return toResult, nil +} + +func (r *Result) Version() string { + return 
r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. +func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // IP version, either "4" or "6" + Version string + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Version: i.Version, + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Version string `json:"version"` + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Version: c.Version, + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Version = ipc.Version + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go new file mode 100644 index 000000000..0e1e8b857 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -0,0 +1,307 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
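A small sketch, assuming illustrative plugin output, of how a caller might parse a 0.4.0 result with the types040 package above and down-convert it to the legacy 0.2.0 representation (not part of the vendored sources):

package main

import (
	"log"

	types040 "github.com/containernetworking/cni/pkg/types/040"
)

func main() {
	// Illustrative plugin output in the 0.4.0 result format.
	raw := []byte(`{
	  "cniVersion": "0.4.0",
	  "ips": [{"version": "4", "address": "10.1.2.3/24", "gateway": "10.1.2.1"}]
	}`)

	res, err := types040.NewResult(raw)
	if err != nil {
		log.Fatal(err)
	}

	// The converters registered in this package's init() perform the
	// down-conversion to 0.2.0.
	legacy, err := res.GetAsVersion("0.2.0")
	if err != nil {
		log.Fatal(err)
	}
	if err := legacy.Print(); err != nil { // writes the 0.2.0 JSON to stdout
		log.Fatal(err)
	}
}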
+ +package types100 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types040 "github.com/containernetworking/cni/pkg/types/040" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "1.0.0" + +var supportedVersions = []string{ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + + // Down-converters + convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + result040, err := convert.Convert(from, "0.4.0") + if err != nil { + return nil, err + } + result100, err := convertFrom04x(result040, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return result100, nil +} + +func convertIPConfigFrom040(from *types040.IPConfig) *IPConfig { + to := &IPConfig{ + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceFrom040(from *types040.Interface) *Interface { + return &Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertFrom04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types040.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceFrom040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigFrom040(fromIPC)) + } + for _, 
fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertIPConfigTo040(from *IPConfig) *types040.IPConfig { + version := "6" + if from.Address.IP.To4() != nil { + version = "4" + } + to := &types040.IPConfig{ + Version: version, + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceTo040(from *Interface) *types040.Interface { + return &types040.Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertTo04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types040.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceTo040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigTo040(fromIPC)) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + // First convert to 0.4.0 + result040, err := convertTo04x(from, "0.4.0") + if err != nil { + return nil, err + } + result02x, err := convert.Convert(result040, toVersion) + if err != nil { + return nil, err + } + return result02x, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. 
+func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go new file mode 100644 index 000000000..7516f03ef --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -0,0 +1,122 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding" + "fmt" + "reflect" + "strings" +) + +// UnmarshallableBool typedef for builtin bool +// because builtin type's methods can't be declared +type UnmarshallableBool bool + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Returns boolean true if the string is "1" or "[Tt]rue" +// Returns boolean false if the string is "0" or "[Ff]alse" +func (b *UnmarshallableBool) UnmarshalText(data []byte) error { + s := strings.ToLower(string(data)) + switch s { + case "1", "true": + *b = true + case "0", "false": + *b = false + default: + return fmt.Errorf("boolean unmarshal error: invalid input %s", s) + } + return nil +} + +// UnmarshallableString typedef for builtin string +type UnmarshallableString string + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Returns the string +func (s *UnmarshallableString) UnmarshalText(data []byte) error { + *s = UnmarshallableString(data) + return nil +} + +// CommonArgs contains the IgnoreUnknown argument +// and must be embedded by all Arg structs +type CommonArgs struct { + IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` +} + +// GetKeyField is a helper function to receive Values +// Values that represent a pointer to a struct +func GetKeyField(keyString string, v reflect.Value) reflect.Value { + return v.Elem().FieldByName(keyString) +} + +// UnmarshalableArgsError is used to indicate error unmarshalling args +// from the args-string in the form "K=V;K2=V2;..." +type UnmarshalableArgsError struct { + error +} + +// LoadArgs parses args from a string in the form "K=V;K2=V2;..." +func LoadArgs(args string, container interface{}) error { + if args == "" { + return nil + } + + containerValue := reflect.ValueOf(container) + + pairs := strings.Split(args, ";") + unknownArgs := []string{} + for _, pair := range pairs { + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return fmt.Errorf("ARGS: invalid pair %q", pair) + } + keyString := kv[0] + valueString := kv[1] + keyField := GetKeyField(keyString, containerValue) + if !keyField.IsValid() { + unknownArgs = append(unknownArgs, pair) + continue + } + + var keyFieldInterface interface{} + switch { + case keyField.Kind() == reflect.Ptr: + keyField.Set(reflect.New(keyField.Type().Elem())) + keyFieldInterface = keyField.Interface() + case keyField.CanAddr() && keyField.Addr().CanInterface(): + keyFieldInterface = keyField.Addr().Interface() + default: + return UnmarshalableArgsError{fmt.Errorf("field '%s' has no valid interface", keyString)} + } + u, ok := keyFieldInterface.(encoding.TextUnmarshaler) + if !ok { + return UnmarshalableArgsError{fmt.Errorf( + "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", + keyString, reflect.TypeOf(keyFieldInterface))} + } + err := u.UnmarshalText([]byte(valueString)) + if err != nil { + return fmt.Errorf("ARGS: error parsing value of pair %q: %w", pair, err) + } + } + + isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() + if len(unknownArgs) > 0 && !isIgnoreUnknown { + return fmt.Errorf("ARGS: unknown args %q", unknownArgs) + } + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go new file mode 100644 index 000000000..ed28b33e8 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go @@ -0,0 +1,56 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
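A brief sketch of how a plugin might use the LoadArgs helper above to parse a CNI_ARGS-style string; the EnvArgs struct is a hypothetical example, not defined in this patch:

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types"
)

// EnvArgs is a hypothetical argument struct; plugins define their own,
// embedding CommonArgs so that IgnoreUnknown is honoured.
type EnvArgs struct {
	types.CommonArgs
	IP types.UnmarshallableString
}

func main() {
	args := EnvArgs{}
	// The key/value syntax matches what a runtime passes in CNI_ARGS.
	if err := types.LoadArgs("IgnoreUnknown=true;IP=10.1.2.3;FOO=bar", &args); err != nil {
		log.Fatal(err)
	}
	// FOO is not a field on EnvArgs, but IgnoreUnknown=true suppresses the error.
	fmt.Println("IP =", string(args.IP))
}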
+ +package create + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +// DecodeVersion returns the CNI version from CNI configuration or result JSON, +// or an error if the operation could not be performed. +func DecodeVersion(jsonBytes []byte) (string, error) { + var conf struct { + CNIVersion string `json:"cniVersion"` + } + err := json.Unmarshal(jsonBytes, &conf) + if err != nil { + return "", fmt.Errorf("decoding version from network config: %w", err) + } + if conf.CNIVersion == "" { + return "0.1.0", nil + } + return conf.CNIVersion, nil +} + +// Create creates a CNI Result using the given JSON with the expected +// version, or an error if the creation could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + return convert.Create(version, bytes) +} + +// CreateFromBytes creates a CNI Result from the given JSON, automatically +// detecting the CNI spec version of the result. An error is returned if the +// operation could not be performed. +func CreateFromBytes(bytes []byte) (types.Result, error) { + version, err := DecodeVersion(bytes) + if err != nil { + return nil, err + } + return convert.Create(version, bytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go new file mode 100644 index 000000000..bdbe4b0a5 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go @@ -0,0 +1,92 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +// ConvertFn should convert from the given arbitrary Result type into a +// Result implementing CNI specification version passed in toVersion. +// The function is guaranteed to be passed a Result type matching the +// fromVersion it was registered with, and is guaranteed to be +// passed a toVersion matching one of the toVersions it was registered with. 
+type ConvertFn func(from types.Result, toVersion string) (types.Result, error) + +type converter struct { + // fromVersion is the CNI Result spec version that convertFn accepts + fromVersion string + // toVersions is a list of versions that convertFn can convert to + toVersions []string + convertFn ConvertFn +} + +var converters []*converter + +func findConverter(fromVersion, toVersion string) *converter { + for _, c := range converters { + if c.fromVersion == fromVersion { + for _, v := range c.toVersions { + if v == toVersion { + return c + } + } + } + } + return nil +} + +// Convert converts a CNI Result to the requested CNI specification version, +// or returns an error if the conversion could not be performed or failed +func Convert(from types.Result, toVersion string) (types.Result, error) { + if toVersion == "" { + toVersion = "0.1.0" + } + + fromVersion := from.Version() + + // Shortcut for same version + if fromVersion == toVersion { + return from, nil + } + + // Otherwise find the right converter + c := findConverter(fromVersion, toVersion) + if c == nil { + return nil, fmt.Errorf("no converter for CNI result version %s to %s", + fromVersion, toVersion) + } + return c.convertFn(from, toVersion) +} + +// RegisterConverter registers a CNI Result converter. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterConverter(fromVersion string, toVersions []string, convertFn ConvertFn) { + // Make sure there is no converter already registered for these + // from and to versions + for _, v := range toVersions { + if findConverter(fromVersion, v) != nil { + panic(fmt.Sprintf("converter already registered for %s to %s", + fromVersion, v)) + } + } + converters = append(converters, &converter{ + fromVersion: fromVersion, + toVersions: toVersions, + convertFn: convertFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go new file mode 100644 index 000000000..963630912 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go @@ -0,0 +1,66 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +type ResultFactoryFunc func([]byte) (types.Result, error) + +type creator struct { + // CNI Result spec versions that createFn can create a Result for + versions []string + createFn ResultFactoryFunc +} + +var creators []*creator + +func findCreator(version string) *creator { + for _, c := range creators { + for _, v := range c.versions { + if v == version { + return c + } + } + } + return nil +} + +// Create creates a CNI Result using the given JSON, or an error if the creation +// could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + if c := findCreator(version); c != nil { + return c.createFn(bytes) + } + return nil, fmt.Errorf("unsupported CNI result version %q", version) +} + +// RegisterCreator registers a CNI Result creator. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterCreator(versions []string, createFn ResultFactoryFunc) { + // Make sure there is no creator already registered for these versions + for _, v := range versions { + if findCreator(v) != nil { + panic(fmt.Sprintf("creator already registered for %s", v)) + } + } + creators = append(creators, &creator{ + versions: versions, + createFn: createFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go new file mode 100644 index 000000000..fba17dfc0 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -0,0 +1,234 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" +) + +// like net.IPNet but adds JSON marshalling and unmarshalling +type IPNet net.IPNet + +// ParseCIDR takes a string like "10.2.3.1/24" and +// return IPNet with "10.2.3.1" and /24 mask +func ParseCIDR(s string) (*net.IPNet, error) { + ip, ipn, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + ipn.IP = ip + return ipn, nil +} + +func (n IPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(&n).String()) +} + +func (n *IPNet) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + tmp, err := ParseCIDR(s) + if err != nil { + return err + } + + *n = IPNet(*tmp) + return nil +} + +// NetConf describes a network. +type NetConf struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Capabilities map[string]bool `json:"capabilities,omitempty"` + IPAM IPAM `json:"ipam,omitempty"` + DNS DNS `json:"dns"` + + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult Result `json:"-"` +} + +type IPAM struct { + Type string `json:"type,omitempty"` +} + +// NetConfList describes an ordered list of networks. 
+type NetConfList struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` +} + +// Result is an interface that provides the result of plugin execution +type Result interface { + // The highest CNI specification result version the result supports + // without having to convert + Version() string + + // Returns the result converted into the requested CNI specification + // result version, or an error if conversion failed + GetAsVersion(version string) (Result, error) + + // Prints the result in JSON format to stdout + Print() error + + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error +} + +func PrintResult(result Result, version string) error { + newResult, err := result.GetAsVersion(version) + if err != nil { + return err + } + return newResult.Print() +} + +// DNS contains values interesting for DNS resolvers +type DNS struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` + Options []string `json:"options,omitempty"` +} + +func (d *DNS) Copy() *DNS { + if d == nil { + return nil + } + + to := &DNS{Domain: d.Domain} + for _, ns := range d.Nameservers { + to.Nameservers = append(to.Nameservers, ns) + } + for _, s := range d.Search { + to.Search = append(to.Search, s) + } + for _, o := range d.Options { + to.Options = append(to.Options, o) + } + return to +} + +type Route struct { + Dst net.IPNet + GW net.IP +} + +func (r *Route) String() string { + return fmt.Sprintf("%+v", *r) +} + +func (r *Route) Copy() *Route { + if r == nil { + return nil + } + + return &Route{ + Dst: r.Dst, + GW: r.GW, + } +} + +// Well known error codes +// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +const ( + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + ErrUnsupportedField // 2 + ErrUnknownContainer // 3 + ErrInvalidEnvironmentVariables // 4 + ErrIOFailure // 5 + ErrDecodingFailure // 6 + ErrInvalidNetworkConfig // 7 + ErrTryAgainLater uint = 11 + ErrInternal uint = 999 +) + +type Error struct { + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +func NewError(code uint, msg, details string) *Error { + return &Error{ + Code: code, + Msg: msg, + Details: details, + } +} + +func (e *Error) Error() string { + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) +} + +func (e *Error) Print() error { + return prettyPrint(e) +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type route struct { + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` +} + +func (r *Route) UnmarshalJSON(data []byte) error { + rt := route{} + if err := json.Unmarshal(data, &rt); err != nil { + return err + } + + r.Dst = net.IPNet(rt.Dst) + r.GW = rt.GW + return nil +} + +func (r Route) MarshalJSON() ([]byte, error) { + rt := route{ + Dst: IPNet(r.Dst), + GW: r.GW, + } + + return json.Marshal(rt) +} + +func prettyPrint(obj interface{}) error { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(data) + return err +} diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go 
b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go new file mode 100644 index 000000000..b8ec38874 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -0,0 +1,84 @@ +// Copyright 2019 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "fmt" + "regexp" + "unicode" + + "github.com/containernetworking/cni/pkg/types" +) + +const ( + // cniValidNameChars is the regexp used to validate valid characters in + // containerID and networkName + cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` + + // maxInterfaceNameLength is the length max of a valid interface name + maxInterfaceNameLength = 15 +) + +var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) + +// ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters +func ValidateContainerID(containerID string) *types.Error { + + if containerID == "" { + return types.NewError(types.ErrUnknownContainer, "missing containerID", "") + } + if !cniReg.MatchString(containerID) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID) + } + return nil +} + +// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters +func ValidateNetworkName(networkName string) *types.Error { + + if networkName == "" { + return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") + } + if !cniReg.MatchString(networkName) { + return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName) + } + return nil +} + +// ValidateInterfaceName will validate the interface name based on the three rules below +// 1. The name must not be empty +// 2. The name must be less than 16 characters +// 3. The name must not be "." or ".." +// 3. The name must not contain / or : or any whitespace characters +// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 +func ValidateInterfaceName(ifName string) *types.Error { + if len(ifName) == 0 { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "") + } + if len(ifName) > maxInterfaceNameLength { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1)) + } + if ifName == "." || ifName == ".." { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . 
or ..", "") + } + for _, r := range bytes.Runes([]byte(ifName)) { + if r == '/' || r == ':' || unicode.IsSpace(r) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "") + } + } + + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go new file mode 100644 index 000000000..808c33b83 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/conf.go @@ -0,0 +1,26 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "github.com/containernetworking/cni/pkg/types/create" +) + +// ConfigDecoder can decode the CNI version available in network config data +type ConfigDecoder struct{} + +func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { + return create.DecodeVersion(jsonBytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go new file mode 100644 index 000000000..17b22b6b0 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -0,0 +1,144 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// PluginInfo reports information about CNI versioning +type PluginInfo interface { + // SupportedVersions returns one or more CNI spec versions that the plugin + // supports. 
If input is provided in one of these versions, then the plugin + // promises to use the same CNI version in its response + SupportedVersions() []string + + // Encode writes this CNI version information as JSON to the given Writer + Encode(io.Writer) error +} + +type pluginInfo struct { + CNIVersion_ string `json:"cniVersion"` + SupportedVersions_ []string `json:"supportedVersions,omitempty"` +} + +// pluginInfo implements the PluginInfo interface +var _ PluginInfo = &pluginInfo{} + +func (p *pluginInfo) Encode(w io.Writer) error { + return json.NewEncoder(w).Encode(p) +} + +func (p *pluginInfo) SupportedVersions() []string { + return p.SupportedVersions_ +} + +// PluginSupports returns a new PluginInfo that will report the given versions +// as supported +func PluginSupports(supportedVersions ...string) PluginInfo { + if len(supportedVersions) < 1 { + panic("programmer error: you must support at least one version") + } + return &pluginInfo{ + CNIVersion_: Current(), + SupportedVersions_: supportedVersions, + } +} + +// PluginDecoder can decode the response returned by a plugin's VERSION command +type PluginDecoder struct{} + +func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { + var info pluginInfo + err := json.Unmarshal(jsonBytes, &info) + if err != nil { + return nil, fmt.Errorf("decoding version info: %w", err) + } + if info.CNIVersion_ == "" { + return nil, fmt.Errorf("decoding version info: missing field cniVersion") + } + if len(info.SupportedVersions_) == 0 { + if info.CNIVersion_ == "0.2.0" { + return PluginSupports("0.1.0", "0.2.0"), nil + } + return nil, fmt.Errorf("decoding version info: missing field supportedVersions") + } + return &info, nil +} + +// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + if version == "" { // special case: no version declared == v0.1.0 + return 0, 1, 0, nil + } + + parts := strings.Split(version, ".") + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %w", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %w", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %w", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// numbers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git 
a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go new file mode 100644 index 000000000..25c3810b2 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go @@ -0,0 +1,49 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "fmt" + +type ErrorIncompatible struct { + Config string + Supported []string +} + +func (e *ErrorIncompatible) Details() string { + return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) +} + +func (e *ErrorIncompatible) Error() string { + return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) +} + +type Reconciler struct{} + +func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { + return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) +} + +func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { + for _, supportedVersion := range supportedVersions { + if configVersion == supportedVersion { + return nil + } + } + + return &ErrorIncompatible{ + Config: configVersion, + Supported: supportedVersions, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go new file mode 100644 index 000000000..1326f8038 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -0,0 +1,89 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + types100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/types/create" +) + +// Current reports the version of the CNI spec implemented by this library +func Current() string { + return types100.ImplementedSpecVersion +} + +// Legacy PluginInfo describes a plugin that is backwards compatible with the +// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 +// library ought to work correctly with a plugin that reports support for +// Legacy versions. +// +// Any future CNI spec versions which meet this definition should be added to +// this list. 
+var Legacy = PluginSupports("0.1.0", "0.2.0") +var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0") + +// VersionsFrom returns a list of versions starting from min, inclusive +func VersionsStartingFrom(min string) PluginInfo { + out := []string{} + // cheat, just assume ordered + ok := false + for _, v := range All.SupportedVersions() { + if !ok && v == min { + ok = true + } + if ok { + out = append(out, v) + } + } + return PluginSupports(out...) +} + +// Finds a Result object matching the requested version (if any) and asks +// that object to parse the plugin result, returning an error if parsing failed. +func NewResult(version string, resultBytes []byte) (types.Result, error) { + return create.Create(version, resultBytes) +} + +// ParsePrevResult parses a prevResult in a NetConf structure and sets +// the NetConf's PrevResult member to the parsed Result object. +func ParsePrevResult(conf *types.NetConf) error { + if conf.RawPrevResult == nil { + return nil + } + + // Prior to 1.0.0, Result types may not marshal a CNIVersion. Since the + // result version must match the config version, if the Result's version + // is empty, inject the config version. + if ver, ok := conf.RawPrevResult["CNIVersion"]; !ok || ver == "" { + conf.RawPrevResult["CNIVersion"] = conf.CNIVersion + } + + resultBytes, err := json.Marshal(conf.RawPrevResult) + if err != nil { + return fmt.Errorf("could not serialize prevResult: %w", err) + } + + conf.RawPrevResult = nil + conf.PrevResult, err = create.Create(conf.CNIVersion, resultBytes) + if err != nil { + return fmt.Errorf("could not parse prevResult: %w", err) + } + + return nil +} diff --git a/vendor/github.com/containernetworking/plugins/LICENSE b/vendor/github.com/containernetworking/plugins/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go new file mode 100644 index 000000000..b4db50b9a --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go @@ -0,0 +1,68 @@ +// Copyright 2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
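Before moving on to the plugins/pkg/ip sources, a brief aside on the CNI version helpers vendored earlier (pkg/version/reconcile.go and version.go): the sketch below shows how a runtime might check a configuration's CNI version against the versions a plugin reports. It is illustrative only; the version strings and the package main wrapper are assumptions, not part of the vendored files.

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// VersionsStartingFrom builds a PluginInfo covering every known spec
	// version from the given one onwards (here: 0.3.0, 0.3.1, 0.4.0, 1.0.0).
	supported := version.VersionsStartingFrom("0.3.0")

	// Reconciler.Check returns a typed *ErrorIncompatible when the config
	// version is not among the plugin's supported versions.
	r := version.Reconciler{}
	if err := r.Check("0.2.0", supported); err != nil {
		fmt.Println(err.Details()) // config is "0.2.0", plugin supports [...]
	}
}
```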
+ +package ip + +import ( + "fmt" + "syscall" + "time" + + "github.com/vishvananda/netlink" +) + +const SETTLE_INTERVAL = 50 * time.Millisecond + +// SettleAddresses waits for all addresses on a link to leave tentative state. +// This is particularly useful for ipv6, where all addresses need to do DAD. +// There is no easy way to wait for this as an event, so just loop until the +// addresses are no longer tentative. +// If any addresses are still tentative after timeout seconds, then error. +func SettleAddresses(ifName string, timeout int) error { + link, err := netlink.LinkByName(ifName) + if err != nil { + return fmt.Errorf("failed to retrieve link: %v", err) + } + + deadline := time.Now().Add(time.Duration(timeout) * time.Second) + for { + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("could not list addresses: %v", err) + } + + if len(addrs) == 0 { + return nil + } + + ok := true + for _, addr := range addrs { + if addr.Flags&(syscall.IFA_F_TENTATIVE|syscall.IFA_F_DADFAILED) > 0 { + ok = false + break // Break out of the `range addrs`, not the `for` + } + } + + if ok { + return nil + } + if time.Now().After(deadline) { + return fmt.Errorf("link %s still has tentative addresses after %d seconds", + ifName, + timeout) + } + + time.Sleep(SETTLE_INTERVAL) + } +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go b/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go new file mode 100644 index 000000000..8b380fc74 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go @@ -0,0 +1,105 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "math/big" + "net" +) + +// NextIP returns IP incremented by 1, if IP is invalid, return nil +func NextIP(ip net.IP) net.IP { + normalizedIP := normalizeIP(ip) + if normalizedIP == nil { + return nil + } + + i := ipToInt(normalizedIP) + return intToIP(i.Add(i, big.NewInt(1)), len(normalizedIP) == net.IPv6len) +} + +// PrevIP returns IP decremented by 1, if IP is invalid, return nil +func PrevIP(ip net.IP) net.IP { + normalizedIP := normalizeIP(ip) + if normalizedIP == nil { + return nil + } + + i := ipToInt(normalizedIP) + return intToIP(i.Sub(i, big.NewInt(1)), len(normalizedIP) == net.IPv6len) +} + +// Cmp compares two IPs, returning the usual ordering: +// a < b : -1 +// a == b : 0 +// a > b : 1 +// incomparable : -2 +func Cmp(a, b net.IP) int { + normalizedA := normalizeIP(a) + normalizedB := normalizeIP(b) + + if len(normalizedA) == len(normalizedB) && len(normalizedA) != 0 { + return ipToInt(normalizedA).Cmp(ipToInt(normalizedB)) + } + + return -2 +} + +func ipToInt(ip net.IP) *big.Int { + return big.NewInt(0).SetBytes(ip) +} + +func intToIP(i *big.Int, isIPv6 bool) net.IP { + intBytes := i.Bytes() + + if len(intBytes) == net.IPv4len || len(intBytes) == net.IPv6len { + return intBytes + } + + if isIPv6 { + return append(make([]byte, net.IPv6len-len(intBytes)), intBytes...) 
+ } + + return append(make([]byte, net.IPv4len-len(intBytes)), intBytes...) +} + +// normalizeIP will normalize IP by family, +// IPv4 : 4-byte form +// IPv6 : 16-byte form +// others : nil +func normalizeIP(ip net.IP) net.IP { + if ipTo4 := ip.To4(); ipTo4 != nil { + return ipTo4 + } + return ip.To16() +} + +// Network masks off the host portion of the IP, if IPNet is invalid, +// return nil +func Network(ipn *net.IPNet) *net.IPNet { + if ipn == nil { + return nil + } + + maskedIP := ipn.IP.Mask(ipn.Mask) + if maskedIP == nil { + return nil + } + + return &net.IPNet{ + IP: maskedIP, + Mask: ipn.Mask, + } +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go new file mode 100644 index 000000000..4469e1b5d --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go @@ -0,0 +1,105 @@ +// Copyright 2021 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "fmt" + "net" + "strings" +) + +// IP is a CNI maintained type inherited from net.IPNet which can +// represent a single IP address with or without prefix. +type IP struct { + net.IPNet +} + +// newIP will create an IP with net.IP and net.IPMask +func newIP(ip net.IP, mask net.IPMask) *IP { + return &IP{ + IPNet: net.IPNet{ + IP: ip, + Mask: mask, + }, + } +} + +// ParseIP will parse string s as an IP, and return it. +// The string s must be formed like [/]. +// If s is not a valid textual representation of an IP, +// will return nil. +func ParseIP(s string) *IP { + if strings.ContainsAny(s, "/") { + ip, ipNet, err := net.ParseCIDR(s) + if err != nil { + return nil + } + return newIP(ip, ipNet.Mask) + } else { + ip := net.ParseIP(s) + if ip == nil { + return nil + } + return newIP(ip, nil) + } +} + +// ToIP will return a net.IP in standard form from this IP. +// If this IP can not be converted to a valid net.IP, will return nil. +func (i *IP) ToIP() net.IP { + switch { + case i.IP.To4() != nil: + return i.IP.To4() + case i.IP.To16() != nil: + return i.IP.To16() + default: + return nil + } +} + +// String returns the string form of this IP. +func (i *IP) String() string { + if len(i.Mask) > 0 { + return i.IPNet.String() + } + return i.IP.String() +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String, +// But when len(ip) is zero, will return an empty slice. +func (i *IP) MarshalText() ([]byte, error) { + if len(i.IP) == 0 { + return []byte{}, nil + } + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The textual bytes are expected in a form accepted by Parse, +// But when len(b) is zero, will return an empty IP. 
+func (i *IP) UnmarshalText(b []byte) error { + if len(b) == 0 { + *i = IP{} + return nil + } + + ip := ParseIP(string(b)) + if ip == nil { + return fmt.Errorf("invalid IP address %s", string(b)) + } + *i = *ip + return nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go new file mode 100644 index 000000000..0e8b6b691 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go @@ -0,0 +1,62 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "bytes" + "os" + + current "github.com/containernetworking/cni/pkg/types/100" +) + +func EnableIP4Forward() error { + return echo1("/proc/sys/net/ipv4/ip_forward") +} + +func EnableIP6Forward() error { + return echo1("/proc/sys/net/ipv6/conf/all/forwarding") +} + +// EnableForward will enable forwarding for all configured +// address families +func EnableForward(ips []*current.IPConfig) error { + v4 := false + v6 := false + + for _, ip := range ips { + isV4 := ip.Address.IP.To4() != nil + if isV4 && !v4 { + if err := EnableIP4Forward(); err != nil { + return err + } + v4 = true + } else if !isV4 && !v6 { + if err := EnableIP6Forward(); err != nil { + return err + } + v6 = true + } + } + return nil +} + +func echo1(f string) error { + if content, err := os.ReadFile(f); err == nil { + if bytes.Equal(bytes.TrimSpace(content), []byte("1")) { + return nil + } + } + return os.WriteFile(f, []byte("1"), 0644) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go new file mode 100644 index 000000000..cc640a605 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go @@ -0,0 +1,126 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
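The pkg/ip address helpers vendored above (ParseIP, ToIP, NextIP, Cmp) compose naturally; the following is a minimal usage sketch. The addresses and the package main wrapper are illustrative assumptions, not part of the vendored sources.

```go
package main

import (
	"fmt"

	"github.com/containernetworking/plugins/pkg/ip"
)

func main() {
	// ParseIP accepts a bare address or CIDR notation and keeps the mask.
	addr := ip.ParseIP("10.1.2.3/24")
	fmt.Println(addr.String()) // 10.1.2.3/24

	// NextIP and PrevIP step through addresses; Cmp gives -1/0/1 ordering.
	next := ip.NextIP(addr.ToIP())
	fmt.Println(next, ip.Cmp(addr.ToIP(), next)) // 10.1.2.4 -1
}
```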
+ +package ip + +import ( + "fmt" + "net" + + "github.com/coreos/go-iptables/iptables" +) + +// SetupIPMasq installs iptables rules to masquerade traffic +// coming from ip of ipn and going outside of ipn +func SetupIPMasq(ipn *net.IPNet, chain string, comment string) error { + isV6 := ipn.IP.To4() == nil + + var ipt *iptables.IPTables + var err error + var multicastNet string + + if isV6 { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + multicastNet = "ff00::/8" + } else { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) + multicastNet = "224.0.0.0/4" + } + if err != nil { + return fmt.Errorf("failed to locate iptables: %v", err) + } + + // Create chain if doesn't exist + exists := false + chains, err := ipt.ListChains("nat") + if err != nil { + return fmt.Errorf("failed to list chains: %v", err) + } + for _, ch := range chains { + if ch == chain { + exists = true + break + } + } + if !exists { + if err = ipt.NewChain("nat", chain); err != nil { + return err + } + } + + // Packets to this network should not be touched + if err := ipt.AppendUnique("nat", chain, "-d", ipn.String(), "-j", "ACCEPT", "-m", "comment", "--comment", comment); err != nil { + return err + } + + // Don't masquerade multicast - pods should be able to talk to other pods + // on the local network via multicast. + if err := ipt.AppendUnique("nat", chain, "!", "-d", multicastNet, "-j", "MASQUERADE", "-m", "comment", "--comment", comment); err != nil { + return err + } + + // Packets from the specific IP of this network will hit the chain + return ipt.AppendUnique("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment) +} + +// TeardownIPMasq undoes the effects of SetupIPMasq +func TeardownIPMasq(ipn *net.IPNet, chain string, comment string) error { + isV6 := ipn.IP.To4() == nil + + var ipt *iptables.IPTables + var err error + + if isV6 { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + } else { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) + } + if err != nil { + return fmt.Errorf("failed to locate iptables: %v", err) + } + + err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment) + if err != nil && !isNotExist(err) { + return err + } + + // for downward compatibility + err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.String(), "-j", chain, "-m", "comment", "--comment", comment) + if err != nil && !isNotExist(err) { + return err + } + + err = ipt.ClearChain("nat", chain) + if err != nil && !isNotExist(err) { + return err + + } + + err = ipt.DeleteChain("nat", chain) + if err != nil && !isNotExist(err) { + return err + } + + return nil +} + +// isNotExist returnst true if the error is from iptables indicating +// that the target does not exist. +func isNotExist(err error) bool { + e, ok := err.(*iptables.Error) + if !ok { + return false + } + return e.IsNotExist() +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go new file mode 100644 index 000000000..91f931b57 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go @@ -0,0 +1,261 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "crypto/rand" + "errors" + "fmt" + "net" + "os" + + "github.com/safchain/ethtool" + "github.com/vishvananda/netlink" + + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containernetworking/plugins/pkg/utils/sysctl" +) + +var ( + ErrLinkNotFound = errors.New("link not found") +) + +// makeVethPair is called from within the container's network namespace +func makeVethPair(name, peer string, mtu int, mac string, hostNS ns.NetNS) (netlink.Link, error) { + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: name, + MTU: mtu, + }, + PeerName: peer, + PeerNamespace: netlink.NsFd(int(hostNS.Fd())), + } + if mac != "" { + m, err := net.ParseMAC(mac) + if err != nil { + return nil, err + } + veth.LinkAttrs.HardwareAddr = m + } + if err := netlink.LinkAdd(veth); err != nil { + return nil, err + } + // Re-fetch the container link to get its creation-time parameters, e.g. index and mac + veth2, err := netlink.LinkByName(name) + if err != nil { + netlink.LinkDel(veth) // try and clean up the link if possible. + return nil, err + } + + return veth2, nil +} + +func peerExists(name string) bool { + if _, err := netlink.LinkByName(name); err != nil { + return false + } + return true +} + +func makeVeth(name, vethPeerName string, mtu int, mac string, hostNS ns.NetNS) (peerName string, veth netlink.Link, err error) { + for i := 0; i < 10; i++ { + if vethPeerName != "" { + peerName = vethPeerName + } else { + peerName, err = RandomVethName() + if err != nil { + return + } + } + + veth, err = makeVethPair(name, peerName, mtu, mac, hostNS) + switch { + case err == nil: + return + + case os.IsExist(err): + if peerExists(peerName) && vethPeerName == "" { + continue + } + err = fmt.Errorf("container veth name provided (%v) already exists", name) + return + + default: + err = fmt.Errorf("failed to make veth pair: %v", err) + return + } + } + + // should really never be hit + err = fmt.Errorf("failed to find a unique veth name") + return +} + +// RandomVethName returns string "veth" with random prefix (hashed from entropy) +func RandomVethName() (string, error) { + entropy := make([]byte, 4) + _, err := rand.Read(entropy) + if err != nil { + return "", fmt.Errorf("failed to generate random veth name: %v", err) + } + + // NetworkManager (recent versions) will ignore veth devices that start with "veth" + return fmt.Sprintf("veth%x", entropy), nil +} + +func RenameLink(curName, newName string) error { + link, err := netlink.LinkByName(curName) + if err == nil { + err = netlink.LinkSetName(link, newName) + } + return err +} + +func ifaceFromNetlinkLink(l netlink.Link) net.Interface { + a := l.Attrs() + return net.Interface{ + Index: a.Index, + MTU: a.MTU, + Name: a.Name, + HardwareAddr: a.HardwareAddr, + Flags: a.Flags, + } +} + +// SetupVethWithName sets up a pair of virtual ethernet devices. +// Call SetupVethWithName from inside the container netns. It will create both veth +// devices and move the host-side veth into the provided hostNS namespace. +// hostVethName: If hostVethName is not specified, the host-side veth name will use a random string. 
+// On success, SetupVethWithName returns (hostVeth, containerVeth, nil) +func SetupVethWithName(contVethName, hostVethName string, mtu int, contVethMac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) { + hostVethName, contVeth, err := makeVeth(contVethName, hostVethName, mtu, contVethMac, hostNS) + if err != nil { + return net.Interface{}, net.Interface{}, err + } + + var hostVeth netlink.Link + err = hostNS.Do(func(_ ns.NetNS) error { + hostVeth, err = netlink.LinkByName(hostVethName) + if err != nil { + return fmt.Errorf("failed to lookup %q in %q: %v", hostVethName, hostNS.Path(), err) + } + + if err = netlink.LinkSetUp(hostVeth); err != nil { + return fmt.Errorf("failed to set %q up: %v", hostVethName, err) + } + + // we want to own the routes for this interface + _, _ = sysctl.Sysctl(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", hostVethName), "0") + return nil + }) + if err != nil { + return net.Interface{}, net.Interface{}, err + } + return ifaceFromNetlinkLink(hostVeth), ifaceFromNetlinkLink(contVeth), nil +} + +// SetupVeth sets up a pair of virtual ethernet devices. +// Call SetupVeth from inside the container netns. It will create both veth +// devices and move the host-side veth into the provided hostNS namespace. +// On success, SetupVeth returns (hostVeth, containerVeth, nil) +func SetupVeth(contVethName string, mtu int, contVethMac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) { + return SetupVethWithName(contVethName, "", mtu, contVethMac, hostNS) +} + +// DelLinkByName removes an interface link. +func DelLinkByName(ifName string) error { + iface, err := netlink.LinkByName(ifName) + if err != nil { + if _, ok := err.(netlink.LinkNotFoundError); ok { + return ErrLinkNotFound + } + return fmt.Errorf("failed to lookup %q: %v", ifName, err) + } + + if err = netlink.LinkDel(iface); err != nil { + return fmt.Errorf("failed to delete %q: %v", ifName, err) + } + + return nil +} + +// DelLinkByNameAddr remove an interface and returns its addresses +func DelLinkByNameAddr(ifName string) ([]*net.IPNet, error) { + iface, err := netlink.LinkByName(ifName) + if err != nil { + if _, ok := err.(netlink.LinkNotFoundError); ok { + return nil, ErrLinkNotFound + } + return nil, fmt.Errorf("failed to lookup %q: %v", ifName, err) + } + + addrs, err := netlink.AddrList(iface, netlink.FAMILY_ALL) + if err != nil { + return nil, fmt.Errorf("failed to get IP addresses for %q: %v", ifName, err) + } + + if err = netlink.LinkDel(iface); err != nil { + return nil, fmt.Errorf("failed to delete %q: %v", ifName, err) + } + + out := []*net.IPNet{} + for _, addr := range addrs { + if addr.IP.IsGlobalUnicast() { + out = append(out, addr.IPNet) + } + } + + return out, nil +} + +// GetVethPeerIfindex returns the veth link object, the peer ifindex of the +// veth, or an error. This peer ifindex will only be valid in the peer's +// network namespace. 
+func GetVethPeerIfindex(ifName string) (netlink.Link, int, error) { + link, err := netlink.LinkByName(ifName) + if err != nil { + return nil, -1, fmt.Errorf("could not look up %q: %v", ifName, err) + } + if _, ok := link.(*netlink.Veth); !ok { + return nil, -1, fmt.Errorf("interface %q was not a veth interface", ifName) + } + + // veth supports IFLA_LINK (what vishvananda/netlink calls ParentIndex) + // on 4.1 and higher kernels + peerIndex := link.Attrs().ParentIndex + if peerIndex <= 0 { + // Fall back to ethtool for 4.0 and earlier kernels + e, err := ethtool.NewEthtool() + if err != nil { + return nil, -1, fmt.Errorf("failed to initialize ethtool: %v", err) + } + defer e.Close() + + stats, err := e.Stats(link.Attrs().Name) + if err != nil { + return nil, -1, fmt.Errorf("failed to request ethtool stats: %v", err) + } + n, ok := stats["peer_ifindex"] + if !ok { + return nil, -1, fmt.Errorf("failed to find 'peer_ifindex' in ethtool stats") + } + if n > 32767 || n == 0 { + return nil, -1, fmt.Errorf("invalid 'peer_ifindex' %d", n) + } + peerIndex = int(n) + } + + return link, peerIndex, nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go new file mode 100644 index 000000000..e92b6c53e --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go @@ -0,0 +1,52 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +// AddRoute adds a universally-scoped route to a device. +func AddRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error { + return netlink.RouteAdd(&netlink.Route{ + LinkIndex: dev.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: ipn, + Gw: gw, + }) +} + +// AddHostRoute adds a host-scoped route to a device. +func AddHostRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error { + return netlink.RouteAdd(&netlink.Route{ + LinkIndex: dev.Attrs().Index, + Scope: netlink.SCOPE_HOST, + Dst: ipn, + Gw: gw, + }) +} + +// AddDefaultRoute sets the default route on the given gateway. +func AddDefaultRoute(gw net.IP, dev netlink.Link) error { + var defNet *net.IPNet + if gw.To4() != nil { + _, defNet, _ = net.ParseCIDR("0.0.0.0/0") + } else { + _, defNet, _ = net.ParseCIDR("::/0") + } + return AddRoute(defNet, gw, dev) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go new file mode 100644 index 000000000..943117e18 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go @@ -0,0 +1,116 @@ +//go:build linux +// +build linux + +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "fmt" + "net" + + "github.com/containernetworking/cni/pkg/types" + current "github.com/containernetworking/cni/pkg/types/100" + "github.com/vishvananda/netlink" +) + +func ValidateExpectedInterfaceIPs(ifName string, resultIPs []*current.IPConfig) error { + + // Ensure ips + for _, ips := range resultIPs { + ourAddr := netlink.Addr{IPNet: &ips.Address} + match := false + + link, err := netlink.LinkByName(ifName) + if err != nil { + return fmt.Errorf("Cannot find container link %v", ifName) + } + + addrList, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("Cannot obtain List of IP Addresses") + } + + for _, addr := range addrList { + if addr.Equal(ourAddr) { + match = true + break + } + } + if match == false { + return fmt.Errorf("Failed to match addr %v on interface %v", ourAddr, ifName) + } + + // Convert the host/prefixlen to just prefix for route lookup. + _, ourPrefix, err := net.ParseCIDR(ourAddr.String()) + + findGwy := &netlink.Route{Dst: ourPrefix} + routeFilter := netlink.RT_FILTER_DST + + family := netlink.FAMILY_V6 + if ips.Address.IP.To4() != nil { + family = netlink.FAMILY_V4 + } + + gwy, err := netlink.RouteListFiltered(family, findGwy, routeFilter) + if err != nil { + return fmt.Errorf("Error %v trying to find Gateway %v for interface %v", err, ips.Gateway, ifName) + } + if gwy == nil { + return fmt.Errorf("Failed to find Gateway %v for interface %v", ips.Gateway, ifName) + } + } + + return nil +} + +func ValidateExpectedRoute(resultRoutes []*types.Route) error { + + // Ensure that each static route in prevResults is found in the routing table + for _, route := range resultRoutes { + find := &netlink.Route{Dst: &route.Dst, Gw: route.GW} + routeFilter := netlink.RT_FILTER_DST | netlink.RT_FILTER_GW + var family int + + switch { + case route.Dst.IP.To4() != nil: + family = netlink.FAMILY_V4 + // Default route needs Dst set to nil + if route.Dst.String() == "0.0.0.0/0" { + find = &netlink.Route{Dst: nil, Gw: route.GW} + routeFilter = netlink.RT_FILTER_DST + } + case len(route.Dst.IP) == net.IPv6len: + family = netlink.FAMILY_V6 + // Default route needs Dst set to nil + if route.Dst.String() == "::/0" { + find = &netlink.Route{Dst: nil, Gw: route.GW} + routeFilter = netlink.RT_FILTER_DST + } + default: + return fmt.Errorf("Invalid static route found %v", route) + } + + wasFound, err := netlink.RouteListFiltered(family, find, routeFilter) + if err != nil { + return fmt.Errorf("Expected Route %v not route table lookup error %v", route, err) + } + if wasFound == nil { + return fmt.Errorf("Expected Route %v not found in routing table", route) + } + } + + return nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md new file mode 100644 index 000000000..1e265c7a0 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md @@ -0,0 +1,41 @@ +### Namespaces, Threads, and Go +On Linux each OS thread can have a different network namespace. 
Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code. + +### Namespace Switching +Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads. + +Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in. + +For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly. + +### Do() The Recommended Thing +The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example: + +```go +err = targetNs.Do(func(hostNs ns.NetNS) error { + dummy := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: "dummy0", + }, + } + return netlink.LinkAdd(dummy) +}) +``` + +Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem. + +When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled. + +In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater. + + +### Creating network namespaces +Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. 
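For orientation only, here is a rough sketch of what such namespace management typically involves (roughly what `ip netns add` does): create a mount point under `/var/run/netns`, unshare the network namespace on a locked OS thread, bind-mount the thread's namespace file so the namespace persists, then switch the thread back. Names and paths are illustrative, error handling is trimmed, imports (`fmt`, `os`, `path/filepath`, `runtime`, `golang.org/x/sys/unix`) are omitted, and this is not part of this library.

```go
// Sketch only: requires root, and production code needs far more care.
func createNamedNS(name string) error {
	nsPath := filepath.Join("/var/run/netns", name)

	// The bind-mount target must already exist.
	if err := os.MkdirAll("/var/run/netns", 0o755); err != nil {
		return err
	}
	f, err := os.Create(nsPath)
	if err != nil {
		return err
	}
	f.Close()

	// Pin this goroutine to its OS thread while namespaces are switched.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	threadNS := fmt.Sprintf("/proc/self/task/%d/ns/net", unix.Gettid())
	origNS, err := os.Open(threadNS)
	if err != nil {
		return err
	}
	defer origNS.Close()

	// Create a fresh network namespace and bind-mount it so it outlives the thread.
	if err := unix.Unshare(unix.CLONE_NEWNET); err != nil {
		return err
	}
	if err := unix.Mount(threadNS, nsPath, "none", unix.MS_BIND, ""); err != nil {
		return err
	}

	// Restore the original namespace before the thread is unlocked.
	return unix.Setns(int(origNS.Fd()), unix.CLONE_NEWNET)
}
```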
A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration. + + +### Further Reading + - https://github.com/golang/go/wiki/LockOSThread + - http://morsmachine.dk/go-scheduler + - https://github.com/containernetworking/cni/issues/262 + - https://golang.org/pkg/runtime/ + - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go new file mode 100644 index 000000000..f260f2813 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go @@ -0,0 +1,234 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import ( + "fmt" + "os" + "runtime" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +// Returns an object representing the current OS thread's network namespace +func GetCurrentNS() (NetNS, error) { + // Lock the thread in case other goroutine executes in it and changes its + // network namespace after getCurrentThreadNetNSPath(), otherwise it might + // return an unexpected network namespace. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + return GetNS(getCurrentThreadNetNSPath()) +} + +func getCurrentThreadNetNSPath() string { + // /proc/self/ns/net returns the namespace of the main thread, not + // of whatever thread this goroutine is running on. Make sure we + // use the thread's net namespace since the thread is switching around + return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) +} + +func (ns *netNS) Close() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := ns.file.Close(); err != nil { + return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err) + } + ns.closed = true + + return nil +} + +func (ns *netNS) Set() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err) + } + + return nil +} + +type NetNS interface { + // Executes the passed closure in this object's network namespace, + // attempting to restore the original namespace before returning. + // However, since each OS thread can have a different network namespace, + // and Go's thread scheduling is highly variable, callers cannot + // guarantee any specific namespace is set unless operations that + // require that namespace are wrapped with Do(). Also, no code called + // from Do() should call runtime.UnlockOSThread(), or the risk + // of executing code in an incorrect namespace will be greater. See + // https://github.com/golang/go/wiki/LockOSThread for further details. + Do(toRun func(NetNS) error) error + + // Sets the current network namespace to this object's network namespace. 
+ // Note that since Go's thread scheduling is highly variable, callers + // cannot guarantee the requested namespace will be the current namespace + // after this function is called; to ensure this wrap operations that + // require the namespace with Do() instead. + Set() error + + // Returns the filesystem path representing this object's network namespace + Path() string + + // Returns a file descriptor representing this object's network namespace + Fd() uintptr + + // Cleans up this instance of the network namespace; if this instance + // is the last user the namespace will be destroyed + Close() error +} + +type netNS struct { + file *os.File + closed bool +} + +// netNS implements the NetNS interface +var _ NetNS = &netNS{} + +const ( + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h + NSFS_MAGIC = unix.NSFS_MAGIC + PROCFS_MAGIC = unix.PROC_SUPER_MAGIC +) + +type NSPathNotExistErr struct{ msg string } + +func (e NSPathNotExistErr) Error() string { return e.msg } + +type NSPathNotNSErr struct{ msg string } + +func (e NSPathNotNSErr) Error() string { return e.msg } + +func IsNSorErr(nspath string) error { + stat := syscall.Statfs_t{} + if err := syscall.Statfs(nspath, &stat); err != nil { + if os.IsNotExist(err) { + err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)} + } else { + err = fmt.Errorf("failed to Statfs %q: %v", nspath, err) + } + return err + } + + switch stat.Type { + case PROCFS_MAGIC, NSFS_MAGIC: + return nil + default: + return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)} + } +} + +// Returns an object representing the namespace referred to by @path +func GetNS(nspath string) (NetNS, error) { + err := IsNSorErr(nspath) + if err != nil { + return nil, err + } + + fd, err := os.Open(nspath) + if err != nil { + return nil, err + } + + return &netNS{file: fd}, nil +} + +func (ns *netNS) Path() string { + return ns.file.Name() +} + +func (ns *netNS) Fd() uintptr { + return ns.file.Fd() +} + +func (ns *netNS) errorIfClosed() error { + if ns.closed { + return fmt.Errorf("%q has already been closed", ns.file.Name()) + } + return nil +} + +func (ns *netNS) Do(toRun func(NetNS) error) error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + containedCall := func(hostNS NetNS) error { + threadNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("failed to open current netns: %v", err) + } + defer threadNS.Close() + + // switch to target namespace + if err = ns.Set(); err != nil { + return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err) + } + defer func() { + err := threadNS.Set() // switch back + if err == nil { + // Unlock the current thread only when we successfully switched back + // to the original namespace; otherwise leave the thread locked which + // will force the runtime to scrap the current thread, that is maybe + // not as optimal but at least always safe to do. + runtime.UnlockOSThread() + } + }() + + return toRun(hostNS) + } + + // save a handle to current network namespace + hostNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("Failed to open current namespace: %v", err) + } + defer hostNS.Close() + + var wg sync.WaitGroup + wg.Add(1) + + // Start the callback in a new green thread so that if we later fail + // to switch the namespace back to the original one, we can safely + // leave the thread locked to die without a risk of the current thread + // left lingering with incorrect namespace. 
+ var innerError error + go func() { + defer wg.Done() + runtime.LockOSThread() + innerError = containedCall(hostNS) + }() + wg.Wait() + + return innerError +} + +// WithNetNSPath executes the passed closure under the given network +// namespace, restoring the original namespace afterwards. +func WithNetNSPath(nspath string, toRun func(NetNS) error) error { + ns, err := GetNS(nspath) + if err != nil { + return err + } + defer ns.Close() + return ns.Do(toRun) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go new file mode 100644 index 000000000..469e9be9e --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go @@ -0,0 +1,78 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sysctl + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// Sysctl provides a method to set/get values from /proc/sys - in linux systems +// new interface to set/get values of variables formerly handled by sysctl syscall +// If optional `params` have only one string value - this function will +// set this value into corresponding sysctl variable +func Sysctl(name string, params ...string) (string, error) { + if len(params) > 1 { + return "", fmt.Errorf("unexcepted additional parameters") + } else if len(params) == 1 { + return setSysctl(name, params[0]) + } + return getSysctl(name) +} + +func getSysctl(name string) (string, error) { + fullName := filepath.Join("/proc/sys", toNormalName(name)) + data, err := os.ReadFile(fullName) + if err != nil { + return "", err + } + + return string(data[:len(data)-1]), nil +} + +func setSysctl(name, value string) (string, error) { + fullName := filepath.Join("/proc/sys", toNormalName(name)) + if err := os.WriteFile(fullName, []byte(value), 0644); err != nil { + return "", err + } + + return getSysctl(name) +} + +// Normalize names by using slash as separator +// Sysctl names can use dots or slashes as separator: +// - if dots are used, dots and slashes are interchanged. +// - if slashes are used, slashes and dots are left intact. +// Separator in use is determined by first occurrence. +func toNormalName(name string) string { + interchange := false + for _, c := range name { + if c == '.' { + interchange = true + break + } + if c == '/' { + break + } + } + + if interchange { + r := strings.NewReplacer(".", "/", "/", ".") + return r.Replace(name) + } + return name +} diff --git a/vendor/github.com/coreos/go-iptables/LICENSE b/vendor/github.com/coreos/go-iptables/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coreos/go-iptables/NOTICE b/vendor/github.com/coreos/go-iptables/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go new file mode 100644 index 000000000..85047e59d --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go @@ -0,0 +1,680 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "bytes" + "fmt" + "io" + "net" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" +) + +// Adds the output of stderr to exec.ExitError +type Error struct { + exec.ExitError + cmd exec.Cmd + msg string + exitStatus *int //for overriding +} + +func (e *Error) ExitStatus() int { + if e.exitStatus != nil { + return *e.exitStatus + } + return e.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (e *Error) Error() string { + return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) +} + +// IsNotExist returns true if the error is due to the chain or rule not existing +func (e *Error) IsNotExist() bool { + if e.ExitStatus() != 1 { + return false + } + msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n" + msgNoChainExist := "No chain/target/match by that name.\n" + return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist) +} + +// Protocol to differentiate between IPv4 and IPv6 +type Protocol byte + +const ( + ProtocolIPv4 Protocol = iota + ProtocolIPv6 +) + +type IPTables struct { + path string + proto Protocol + hasCheck bool + hasWait bool + waitSupportSecond bool + hasRandomFully bool + v1 int + v2 int + v3 int + mode string // the underlying iptables operating mode, e.g. nf_tables + timeout int // time to wait for the iptables lock, default waits forever +} + +// Stat represents a structured statistic entry. +type Stat struct { + Packets uint64 `json:"pkts"` + Bytes uint64 `json:"bytes"` + Target string `json:"target"` + Protocol string `json:"prot"` + Opt string `json:"opt"` + Input string `json:"in"` + Output string `json:"out"` + Source *net.IPNet `json:"source"` + Destination *net.IPNet `json:"destination"` + Options string `json:"options"` +} + +type option func(*IPTables) + +func IPFamily(proto Protocol) option { + return func(ipt *IPTables) { + ipt.proto = proto + } +} + +func Timeout(timeout int) option { + return func(ipt *IPTables) { + ipt.timeout = timeout + } +} + +// New creates a new IPTables configured with the options passed as parameter. +// For backwards compatibility, by default always uses IPv4 and timeout 0. +// i.e. 
you can create an IPv6 IPTables using a timeout of 5 seconds passing +// the IPFamily and Timeout options as follow: +// ip6t := New(IPFamily(ProtocolIPv6), Timeout(5)) +func New(opts ...option) (*IPTables, error) { + + ipt := &IPTables{ + proto: ProtocolIPv4, + timeout: 0, + } + + for _, opt := range opts { + opt(ipt) + } + + path, err := exec.LookPath(getIptablesCommand(ipt.proto)) + if err != nil { + return nil, err + } + ipt.path = path + + vstring, err := getIptablesVersionString(path) + if err != nil { + return nil, fmt.Errorf("could not get iptables version: %v", err) + } + v1, v2, v3, mode, err := extractIptablesVersion(vstring) + if err != nil { + return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err) + } + ipt.v1 = v1 + ipt.v2 = v2 + ipt.v3 = v3 + ipt.mode = mode + + checkPresent, waitPresent, waitSupportSecond, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) + ipt.hasCheck = checkPresent + ipt.hasWait = waitPresent + ipt.waitSupportSecond = waitSupportSecond + ipt.hasRandomFully = randomFullyPresent + + return ipt, nil +} + +// New creates a new IPTables for the given proto. +// The proto will determine which command is used, either "iptables" or "ip6tables". +func NewWithProtocol(proto Protocol) (*IPTables, error) { + return New(IPFamily(proto), Timeout(0)) +} + +// Proto returns the protocol used by this IPTables. +func (ipt *IPTables) Proto() Protocol { + return ipt.proto +} + +// Exists checks if given rulespec in specified table/chain exists +func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) { + if !ipt.hasCheck { + return ipt.existsForOldIptables(table, chain, rulespec) + + } + cmd := append([]string{"-t", table, "-C", chain}, rulespec...) + err := ipt.run(cmd...) + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Insert inserts rulespec to specified table/chain (in specified pos) +func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error { + cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...) + return ipt.run(cmd...) +} + +// Append appends rulespec to specified table/chain +func (ipt *IPTables) Append(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-A", chain}, rulespec...) + return ipt.run(cmd...) +} + +// AppendUnique acts like Append except that it won't add a duplicate +func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err != nil { + return err + } + + if !exists { + return ipt.Append(table, chain, rulespec...) + } + + return nil +} + +// Delete removes rulespec in specified table/chain +func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-D", chain}, rulespec...) + return ipt.run(cmd...) +} + +func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err == nil && exists { + err = ipt.Delete(table, chain, rulespec...) 
+ } + return err +} + +// List rules in specified table/chain +func (ipt *IPTables) List(table, chain string) ([]string, error) { + args := []string{"-t", table, "-S", chain} + return ipt.executeList(args) +} + +// List rules (with counters) in specified table/chain +func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) { + args := []string{"-t", table, "-v", "-S", chain} + return ipt.executeList(args) +} + +// ListChains returns a slice containing the name of each chain in the specified table. +func (ipt *IPTables) ListChains(table string) ([]string, error) { + args := []string{"-t", table, "-S"} + + result, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + // Iterate over rules to find all default (-P) and user-specified (-N) chains. + // Chains definition always come before rules. + // Format is the following: + // -P OUTPUT ACCEPT + // -N Custom + var chains []string + for _, val := range result { + if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") { + chains = append(chains, strings.Fields(val)[1]) + } else { + break + } + } + return chains, nil +} + +// '-S' is fine with non existing rule index as long as the chain exists +// therefore pass index 1 to reduce overhead for large chains +func (ipt *IPTables) ChainExists(table, chain string) (bool, error) { + err := ipt.run("-t", table, "-S", chain, "1") + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Stats lists rules including the byte and packet counts +func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { + args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} + lines, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + appendSubnet := func(addr string) string { + if strings.IndexByte(addr, byte('/')) < 0 { + if strings.IndexByte(addr, '.') < 0 { + return addr + "/128" + } + return addr + "/32" + } + return addr + } + + ipv6 := ipt.proto == ProtocolIPv6 + + rows := [][]string{} + for i, line := range lines { + // Skip over chain name and field header + if i < 2 { + continue + } + + // Fields: + // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options + line = strings.TrimSpace(line) + fields := strings.Fields(line) + + // The ip6tables verbose output cannot be naively split due to the default "opt" + // field containing 2 single spaces. + if ipv6 { + // Check if field 6 is "opt" or "source" address + dest := fields[6] + ip, _, _ := net.ParseCIDR(dest) + if ip == nil { + ip = net.ParseIP(dest) + } + + // If we detected a CIDR or IP, the "opt" field is empty.. insert it. + if ip != nil { + f := []string{} + f = append(f, fields[:4]...) + f = append(f, " ") // Empty "opt" field for ip6tables + f = append(f, fields[4:]...) + fields = f + } + } + + // Adjust "source" and "destination" to include netmask, to match regular + // List output + fields[7] = appendSubnet(fields[7]) + fields[8] = appendSubnet(fields[8]) + + // Combine "options" fields 9... into a single space-delimited field. + options := fields[9:] + fields = fields[:9] + fields = append(fields, strings.Join(options, " ")) + rows = append(rows, fields) + } + return rows, nil +} + +// ParseStat parses a single statistic row into a Stat struct. The input should +// be a string slice that is returned from calling the Stat method. 
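For orientation, a hedged usage sketch of the rule-management and statistics helpers above (illustrative only, not part of the vendored file): the table "filter", the chain "INPUT", and the rule spec are placeholder values.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-iptables/iptables"
)

func main() {
	// Defaults are IPv4 and an unbounded wait for the xtables lock; both can be
	// overridden through the functional options defined above.
	ipt, err := iptables.New(iptables.IPFamily(iptables.ProtocolIPv4), iptables.Timeout(5))
	if err != nil {
		log.Fatal(err)
	}

	// AppendUnique only appends the rule when an identical one is not already present.
	if err := ipt.AppendUnique("filter", "INPUT", "-p", "tcp", "--dport", "2055", "-j", "ACCEPT"); err != nil {
		log.Fatal(err)
	}

	// StructuredStats wraps Stats and ParseStat and returns typed counters per rule.
	stats, err := ipt.StructuredStats("filter", "INPUT")
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		fmt.Printf("target=%s packets=%d bytes=%d\n", s.Target, s.Packets, s.Bytes)
	}
}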
+func (ipt *IPTables) ParseStat(stat []string) (parsed Stat, err error) { + // For forward-compatibility, expect at least 10 fields in the stat + if len(stat) < 10 { + return parsed, fmt.Errorf("stat contained fewer fields than expected") + } + + // Convert the fields that are not plain strings + parsed.Packets, err = strconv.ParseUint(stat[0], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse packets") + } + parsed.Bytes, err = strconv.ParseUint(stat[1], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse bytes") + } + _, parsed.Source, err = net.ParseCIDR(stat[7]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse source") + } + _, parsed.Destination, err = net.ParseCIDR(stat[8]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse destination") + } + + // Put the fields that are strings + parsed.Target = stat[2] + parsed.Protocol = stat[3] + parsed.Opt = stat[4] + parsed.Input = stat[5] + parsed.Output = stat[6] + parsed.Options = stat[9] + + return parsed, nil +} + +// StructuredStats returns statistics as structured data which may be further +// parsed and marshaled. +func (ipt *IPTables) StructuredStats(table, chain string) ([]Stat, error) { + rawStats, err := ipt.Stats(table, chain) + if err != nil { + return nil, err + } + + structStats := []Stat{} + for _, rawStat := range rawStats { + stat, err := ipt.ParseStat(rawStat) + if err != nil { + return nil, err + } + structStats = append(structStats, stat) + } + + return structStats, nil +} + +func (ipt *IPTables) executeList(args []string) ([]string, error) { + var stdout bytes.Buffer + if err := ipt.runWithOutput(args, &stdout); err != nil { + return nil, err + } + + rules := strings.Split(stdout.String(), "\n") + + // strip trailing newline + if len(rules) > 0 && rules[len(rules)-1] == "" { + rules = rules[:len(rules)-1] + } + + for i, rule := range rules { + rules[i] = filterRuleOutput(rule) + } + + return rules, nil +} + +// NewChain creates a new chain in the specified table. +// If the chain already exists, it will result in an error. +func (ipt *IPTables) NewChain(table, chain string) error { + return ipt.run("-t", table, "-N", chain) +} + +const existsErr = 1 + +// ClearChain flushed (deletes all rules) in the specified table/chain. +// If the chain does not exist, a new one will be created +func (ipt *IPTables) ClearChain(table, chain string) error { + err := ipt.NewChain(table, chain) + + eerr, eok := err.(*Error) + switch { + case err == nil: + return nil + case eok && eerr.ExitStatus() == existsErr: + // chain already exists. Flush (clear) it. + return ipt.run("-t", table, "-F", chain) + default: + return err + } +} + +// RenameChain renames the old chain to the new one. +func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error { + return ipt.run("-t", table, "-E", oldChain, newChain) +} + +// DeleteChain deletes the chain in the specified table. 
+// The chain must be empty +func (ipt *IPTables) DeleteChain(table, chain string) error { + return ipt.run("-t", table, "-X", chain) +} + +func (ipt *IPTables) ClearAndDeleteChain(table, chain string) error { + exists, err := ipt.ChainExists(table, chain) + if err != nil || !exists { + return err + } + err = ipt.run("-t", table, "-F", chain) + if err == nil { + err = ipt.run("-t", table, "-X", chain) + } + return err +} + +func (ipt *IPTables) ClearAll() error { + return ipt.run("-F") +} + +func (ipt *IPTables) DeleteAll() error { + return ipt.run("-X") +} + +// ChangePolicy changes policy on chain to target +func (ipt *IPTables) ChangePolicy(table, chain, target string) error { + return ipt.run("-t", table, "-P", chain, target) +} + +// Check if the underlying iptables command supports the --random-fully flag +func (ipt *IPTables) HasRandomFully() bool { + return ipt.hasRandomFully +} + +// Return version components of the underlying iptables command +func (ipt *IPTables) GetIptablesVersion() (int, int, int) { + return ipt.v1, ipt.v2, ipt.v3 +} + +// run runs an iptables command with the given arguments, ignoring +// any stdout output +func (ipt *IPTables) run(args ...string) error { + return ipt.runWithOutput(args, nil) +} + +// runWithOutput runs an iptables command with the given arguments, +// writing any stdout output to the given writer +func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { + args = append([]string{ipt.path}, args...) + if ipt.hasWait { + args = append(args, "--wait") + if ipt.timeout != 0 && ipt.waitSupportSecond { + args = append(args, strconv.Itoa(ipt.timeout)) + } + } else { + fmu, err := newXtablesFileLock() + if err != nil { + return err + } + ul, err := fmu.tryLock() + if err != nil { + syscall.Close(fmu.fd) + return err + } + defer ul.Unlock() + } + + var stderr bytes.Buffer + cmd := exec.Cmd{ + Path: ipt.path, + Args: args, + Stdout: stdout, + Stderr: &stderr, + } + + if err := cmd.Run(); err != nil { + switch e := err.(type) { + case *exec.ExitError: + return &Error{*e, cmd, stderr.String(), nil} + default: + return err + } + } + + return nil +} + +// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables". +func getIptablesCommand(proto Protocol) string { + if proto == ProtocolIPv6 { + return "ip6tables" + } else { + return "iptables" + } +} + +// Checks if iptables has the "-C" and "--wait" flag +func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool, bool) { + return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesWaitSupportSecond(v1, v2, v3), iptablesHasRandomFully(v1, v2, v3) +} + +// getIptablesVersion returns the first three components of the iptables version +// and the operating mode (e.g. nf_tables or legacy) +// e.g. 
"iptables v1.3.66" would return (1, 3, 66, legacy, nil) +func extractIptablesVersion(str string) (int, int, int, string, error) { + versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`) + result := versionMatcher.FindStringSubmatch(str) + if result == nil { + return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str) + } + + v1, err := strconv.Atoi(result[1]) + if err != nil { + return 0, 0, 0, "", err + } + + v2, err := strconv.Atoi(result[2]) + if err != nil { + return 0, 0, 0, "", err + } + + v3, err := strconv.Atoi(result[3]) + if err != nil { + return 0, 0, 0, "", err + } + + mode := "legacy" + if result[4] != "" { + mode = result[4] + } + return v1, v2, v3, mode, nil +} + +// Runs "iptables --version" to get the version string +func getIptablesVersionString(path string) (string, error) { + cmd := exec.Command(path, "--version") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + return out.String(), nil +} + +// Checks if an iptables version is after 1.4.11, when --check was added +func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 11 { + return true + } + return false +} + +// Checks if an iptables version is after 1.4.20, when --wait was added +func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 20 { + return true + } + return false +} + +//Checks if an iptablse version is after 1.6.0, when --wait support second +func iptablesWaitSupportSecond(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 >= 6 { + return true + } + return false +} + +// Checks if an iptables version is after 1.6.2, when --random-fully was added +func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 6 { + return true + } + if v1 == 1 && v2 == 6 && v3 >= 2 { + return true + } + return false +} + +// Checks if a rule specification exists for a table +func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) { + rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ") + args := []string{"-t", table, "-S"} + var stdout bytes.Buffer + err := ipt.runWithOutput(args, &stdout) + if err != nil { + return false, err + } + return strings.Contains(stdout.String(), rs), nil +} + +// counterRegex is the regex used to detect nftables counter format +var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `) + +// filterRuleOutput works around some inconsistencies in output. +// For example, when iptables is in legacy vs. nftables mode, it produces +// different results. 
+func filterRuleOutput(rule string) string { + out := rule + + // work around an output difference in nftables mode where counters + // are output in iptables-save format, rather than iptables -S format + // The string begins with "[0:0]" + // + // Fixes #49 + if groups := counterRegex.FindStringSubmatch(out); groups != nil { + // drop the brackets + out = out[len(groups[0]):] + out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2]) + } + + return out +} diff --git a/vendor/github.com/coreos/go-iptables/iptables/lock.go b/vendor/github.com/coreos/go-iptables/iptables/lock.go new file mode 100644 index 000000000..a88e92b4e --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/iptables/lock.go @@ -0,0 +1,84 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "os" + "sync" + "syscall" +) + +const ( + // In earlier versions of iptables, the xtables lock was implemented + // via a Unix socket, but now flock is used via this lockfile: + // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707 + // Note the LSB-conforming "/run" directory does not exist on old + // distributions, so assume "/var" is symlinked + xtablesLockFilePath = "/var/run/xtables.lock" + + defaultFilePerm = 0600 +) + +type Unlocker interface { + Unlock() error +} + +type nopUnlocker struct{} + +func (_ nopUnlocker) Unlock() error { return nil } + +type fileLock struct { + // mu is used to protect against concurrent invocations from within this process + mu sync.Mutex + fd int +} + +// tryLock takes an exclusive lock on the xtables lock file without blocking. +// This is best-effort only: if the exclusive lock would block (i.e. because +// another process already holds it), no error is returned. Otherwise, any +// error encountered during the locking operation is returned. +// The returned Unlocker should be used to release the lock when the caller is +// done invoking iptables commands. +func (l *fileLock) tryLock() (Unlocker, error) { + l.mu.Lock() + err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) + switch err { + case syscall.EWOULDBLOCK: + l.mu.Unlock() + return nopUnlocker{}, nil + case nil: + return l, nil + default: + l.mu.Unlock() + return nil, err + } +} + +// Unlock closes the underlying file, which implicitly unlocks it as well. It +// also unlocks the associated mutex. 
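When --wait is not available, the wrapper serializes access through the flock-based xtables lock file implemented below. A standalone sketch of that non-blocking flock pattern (Linux-only, illustrative; the /tmp path is a stand-in for the real lock file and this is not code from the package):

package main

import (
	"fmt"
	"log"
	"syscall"
)

func main() {
	// Open (or create) the lock file, then try to take an exclusive, non-blocking lock.
	fd, err := syscall.Open("/tmp/example-xtables.lock", syscall.O_CREAT|syscall.O_RDONLY, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(fd)

	switch lockErr := syscall.Flock(fd, syscall.LOCK_EX|syscall.LOCK_NB); lockErr {
	case nil:
		fmt.Println("lock acquired; safe to invoke iptables")
	case syscall.EWOULDBLOCK:
		// Another process holds the lock; tryLock treats this as best-effort and continues.
		fmt.Println("lock busy; proceeding without it")
	default:
		log.Fatal(lockErr)
	}
}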
+func (l *fileLock) Unlock() error { + defer l.mu.Unlock() + return syscall.Close(l.fd) +} + +// newXtablesFileLock opens a new lock on the xtables lockfile without +// acquiring the lock +func newXtablesFileLock() (*fileLock, error) { + fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm) + if err != nil { + return nil, err + } + return &fileLock{fd: fd}, nil +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md new file mode 100644 index 000000000..1cade6cef --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 000000000..42bf32aab --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,16 @@ +package md2man + +import ( + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + + return blackfriday.Run(doc, + []blackfriday.Option{ + blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions()), + }...) 
+} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 000000000..4b19188d9 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,348 @@ +package md2man + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + extensions blackfriday.Extensions + listCounters []int + firstHeader bool + firstDD bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB" + codespanCloseTag = "\\fR" + codeTag = "\n.EX\n" + codeCloseTag = "\n.EE\n" + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { // nolint: golint + var extensions blackfriday.Extensions + + extensions |= blackfriday.NoIntraEmphasis + extensions |= blackfriday.Tables + extensions |= blackfriday.FencedCode + extensions |= blackfriday.SpaceHeadings + extensions |= blackfriday.Footnotes + extensions |= blackfriday.Titleblock + extensions |= blackfriday.DefinitionLists + return &roffRenderer{ + extensions: extensions, + } +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (r *roffRenderer) GetExtensions() blackfriday.Extensions { + return r.extensions +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // disable hyphenation + out(w, ".nh\n") +} + +// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + walkAction := blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + escapeSpecialChars(w, node.Literal) + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + // Don't render the link text for automatic links, because this + // will only duplicate the URL in the roff output. + // See https://daringfireball.net/projects/markdown/syntax#autolink + if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { + out(w, string(node.FirstChild.Literal)) + } + // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. 
+ escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") + out(w, linkTag+escapedLink+linkCloseTag) + walkAction = blackfriday.SkipChildren + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + // roff .PP markers break lists + if r.listDepth > 0 { + return blackfriday.GoToNext + } + if entering { + out(w, paraTag) + } else { + out(w, crTag) + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags + case blackfriday.HTMLBlock: + if bytes.HasPrefix(node.Literal, []byte("+>hHdJcB8R?tu9>G%-r<|xBB26=>F z_hg~rJah($>wptFf7z`M%5WY>70Oo$4d>}|G1$%4r9L%0*nPOgQ6Pk1w>DrnPl|)M zI(Rq_6(H%XJ>JjWL3u4_Xnqh3954KEs+E6Ww#BXd1IJn1%0KXSi>sdn12q=6@(&zq zaV!78EQ?$D2WDE_`pbY0#Nkvc|G*4OxAG56x44ympxWYA{()&0xAG56wYZgkz_+-S zf8c0~3qe1pSEwJ;qF_M#Z-c7AbM|2KI~@;-#|bw+wAUP}{zl>6Bq8K{RPtnjp63*&>qX@X9h8~i{mbP zN`v{OtJk1h%eNlpioP;_LQcDd7mQIq%j+vsKV<%*Pn*pPD%5}0gNU9nTK)zO!U(}w zt+(|cV%He;N9q+lX_e!+!uE6XhhCEpTeMtJKJN=#w0xnj_j6ok{-IATh37)U0*QJ- zx%S_Dd1Kmt8>U-&h58N4q4lTZLB9T0I63XF`SOm{{@ReS^s(B%^X0A7{=Xi^NqIM@ zoEzU8tR5RyE5Bi((p0E)f~C_AAFPJUl|QWJdA+EHIbelzw7sJI`NG$Bi}HQpv$jy* z5k0enuYM8bvp&K(=Km3SX@@!5p5a7ocg{1YIh?3|(KlrCoydJ^myY+t=5M{YUCNQr zaer8cYp)k-#I8mi|J}T@7uovEr;Apg9GdL)!hEr7mp%`MHQDQhQBsbqJ{R0Nihd^c zw%GGwq8ij3BbVh?!^4F7N7$%-6n0S?(s$XopVf8`C#pY$EjF%~>2t5pZ}qw7`fcAH z?cXjQPSo^5J?j0z&=WEah7)z1Z5{fN#ZM4B3i2mPymjblOV^HANIzN9`-b*Odfy%$ zXF~r>;SXyuJ@zMQC;yBQd}$~D^bve%C!ez-v`0jbk9g>xO7?zyX(zvS1Yg?8f2Z)n ziKhrH*iWmVXO9qh9&kLpSKV{Ox?os`39op3uk!k3-gGbg6@OmZo=536oktaq&xQG| z`Zd#~0@VzepRqhHzbk-gQq)e)|BCfej`=rxefS(HAAN3(_S`EjSB2Li=h-S>P3>5(qCC=X zYR?+Mmwr>bYy@BWP3_VVd>{J4+9f0S(r;=PkKjwcsjU}&SW_o7|6HtDB=K~-%V>wu z4Mx`*Z83VW(bYzqgog1Rp;5nm)^9_s^?|MmFku=UrfvExVgR~=@(sHr7y7zRit^Q7T`xuXYOk%ECQ@tE-eV=+H*`}8dod^>MD?k? z$Bf{sy)#DeeQ-j5`Ut+-TRnoW_D&OiIMEkc@Q0}q?;F}x!rr4LeWbl_8^Kq5r;OnH z;Do-;H==sfUYnmq{7~(+b@@a}MeA25V0}YZm$297V-bB?zcwF>@YPdv%@`<*U7_IGo5>)LtE6`i8P4?A3W)RG-?b^Smfu?bZ2SlBc_N)E{k{-=h^(TZMPCP?sLI0T&PYvll zLi9e>ceDyGK zuhaDDJU&`a>VF$c=<6_jZAJRpP2bWI_SyPs#CW^a^ld59*K7K8-aOL2&5|DFYy0+? 
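A hedged usage sketch of the go-md2man entry point added above, md2man.Render, which drives this roff renderer (illustrative only; the markdown input is made up):

package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	md := []byte("# EXAMPLE 1 \"September 2024\"\n\n## NAME\nexample - show md2man roff output\n")
	// Render parses the markdown with blackfriday and emits roff suitable for man(1).
	fmt.Println(string(md2man.Render(md)))
}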
zzV0G@q3LTZVc)hA`m(|gC%#u`!T&Ckcxp)3AyGTD{@x?$QNG&uZqs*Zk-mP@x4MLV z*ObtAwduRENZ(bauc?H-D@y3wVfrpF(zo68wUp3zT?u`=Oy38K^j&NEbe$8eC#}B^ zl+gEn)3>uo-}_A8h7$IDxP-o&Oy7+~`aWd(bX_*mz8gyDyWaE-6zSV-`npQkH&jC3 zpzyerpip8z`14mih%0KW{i(B~z;ug2^59s_poNDDCU@t-aPPOt6==?sM zYULl$d41vhN$2z7R4f00&f`OS{~;LA`Fl9k%0Hk3bvV_^KcMsVaH^GmfZoLXt^5Pp z&hA`4xNe@L=l5L)=1Sb{vlh;u-XZ*8V2;W|Jxk!uN#5xB)4ec<7FKy$Pkm}0_K(yr zkJe&&{WbfvAMTNC4|uM{8gS>rZojs@=4rX#?H{+-=>C`6@2>Ffk^bWLxl8S9G~52l zy=uk*_CNH%rN8D0$(QA1(bDnfG_G#orEh*#;^~|JYV;+eFB*M8Xs|{z3{qM_uAN-_ zRFxg({TPoFo}aNkVdoe&OpsE)b@IU)^;>seutxoy=Oy%zAg%o%f8M0s%xh40!+xgP z!}9o$zZd0T54acImY!JQ={ORsQGcgi`bUsbe|LToq_uzK&&yKUe`qJ~bmu(5TJ`(< zIaFE=X*dx=2th{sN&Xxvt^Fu(|4Nfo>57nzs2eq$)Z1D`h2D7@#YWqzF4}}{Nchv z{bwP{dZ)g0{MX_q(S62WA7KC7`AL;`nmE)j>}Ck2`Xur`h165^(Gt(MNA*-qM<=0Qmh zQrht@IsA_jcj@C$ovhKITKjR;n9rH~NkT6<{CT0{to_}672e&#uPW1i9?Y=%uD1Fv zDtFZw?YI54_vyH3_0)f1PUA)O)nEIt@cTZl>3w@O?%sncq$_{+c|Q_+`i4~h?5Zd7 zc1!u(^LvKYf4-fq9;;kCPqleKwayPX9!dM@@CAQ$?Pv3eYMo~k%QeHV83H#isiybq zdkg#PQ+|-P=W}X~%E4%nQ+M*FR5^dOd}mm`MSeY{%JQw&`VMATzD07ERDD+RsgTo! zLO-jj&~Z3;je2l?P&G#N=i4=<<>-%FyB3wly=Umo%k%AO^_y?kA#2yFTgA@4z1FUC zKPqwhBiq&45mf7QAee6BKhmZC?0HjF?&?J|3TCL^=KVn3-7Aa>>KMTcdoC5#UtwI( z=VLHK2hNfDri#7!_RU!P7U`SPq|cootqtz#p-_)Do|s?c>&^Vby~p6{)!jekL~Wm- zTFaAfr|DKMt0z~#?)f~WNhdlkpD8EW_*ZS^oo?kVs^=+9?m3|CcgczCrTi80_AJjs zb$kk{U#lH#T(N%9U+cyRt3T_P{TFIJZk%xAL*K{Ee@>*Ac;4wJ+5A0UzfRx&<8)~~ z%SF%ZmFEg|5`tG_f%I)+|_F^V}_=q{n;`>^)VVpd03;)em76y z^N^wMwkKpfyrk}QiM#yW`5s$mCawy3O6&O~OMZH&`3<9;7$#VM2o`C)s{Caef3@A3 zZJZuXYx;!UO8NX}7zcBX2F>RKzU0%Reld)4F-)jeI{Z$h6P76rK5VpE>69D(Ecv_V z*Maioh&D1`_Hv8^e|Ns>hps+@YMT$DoYWjlxA|b5WP^Cod~ll02T^^}ehXF3y@ytX z_tRLjL0aqg5_=ER<>&Sd+~Lt0lSgIs*6cN!eec{f+Ai zx=Mw-<%Im#%*yMR{8RMO!n!j^AFJu8Z^=JZqjA_P^=acGg!wH|j<7ueJJKX!_S$ zeWM&=XN%Q0l0|>Q^!rL*_j&^PF+|&e`rqjKqRKl7HIM#e`hseoQF-4|$}`pKOS=vF zgVmQS&(vuu4=qxj1rp7#3-jv=yywX*+`6L5;%;64`qvppi@kkAkBaYS%En5)x_DLD zJNBC=xbeDbOr@_@Ig&eL+|*CTFzJQEd# zK>uQWHms7US2fP&cQbS#3ufASq)4uLoTbk|0zxp8T1L}n>yh=^fr5-Z=Wx5^YkoZC zJU5>$k*^Nx-jr3A?$#s4`I_7%tF%7dcs_DHGA#BM)+4`=xSPkhaj~!-x!>eZ5_-wu z&j=m49{H^B^XrkB7*G&KtpBXsRTVlv?!WME@teTvssB>9KH0y%+&ZQ24o%O`6HH$r zUFQ|EgGVI)K6?*l_L1L|xSO{IZ*)B}lcQ{;ovps}elztDNe^b)y1@C3$>sgVtw&~Z zbQn#a@`Lp{j}KCFR1R(~eiKNvupXIFBHvXecck-It?!^XUz5A!$lK+H*IbX3s0Zgi zRpZqD{!6W0^Xn07*P`;c`L$a&_T8cF=e{r2f2qdZyn1%kbz*1V$5sFA^RAV+YgcDS zFyk=ck64d12^;fZS1&dXo~iTQ(&Ivv>YHivcGtdEfBE*Ep$#6)&fYAfB#w54%Jo;bW{43 zo8R}{X8p_Qt5bc2bj@c>jn_^v$+j$?uam zJw^3iQqO5pj@heD73#)S*Pj1<>lK~xcKf@pjVj?H_HY=CR+Ca#x7Az)sjhJ6(Gz+3j$B_inkgG2>;KlNU?RBlkoHL$ckS6>`gz7f@b z=Sx;j`tk5~5zCjC_dh^>k)3_Fza;sLrUzA$zrS@zK}(`4(oSVr`7U4 z3|BDjdCK}`y*oc09?<(Hm|q9>-b84>U*OIK!}!<5u3)NEzZcr?Uj&v<82_rJ>pVP+ ztN#S{UPc)IhNLrJ4)vw;Rlh3a`>#d$s=o!-=>sE-4;SUD{#nTPAxUrju=aCTFHge` z@vrz7@^;+^yC&$gcfWRY{>u7y?dANo^>*{yA+1L~2c3MWcIOkQ>Yy_3${!cnf&GE{ z$(Q@AJ^J>l{;Jp+@@7bLtW8daC!{q$_x-hWTZ@#Vzs>x#&Fs3JL;9#&w(sP_hk^5Aij?YoP>ibW%E3Uu0QqZU4c(BdZ zA=e-yguXj<{Nndd+5XLC$79Eh^SBF`obRXo_Wi5=_y*D6AGh`{8kd}33?HD~XF+eY z-C17uyD)AX;Pa8?bn~_ea}hys=k{T(`aw`_{jgU3A+YoRuvYydu=DG%R{g`hM-bMk zp9Iw`HR`)o{l&eH5Z0>S1lwsH)75_hJBJNx_4yXqd2v{)?HE++{2;7-Lh327bLX&D z+c&7zb#7RzejZfod@!t4e-Em)eZpGx(_otAU!(pSOu~Yb_SC4~29vD(HR`{?Bpd(i zJ*{98M_bCPKL_@`|FA~=I+$eTuTlRFCaGV9_MTR7Ia`7EtG@@6bbS=osQ(3%bbl(W zQNIf&S@~`4yni<|pnsMGq~WpQP^8`fa^K=dscG$QPKem5cel{PL@>u7VC^h3`Y&h)-Ro zqNpCXwVwkB+jM>yU3XDW@%x2rFY0mM^JKgNSJl`id}92Gyr)Ocbno@K^SdCuK;+!} zBS9)5arfR{uy&!QqtI-JAXTSvWXhiAe)pllwVQTd`980-!(u(Zbo(j2NS}lI!o~W& zPn17P__`j7@_pemH9PC%l3+8Hj^iC#EX_r-d_WBJ3ex}FM;(Q{Tl?}UGh 
zuG8GQC0J7rY3yHGdXQcuad)m7q!(*C+DhtWt!~t;hka5nCrYyCe)ocWA1wE~AE)o& zU4`dWc!b^`9-(@}BQ$^aJfXk)kPnYgJEDB4haVT5{44jt8vy3i&oK#sKCC3_OJB#wJJYe*BNYApXuZEIiDV{&-wJuT_Wf9f6_az&+CIY^!2Eo{P(nV z9+-~5+xT0J?l77)`hKHV8@;4Jn3)|~cH^;fR3WS^W`5dsrAcULke1`*+#P#_cuwzPGpU=TMEm)Y*=X0Rl z-wQKZZa&BG=egfCnXq1N_kt~^C!_fXTP&Xpr7_+ep)|}WKiJ~(QTu`|W@iQuKFV*g za%9-cuwQb7&95!Lry(Z&aLox7=6-qgVu!5ro>5)clysWVC(asaX3f+(h?RR^^ zio;A^$FI=NF~f>uj6cKZbfeWqX*KhkYScG+w9#2cX9`Wtv3k?$_j*BPoXhCBvU}c< zFZG$x@jI;0=S?rP$T(M_<9uq4+8e!3^&KzyMfs|4w%McS2L*k{2|v)+6y$ev=s{tV zuly6#-zvNYiL<><2AVx}i^SOvXxH!@iFy+nl?G#Vp6c2&v~mXaJ4d0FFBog{)r^)a zu-`=rGg|Iotj$+5>IcDCo3CcnAA+%JUzpMM55}sWh8gvPU@ROEAhxZjlu?S1)R ztn+Vut_EYB|Er$_V_pBz1z<4N^)L0CKt>7rfwo65$MQEnVn3y2`TA+0HRxCN`&MB_ z{i|T-F_!M^P=71fseV|nL;bH{r}|}ZverkKQGX1^qTvw2j4!m%4(b=Ky~2$8PoW*u zUkdGH{*rG8?T3YS(*9X!2lfBJ&PT$ES(1P2NkY7*QT++PUQqhgD$p}Q3;u@+MBqG= zIo&HUK5w<2Ts*9pCn;gYT%+ocVTJCWhZS0`u)_5NEk{_P%*`d#Gv;OO5f#+BKY^6Mwfaq2DeU?LKte1KbzR&<@}KX7y|5 zha4XnN}ZE5$8M2dVCuL1%>FjrKXCj0{cZC(O~5(89HX<0`bH-kooKYus7x<#pZmQ; z{hnQa+jFm|zI{fYF#4#`hmGcp-e>e~qjwq|GAqKc+jGX(eXMpnA8xn(|BBN1UxikcEtdMODytX2Ev}!| zNj&vIdma9;&-CERwfijOoNss7&GNeOdBRjN3(wJU0`mL$ z`SSv|pI>3=cAk+xFR=5B{CR=;L*cx@&NK4o1?nF`w>olIW9J$9^MYrkUJB<0wx6Fr zFL37>+ONX=d4{dO^5+@0{>quL|cV+P~a*mnl0|=iOypI`1xX^X~UZ0i0i_OPu?S^yAMz#j=c;6n?KWzPE za$~-EJGWUq+BjDzm)1w?<*KK`(+T+-wX^?q z*vF9}Y9H4@Z`8gK>x^1$myzvf?T%t6kPpZ8`OL3I+qJ@*ju`xANQj)@asJpkX>|W} z?eX`Nvldynat793VO-~-!8SA$LWOwEe~&%P@wMy;mrH zzYgQ_`?o&i)3sev?sTor!_wnBG;Zf4!@D7b(2M(I{!qJ9>nXyQai;c~5q$c8SbMeb z!<(-X%J&>t59!*i67Soq>zc6kN{P2$+GBKw(A2apW{>}y^KOZ!?LCaNy(bpN2ZSHS zuQR&K=(R>aX!HX{-*5DNLeuvDU&8p!#{aOk-I^btE3*LIxn+g@-nZ*t!M3R)$n!7ycW}8c z@%;Buw@=e__&NJUaCx=Hp_l!~o&V;~&$rLW%Zt8kI!@=$&vzW7=@@%N-?o_=NB*L3 zfkgK^*OqaAX^C_5rCQqPc{j@&E-i6xzLdG3K9D~{FDNWz1<6T=)NM41+`stADYL2%*Xvd2iHF2`{djFq4>F(I;;z+*vE0{*GW{~D-pXV z&l84w|FIW^mU2#>JAyCenLJ1M;Z3@a>*nvgPt$E5ed}=|*EeM2=43m6(&~`>sVSVS z<#qEt>X|QmEq9df3t#Ii$|onBto0h@YdO>}qkJv5`e!&x8<_JDv458Od2v54?yv5> z81`@6D)T4OV!1-U)_siV{~KEfc^BB9X-V<<^KgqwxP4K#zV5Sntg`>J(r53ZRmC)) z$?&ge2u{%0D)3Ro*yrOX>_#QM9R!{@8Tb}*fm z9wS7d@8wc&1yWr7728|7 ze0Pam6`t-_P!EIqenr3ij@-?oXuJe}mxXzc{{|jU} z=b)T!zWaLHgZFWqEWR&wyS7vGx#8NA%#ef&a+u=KB-FQtNvfeSLg4Ae6?S={jxBw>xn|X+K&qP>UyBH zSNAbpy>Nd*{YKXvwCg_FRn#7%^$!kh&L3M}qaTgd?%Xl0LPr_(`ySwnzu3Gj-_K&F z$@e|_?wBKfNP8KYV?UF5huUpD%$Msr4WUFF{U4clxT*7h|^|Ekd+7=6s>_lI_Kl9C^!tZ#pZi|IYmTcnPP=i~ zjgzkbxc8|`_Ll=ZFQemN>F;y=yVPIl?{oZE()pZ8pj_<89111RrIg0m zFFAte$1jv)2jcErxitj=1UDWR&QYv=)<`nyN$bJgSK(zO;eSfKxOwI4sSl2v(fT+H zaaXR_TOT9OZT>!gt&sm-t@HW+3I6qu)T5pY>H7-qcLCk|3hwvCD?EK)^ZyHegRYeK zn^NTs?R$Rw-9d)%X5F`sj#GDGy-{kt?#`2{DsPqhgDaN`rTKmPLNnvP+N{W{owuEgCwZ?L^V)1AH@ zjT*<|SoEDJ(URZ8>Kl4aoD$3!Q~g~{dxN@ka)rVi}ufN+r=6$Y5%+S*SGx=)t9$F zxxc<0EgFBr_Ma^G1()jsiTn4`-rG{bFW8?}{rUP|tMQWdYrE$4ZP({o!GG58uW!c& zjlW_0-zN9D{T${86Z7wRxcPG59`!f(eBrt5e8`1!)o-KxIl@=}j`F2n&(-#h@_mul z{t@NVFT=Uo&!T+xzi_Vh$0(om9?sQ%98T5#%XtT?*yDZ|%N1@|icgepj}SQ*8f~0& z^W5V7(_tGgs}A~vDB$yg`os9&JASI3|9i*J-%7py@A#?yR2cvNcl>0Hl=>cV;raEy z<0tDWI*$GSWBlZ($+@EL=Wraw7WdzAZluq_3h$P`QeV|UdTFuKlYkI`|JS#TCP7>C||j^yvMZ)1f-0O~@bZJfHcLlH)z2g;!tAba!`l`(3GY zd#1-<+uhmar@J$$p4Lt4{Y{x(e_cmsuRmk$cyFZK=B~9WS0vNj)8qGaLt#PFC=#Xa z-jwO-?d)#%Gg~s7dO|-hmf6}Gid99risTPud|p?PiqzV*EN-5g?%dq5cGJurzdI9V zy0>Kf5Y?0MTQ~VV8!~=3WY=t7cf7v{mDS~Lw8~x5sPpd8>iYji-mSkX6Q82(IZVxX z{epvn2K=`P9S=x2PWbt#OmF2Zj(qN7Xb;ZIfg{1*)DcW|`FjAR)1ix(bM3;N4ef=V z$JH10C2-Z?LO7JqL6;5~(v)KiGC{5%97XW{~{oO@r(aPh2|fwNNX zE7Fb@TyMt3zP7F6 z+&BW)-M~3egi4-(d_|yFNqfGjxD53q_C0B7lR%#~%-4CIQH(eKb*-&YtsC0GcGA&S 
zQlu>$pZPp5Y_IZ2&_5Vv9sEVHYk!TQZn;BcuWwee{SQaRtlL)LX}~NOLk;jzpgyiS zhFgH^3=abH=dxLc`*2+dOuJsd)nb_SoB=)>lupoz%LioLUW$wQ=~&x=d2a&vM&N0{ zOMyQQj8Mtl%ug7mk9}IPyj_++{L6^Lf67^(rONOf$k5gnTu%Wrzb;(Q0kh2XkC%Z_ z-|}m%H_R-m!qzHd1Als9dh1hh5iuwagk-&1z0%AYI zV^adZhR&131g;~1nU=-n`d<$CDDbO6S?|XHBOE$IqY}YKjCw7IeH1IU&-nwAVtl4s z*iW8$FyP*DAC&gY#Kk~cj|ENOqOHVPT(p&AL=M*}%oz3^hYwe7z}P#;)v$kl(xW6H zHW#E`@2})`*m=O{FXhbN^>IqNKF&5|yK_wNak(*J5ZJZ34 zqtBMJ%&xwfeh>KUdo0hF40BBThGF{c_YKqbrwlIze$Mb};6E7N0Q^_OTY)Q33hKWa z*95~i0Uv4jPT(nq?*py|rY}5<>o~)Y0-uj-A+9HIodo_O(3fzX2D})Q^XGbCmVG`j z>gTh=dP<~}S@MXaG6ZvbE@7)MW`HzB9{%+-0E&+Z3 z80{nSUo*Myn%onh@DcAB&`Qu>g0f%y0n`WmD`*0=0zz5PLn(}m-b#v$W|8tji1vlk zlqLRu25FqE(AWu65j>PUDgDy zb%wLR^dpufhpQ9(<)Hg;@h74hL7BmOfceL=bF88~1IKwjLm0mX0>qPXeFzw8Js+0^ zM!k=Kk9L+i{vLmqIfLi4_Yx@QL_Th&51$6gxd-=zY4;XT`syxF&Nc1^WjVeN%6|DfQ2OIU zWXk?o1IoF>GEkOv9VqMM3Q*1!_JDGp@DM1+zvn>tJdVLwwkKch=JWkDP#^RH(5axC zL8pP<0LnJMA9OnCk3naEz6^Q{=vz?OnV_eF&H`NxdMs!kD4!2Eg1#N}^Pqe_d=GRs z=&wMJ2jx%t@HsIJbPnjrpnN9qUmEdw5P+Tlx)YRR?d_l^fqo72WYC|0a{PG-bUrA5 zYLw&aEKrVv3qel>y#$p0eJLo%!|OrmV=V-o0a^#T6m$`2 z2DBcu4-~=8`{*(ZbQSs4D*B2GNIZlN_g>s{7U*_R@D4r?cpIDn?Xu%J6K*m}(t<08 zi(w5vaMLxA-vF4f99w~NQ2u`KFCZTl3;@#?vcNYQ9t6G>n7)<+zQgdNz@Ij}5BQ6~ z8zJ)o@V9~KgTQ4!F#J2<9~u5V{w>}!hF=8#Im3Sh{=MNp0sqbLpMfjk^wjwh^fPSY z2P!KL{&v89=pRQL_TcOMsofpmCxB-e&H~Q{W*Kw9Cj(!>dx1{_z6Mx2B?wg8ue9;K zbqz}(bD`noz!w`{3H&a@Brvg@d0Yg(js0GW;RFQ`R^iGLXo#(J0QsVecp1pAHeM?* z+b9xZ6=*yAxxV~-cst}=-tUXt^HJc7aB~9J?S`|!7Gf;ZUEo8dtnFy`C6-9Li4ztF ziLu=Gfe%-bv6S-3LOM~*)dzG+c^-mH%4EI<41P4kc3RBIKMt9+@qPll-te!15h~Z> z<3+<6;Fk=q1AaxZtCzCqdIHG|sHQ@H-c5k1DB1*2W`gltL&|Y5sw{X7|*Y=TElpTm7QxC&#|(1 z8OF1$tiv$+Uaa3Ro@Hg%7{>Fg?4yQ{0>0BQo@-_I8pgA&EN2+cx3cdT_JN-QhS~DG z0g?4Y0`yt%`7CY0^&4Qco%HQL8h;RRh7ebd0HGJxK3p$DW-F+4CR70R_%=rO0VfP! z3fuyGFl1;$7jU)VEbx58yMQ?t+5(wD;AO_Y8~8%QIp9{qj{-B$-{^lm;0MCdk8Z<# z;3LR>0|DgADW5exzhaxy9oRRY1feh=J;jsv2oH}uNKLg*{@FKAFDT=WD|F_?vky$6G$N%U`rS69C#+vw4%`C#-`(%tgM3|pKmOf)?J}nX)afbJzbFbB z83#)Bsi@39(vM==p(sL zFkS@zV)R)q!2SaKK9k_j>)r*Nz(B_DV)5QA@Kj)qSA)RwfIkhK1Fi+84LLjr>r8$h za0BqS!RLbQU50ab5OxB859}=Z%B3csz*zD=VCruH{s=JZAPaoEVYbgbCZ7X-5cvC$ zaf8rffC-f2tKh%Hdx5_T%(8IN_cSirnE?GIE|#kW^hLv2&@p(Za_&0_d>An2H9260 zzwiURx8Y*B5}>nj{TKP@@b3hsP9OLzV2-5;e58PTfLnmOf&U7e1%5B^Pk?j4R{?`~ z2|Sp08*Tyq2rzBP0{@fY9Pk%`{{}u6qhAGnl`_DO0n>&Y@V^7I4)*~+4@`&P;^=R{ z%*zKJhl#`ez=@;0vLg*I0G@7mA@B)?>wp&;UUZaK-e7ns^dy0|LQf;`dc!Tiq2Vsz z?Z8qN;Ol^IhI|fq2>5YeE~GyLO#8WrzTdD9{8iu{uoJ*PFx&$C3t%o9v%tRvW?2S- zUp9Ov@S&Ix(YCvRrvcvsoCAIbu+-I5tZ9MESwGV}ZwYV(a00jq_^)7Rfm?vnycf6? 
zSo&Qx+SITQ+z%{v0$&MCds=`83~vDbsNt?^uk3S%1K@8P?ymO6{sfrk*jdOw2YfE` z1^%O9ZXT4O5i4mY@S(utfpfq|0>25E8w)cHCxA~foCRKFI0xJWdVu$^Z`ne+X>iyMW(>3LHMjE);ZRjE(o7(OFpk_p%bCXJO27~)di2XU{fk`kqfY`SEDjQL_YgZd+RQRO zt9nHGkH8#P8OredbC_`_KQY38YXlz`!IVdcO@x7`f{##nw-6~~c@B6P_GJo9}FGFyeC`;x@;`JaM67BaJdc^>x#lld+9 z4+AG~y$t*{&>XH;fhAuQXaeRm#C-l8ZJ5s|3$a%s_)B0O3mFU-k3aF9-fAa1QudU=S~XhIM7h0z3P2 zz^+^m1G}<33Tz>ka|n<4S0m;6a)kd7@GZy-^(}j>(K2$sh5aRq>kh~e590a^@K-={ zxGcaWv>ebA4Z{CdaLC8h-i6rL9Rj zIE%|dEJi^D+RT1e1wB6y4weHEY^<35JPo69AjMcpe%J{$+nS25`=9=Lbc#=LC2WFix_`nx{ zW^v5|{&&zEF4j4Hzysx&{~%xj*g|Zd0kF^uG(6(~ibB7d3w^HKrve{o>8Bg^5kC_c zZcsiS_*~#;O=hLxEaDdcZy_IzD(&qDpU<{N;HwO;2HtJB z1^C0juv69+L%=@=9t8h(V1&w_3DG$V$BEB@&v_lgeZYGZdJh7B%v-Z1$w_$2vRTyHj<13t#E2Yb#m%yO-E^1y+U2i^&cHZS*aFW-N2dGVbx z;smZ++`YgbclQF{1FEgA2ZZy0Pcoi_q$-0R8WfFY0eHNGVp+R5^ z((itY2n#nBR~quHQ`*hv%kM!M0`UI;nqWjjjAN69*oy{WO1^*b-YvKfdUyE1(R0nJ9|{P&4B zxpY_e5RV-e(OC)H;ER$<`>A?Hq+L&*ruB?w@lFSRv*}-CcnXV5t!jf;1{9^YwPZdsLtd}g zF$=QKqClgS{Q=0l75UPhhk#vqzY9!xmiKYP^MM(Tf_y@dd`O*Bwj7s4Kw(>j56Upb z%EdCi%`o}2-v?zGKSl-u%SfDM1mU%0E}SXn*wVf!VA{{H!7z&LZ3K4p78p)|-v>Ma zv;}s&*YH~4?S?m?D_&)|75III+kme%+z!0ka0l>*3~vPfh~Z7Zw;JvQ-V01SyKp(5 z$pZhA@pl2Ac&yZCIp!T+sdAkL8CR|az*DVV&H_e|y)wT>^{{ERl|wY#&2?amjG z%kjs1-vn&_;~im`{Heh7dDio6;OW3VFa!FZ%oS_Fp8-C@BH&{TF9)7!copz0!|wt< z*6;>k&JiL(T#V(0Z5%6I5I)O6KK#Z7;XD6iS{HQEuZZ6b?0n`b!z{}#;0ip$D03^Y z%?m5;G>o*edkmwTWe*x22mEEjsGrG?0D~ubTt9+J(Mg*iGv-Oi5Wfj#{md|$r0fO5 z7z4}xWElQh_6jgp~uy8m0{X%fm2Hf$V#3cy>s#0LZlxQ zaO^2ABcmu9yh9FN1IY8dLbEm<^ABk9sYq%#*+;8~-Q37~Z@b9O4lekzXc~0lAa4$mTe+iKf$=AuaK7_3D zZ88s$=W!p%!Ka!HxHEPJaK{O9%vGKPgJX(YOYt2kVjq_~!%hJ6eMa(IaPc`xoW->T zdWZ*c@qHBH94?;I67R#s=NT~#*>3VaFxN@sCxH3>5OE7I-^V-?G>gl^)ip120aTAiG&#UFd3W--#c7XBy)H{M*QeF=s6uSb3j23t~Ysz?U&BA zz(VNW3ps>?mzW&LesafRVnRL3N~DLDes+oUyDa_OqI8Zy^!M*udIRtawenn(y@2RD zsqTCy3qxOlOb=)dSLLZT?f`QhLOzXk`#cHYpF#%ixdYjFe*!*&G6-d-$ynq0zy}*n z08a)+HG3_%js`voG>gl9XDSC=13uiAd6k`F7>nAnrG~NAC_CRU))-}NhH+M19su({ z;?TDD0MniAZgp zt}Tco9Gtd$TzT0aQb?fBv&>&zPciS{4CEHJ$~o+g4KUcN^XZJP5q23>hrO9uqL@IS2eHVD39S3jCnq zCl-6LZyEm4V(l-Kd1|p&@i_P_*FMNU3w#}Q!mnR2`IaSK?5~E`Eb+u_(A;SEc?@{0|3Kqg~&BXD4N6Yy5Uoxtxg+y#4f8r}+>yA59onOh9^ z1AoHsyMZ4t{2t(M8h$VElZGz?{!d_*Jq!JRGCT-8ro(&&_%Op_?gc*4 z@F4JMhI7D64etY9VVGY|z7Ti-_3Q($aWcT0oXlBx2h6Y!ywh+3_`|?#>lWZWz--4Z z;7=Le3j9Tr&jLSeco6t8!#Us|0p9={_5nZRWX{I>LQV$w&xZE_mv7Ya^2=3+8}@;x z8cqPuG28-ts^Ki~GQ)$w=NZlcUu1Y6@CL&?yxnNn2krx2k1{5JuQ8kjz8P5hBk(5- z?*o3oFu&CGWy3!3BZd>eKQi0`{4>K@;NKb^1pW)~ji{d-@Yqd;8?oO2%&{=H0_!}( z-b%~~fVnr90B$sX7Wfk2k5LbB2k@=H{9%cHhnw*ZjLB2+O~5hg0lpoWWo!ZdoZ&3+ zmkbXAKVmos`~)!X-3R_5BV2hj%@N-;LjT# z1Re(d6;$Sc9|qpbdb<#3dcYs&y}sP? z3EWCPLN)MvfV0PA9R$3M_(aVAfG-Em0XG37dkA3M2L1_P0=U<3?iBbW@R8)#qE8!c z0lw967WmVK2Z3|I+o>P;5#TF;`Q@=EfUg2h0RIg5T(Gmie*nIQ^1$T*e1-Cu*GvND z9652CSK%A3Kg}yU(ePs6C5D#(Uuc+fsCL7Rz*`Nk2EM{@3-FDGHvr!bJOO!i0pAP! z0oafQ&H;arHlL2OYhe074*180_W?f-d@XH01Mdt1W39Y{=lGmk>;#)(Y&XuDC^HKa z6yR$C6Tp*!+re)EJ{nkT0ImVv2!0m$1mG=@&jFudGLHhgebIfui@~1+ndg9)8-4+} z+3-ui?=p-p7M86uTn4z~{9v5`9|GJ7oCCHH!#U+-Q*b8u zw9m)22sl7m0(dDfWwN-=17?3v3}4M}{F}cv}mYeZXNM-5Z1IB)=d09^+pH+-vxK!0$GE zE%3q6GY9gw0JAI%cNq48KVvunoCD@~(Sqv{;7OoaT#p;R8u(emyMTXZnDze(Fz+42 z^(GVwYa*$uBY-#49$YhkWjq2tl`^oc5=l#ew@?P50T@Bvl{_D~9DfNgqUCja_pCR! 
zcRvVx3FNne=5VF-UfF|d2krxpApquYcQEt<(+3zX1MW9IsFc@~qcYo(*I4jdkRB~pevckE<@fH9!A`R?3+&1`2z)*Gtj`?q zEx@SD@*Luy2EGTB;eKH8Rmgk`7=NA*%y(<90!{!w1txI|@H4=CR%L;IZ#V}$W;5!D z^#cdA5PK8&hk)jmLij<)?J3*4}3o`>tq*XxQ|499ky7$YW$lJ z|E|do0zYAx{_s;^w3m#XzXbk})z$OBX{56v`OXjDiAdn$^Ya$aEUv#ohI!?HW4($W z20qB}qrit5eggO{z^s2C_b{&uK^c7LXI?o*G{im(c?OU}ZZ`Z8&|_7H{1(MKff>#Q zH4#Q*T=Q6nF~f^2UzF2p19t81{LpvOHfnto_p{v#YShOZpJ@#6i*oiO=iB7DJw_k6 z2YJ!|6Sx?hd|$+d1Ie?lX!B(e`D+dPRHz|#TfxxrZ^liI??>>|2tGK14=>ys`zPS) zKDoQ{lfcIT(>LaT<@W2CdJ6bQfhKTqu7qhYecM|IKIi;7TrJ?Ezepu61D`US?=%|r zfiHA=fEiFfvWK}2{5!$p836UQA^r)(H-RD?{3(-1JIEM(|9e!<$@46OdAs|*hIHO{ zm)!?E{+_b(=QSW*0_qWH|B_^Cy4(FS`S zEHi}e!?9w8$vpr*lPzQmm>%&JCE{^9PW7JLEoXa=$x(9OppaF}Xh+K<)_0{fEi@@c?osLGERf`?JX% zwnKcCQ$6orjYsk|!}N)7MDQaK{M`utegyw8f}f1wA4l-M2>y=<{&@r+0o&lCVjJyX z;PdhrLFx~)4f~)h9|QdaTSXFld@fTc{G*`raMSr2%c~sepEFGI`3Sx@g71sq`y==Z zhGV$*ixK{pA~m9202gFM$_=PRI3Y;B}z%%ijWD4NBj80r)mh`q%G)e*j7! zdJ*_lQ2NQAfRDsQUw8?0HZIo3bd(oR>hK`|29}rSoe1pu-f@sY5PWI`H%4$eg8BVw zC*Kv}^Y_0T|I!HmiU{UAmriD9gufeeILE&w!oMSexu@x5J{#db62S?-AoEy+|KkY$ zbp*d0!DFr{+ycOnN+0<8k+=Z-Uh<}psGzJh zy|F7P<_Jo6uFZI@>yoXT)@HVPo9CVk^ZB`XQ`owGQ)aCnZlK1^^U0rIV5c{vy1mUO z%p)70d2v>N6lY@w1x;JoEP{gRFG7djn!X;?LfDgnZ5ul`btZdSH)fJOp_kdTc7MjY zj?{XvySq1aqG0JRudBT$$zPO4C2k6nsK(B2t2+Edv}ZruoYWtBMy+lwVtJcSK9N@A zGjDZACw{1z9|h&GwIc-^*+w+LBCxn8<07;onNDpEGZ9m8Bbt)po6>z=d#2CZlIqyZ z*6%V^srBo-GwUI+*4key#TA96avl-Eth~HOm`LcY@912U>PX_Zr8o6tx>4P&9iT~M zEAp9jty_~FnN416R}w#~opR}75+vYAjcDJMYzG&q-?Y}jxem;ua}^{lLP>*j9hm3Q z-&7$HWk~1B=RA*I*TmjHP)F}erF-bxJ?XB+Qqy#3=^fz0#AVButf*hP(yLc;?t<0H zMax#y*EKC`Sni5rDy;At!>tSI>gpStycP9lLEPh=i zwmiA`oCQm~l@}OTv9d`yjVqTbSWtJig60)V6x7$%E2vwwLIH2j%TZ}QLVfv&Sq=a2 z`1|G=b=_olsyErbt~--ShV5(6EqXjq{HA!f*U_2AFQlW&@QdmpNK%{DZpHx92T#dl zHg@%>UwB<`NZc&F23{sNtvq{Sa(P2ja?y&0#>V1BEw68~Hl__D?rUgTTEAjx!%8h{)XZfi6oS8O`O-xzMyNuuUU6=+vEiI$br+0CgZ||WjVl_O8b%~p-K?lPe_`|D z#UqmCrsTQxBP*4)eeR-@(C&*HnwO7kV@q#dUcVYG-84!&o5bP;%g$+DQ9mMUvuq)p z`Q`6QF76OrUj!Xo?pM>K=&?gIJbVq zl92|}q7{qlPB~%T3CYFTrsLB`Yxs*N^O#%%r)lq49#`;sqG0MpWja z`ty^cjez8%2^NhUyI8Hs6)40AUCYrAM~+L-o@|1X)s2vET7VHl9@?WOHLWg@xUBBn zQUkhm=FuxqhWiDJ7OkjXv7%wcNXM1*l5Xvubqkk`Ecqh%`r;LwRTK@1WXePK++@?T zbL-JzMi*h9Uc6xBg9!5)Pp0FkZp>2)QwEt2bai4z{ti6+-%&_7{`hy`Z%x;_cjTwi z$8X@$#m~#k3VXUYr}ZDMj5;ClYna=ye!u4$+1|FRVkUQXQC*d0kzc z!S|&*bV|Dg^|lp@fDkja(CbOHc5rPYw~9rW>}}4lTprdBn7K!UyVq>u#JB_hn#Sws zNJ?ZA(mAc$;;q#y8K%2aJ?RbmuVTFJH7cJ>0(GUjF>%B!u%ok^Dc!B>H}nv6WY$47 z^e0&qWI`zUgzd6c$gp&-CVH^y*@*QerjQ#2CQ+hxU9owgET{NcSVeI*-4kNgkJ?Oa zN@EG>VHS((!Ni^jmSomp?SZnWqd|a%upW|0dL>t=E_)aEup;g3&Lr2a^ERzZihI-6 zaP!8EXp_#)&SYnMs?X~##`0QZ3aL zZpNaG(`tz&=_G0B_V{6Us%=|3Qk&qS_%A-FTXUA~%tnzzw4)O>Dv@+2`a`A%Gx%O* zbYNAA^*rjj3%-D!f~MKDuC;rkCu`ufnU2;inQkoIuo%YT1T}^7r8`n#*ovbQFj75O zsblqs7esJ>n3guv)qK~Mko~4B)7{gD&b$e$K(}7h^}f_THXTq`s6J7hQB@)LAJ%tw zs*#&InF%^TnBJId?M!awzb0blx<}B}-P+mRiagkit*z@ksch*2$#mCdFU+Jjrn=!p zVgg#pb|AEUjqO(H`!U%)jWOc3q8lQj(niT-Z@#JEp;*ki)?iPT27`qPq*fI6q+8){ zYckmI*p%vOcackz&|CYg(IR5AN>l+*nk%7H#cPwKf3BiVRChxVbFNqfkAb zNg-ZR>D<_wMyf5~ISSU!k#W#Q;IK2@?$?Cv{^>p{{P?h)Bjt%Ak=e9)Bbm{49sao# zmv@lXp;4o!iabYYOFqLdehP6dm|xzxvD-)3Jl{u+_GF-Pj_M(oy25tK(Lhup3J!ON z8^bS>-CL9F-FcKm7nBXA0w>+s+1{F=2*%EKQr**`M0yJx67TN-%l`*U%0MmZGU*Ju zNP)2i9lN7*J=4~8cK4>bG2qB(Og;L4wy>X+*+>Blr5I|{g!$H%-0(0F1+sTS)YaM1 zip?7!_SJMMy+JJPgrb7_aMS<8+Pgr>b)9v-yCt>UN~EMD%TCKy;$z2;wk*jdsU@`? 
z*(uwy(}@+kY%58}m{hg8Tk1wFb-5o_I}t<8!sLoshN)qhg@$FgHG_v}7!!)QW?f~L zS=1depkbMzU_3Jd47Uah5nyf#;sgbpaR2|kzprYaud5{)*jlYRzy0m^`*rpi7mFvT z@59J>cEMvi6!UXW%Nheks5w#S*`2Tj@wU83+6DF6HWLin=olLg=E(w9(2f=m4z?FF zeCER}AEH`{4i43^V-vlGj}4b2#WB_LT^dZz&MY99Y!hkk$Q+jExMU;GwCFo7u#aYF zHbL`?q2&#B&_InInnrU2$3GaQ1T6=WLb;P=ALB%nx>;|z|;5=h}CKy7fed3`) zv~$?^zjS7Pj%$l}_&mBc|1dunmMWn)H@PrBK6AoXMvPp>j%sk}PGKBfEfNVs9}Y9< z)}q&+o#0AcsB=CgMStNq^2CRPIT&KWTng(>^loAdy@c-*3S&oRWvvkcRE#lSIeim? zTpvZ&&__Qe;{eZxJX8z+Jv<6Sr4OMYOIEO9@OEX;z~|Ws1jL6LLp~bC=y7g-27?#x z&Ft8M!&)iF3Mhfz01|ws40RBhPNOweI6Qi6YMS9e$Lp7Q`Q$zDdTp;fO_iS6Gjm*w zqj%znX3Q_rjTH`IHFg}S{0!PXZc(7M;|QDP|`+i^b5^v48@ zeT@g%ELnb@JP;{pl=9XqK@<9_)U;_KK zeRQ{be1QGK0v7W*kdS;OdRK!ZD9`89SMZF zOL+8i0}PQ7r~(GmoPvqxTwr&{XWRR5^o{#*7}=v7a-i!Y5oU%Qj3RxEj&})J>*t_h z{`54bnX1FW`R!KX8qVL!!&pJdB zaSY=p?$n|FPE2DQkM3+5-4$A1{+t+{K8pG>{d8~)&C%3xG|}_HJG>~Mt(!L0J{Zqn zIOIL&h<_0qC%$SpHk*s%Gj8^T&yx92WZLv}cnS$a6Amrkob!fJbP9#((0=;x2Ny%G zPArzNvx8l#h2vq<#|OfXPoBXCH9WN=CoPe06~}!$;{Ol%>~}R$3D{rxjA!pVwTOHu zpzOAv=fa*Db~r=&vDXUT&{<5QkiDaWZgdZu69k;YlnD$Ir^a{fgvbYIdg}0m?;ghv zqbtJ*p}@^cMC;hpJW4M{oDqAu?B_)$QaauY{Kxu$PY!mUGsnm0f@9qT-UhsSdi9uvyGzCoBVTya69L+w?=@;B?_IzInWWa)z>WOQ}X6Bv3f3p(^Dn3T+_aJp;aDdZDI2l|HOLt)o>6h$wGElpAc{irUx9dTI zFj@5rb6g?_ek@%GoEneU0jI{f49L)6Y>|W#!%64X#CP1C`6uU|DbHXiIe9WMdOJ8f z?W}b!%uh|vVVyI3w-5B-UhkDnLL9tkCm^1gf-(^YVMz#y$py=8_p5{g6tlZQjR;XmOGSBWG6ITu_bSc)& z&rf5e9}{ObLJP+*)@3g?hvd8q)i9Kj%S3@iHY~e`s32)?ohohjbB`!mL4y$sZil7$ zW!(-DT*htRkH>C%jo@~ugmia{&gfCR5Fn*a^JHu4@+IV%Jy%{vIE{npaAs*5@4J^q zpXCCsPk64mQ#grZKA|(u-o1w@eWj_*0uz#QJLfG6+jY9KHNi#_52S=z8l1(--{mr~ zQu!)ciXoBm~-_lBh?l7)3z>tTbIo<8(lg%V=M+zJ* zbedDpa=C;aD79uxZ-Q|QzkJi^8_Kzfr*U?O9rn{0G|%Cz5?9hN&~tOo;%p3>(h_z7 zV8SiT9&uPWbYpl&Hq61ORVdBR`at_pWU5tzZ`*@c(lGdu=Mp0fh8RGz66Ti!x5Bb? zSt8SRnuAri;oS;}PYIhAk@$dgB%O57%z}9PhbbuPl<5EHY^(-n_pNq zIVQJXIvpmrcM>3MIbpCShYnacG30Nb6-CnFIV4tm1Q|ya^uVoe7GaK!0ng)3V-pH< zFD$1V8@RP?N{;2C^B$KvK(E{4;K+^lB#+>fiv|YAi4&~E`0aMihsBeyNsfho?3fRz zW9cxV%0c0vHlu!w=wy;bp_`c&-4b0YrXSeQqa19>cL!c>#dwiCP<<7 zArOWop{GKuZyA5;_)S~slz2r)(z$?H2I_Wv zeq5r}E#13WUz(VAAT>i?T-tm{SeVw7y%I}uh`e)d9Kj8<>33mUXT$#YUD)$;coqfD z! 
z9L`}JDJ*6&-ksyJ7+#PFO)j=-#wVr<^Ye#haqRvW_BBVJL0>nIu`rruECF)Omh0zS zy_}kTnum|Da?e#?E|3Pt{ge9Sy_>_k7dYnR%a7wCm?qLu3`!48xbV|8&3U+tY5On| z_x=alsT*u`67zDY8zx@-e+Q?)A?@{4hd+;lbzFa@rG4T6=T>w%&F?vK{|kq8W{|p= z0W&4AKFFuG=AO3Z9@XUdQgr>JcL^`GjAA;014*2ubCZ0Inc^RKtSg+F3PYgD6LN;i z=P34TxovhR9I6T7Esc%xxE5X|L5K+wk*K&r7sZry1q;Vq_?A^=nX+EFuU7uyn8&*f99 zGU&O$dy^gP9R^s}*(sJV(;J^T##e|2ZclDW^Z?=Q?al)^9cGr#%uJ0ZTbS@@!Uh7} zKETH7cElSCVUYMkgrOVk7{{vi`0`dL4Q(O1g6;6)9;}Cm&eD-D%*3N1ppKGR=P})y zE`-HAu8Sc-c<>M&V2y=F*#z*E@O_W(XX+qoVaX=7ZW;O$*{=v8TW*l^t|otsUOWM0 zfc?+w)tpEd84bLRC>evI^Cl>$Kp#< zECR^5LJ2#oewZJ8_IZi=)b^gWY{|NEDtWvKj=4~LkNJskG!6?X=#3x+SW^*$P6&fI z^r$BpwLOEkccjU_eaWt!k_i%KoUc19Z|GppqqHy|!hNgM#qc_s%(ap4^cn42SaKuW z?qh~J9?stRq)*j*Ig@vpY93C_Bh}}}7PYBJxrMVmQ6oav+HJu|0q=Jl3|6~@BG{B* z%uaHH(l4ya8+90MO=0g5&676xFjMk17Y7|#(@~yy++sdzx|Ve#>UX7YxRj6Wo_>vgH?Oq)z@v{p=wBTekeq8s7jR}Oj?uCi>R&j)dpw`~I9^6O z_`q<;-6hR@IDQj1^g)MheN@(2o7oETAop=>r?Jn+DR*g`99oCe^E?h>Knpd0W~}|_ zTi{4v&Kb+wqe48LR~YrDdk>8snwsW8E16sSL7E&Fik|(51pP}`Ks)AFvr%-?Nq-Fe zlCgsb!d6)9k3_H<_7N}XK$LarqioCx-kWg6+gC8Wg~qy)@*_Q=X0i0cN{mMF8i3|W za4HpW^*6QsxHP1ltPu;LFGw3i1>Ksk`w2yZo2Trg{AQm`KW~;2HH=+L8t&`{E=h84 zr6U)MCaSmkv4eCOc#anE!r(&oAVaA97G&h74oJ9%%Ft-d35$B&LzFfnq0)_kun`;A zFB{(FcVas0sBvJ8cj6zLiYs?H-s8Ev+nN}5jk9}lSX0Yn6J|f{o*mCoHi5dwe2L4; zI33o?r3err$BFOGb*@8peAW#F#Qo!W{}NQ;i3bkmLxHy$fF5Mk~;=<60bb3kqvI)Ys$3DF$gr?gn?v*X#s;SNrrcK_<$LyatjTx~JEmk)_ z8AUh7iyHpntE8AOMOB!?ob=)l=W_8u^pC&;vD&0~i3 zZq@+l1Vc&B_xUapbyVPV}=a9uRtZn7=w3kFAN-9|FY*kb0~aYrZhFB_RFSW;(_`Lw)8 z&V8i=8s@gga4>60Vt36W&YC`z#~bAj;&20B^?&p;PlNztt>{?{lCe96v&Y<{#>tfd zGb`DO631C-dp_&ir}pG{>4F&LbKA!pb3AFx@e(Tf{nY8j28CHy=NJ56Xb11GYnV6NOMV1JJ5j&R{HSoAe*jvZm8XWP@7`L|# zbPg}14EW&b0J3ek(;e#=Bu+wEVb`$}e%TTk-P^rme8Y2GcKVq24D*e&fS?WN+90`o z*l7#s_T=97ikx4B$=)577U@Ko48Rm3jbkLq7G1O9Vi(-m`4MKy{zsljxObf3T89L$ zKO{Soun#nIm@XWdpG-#a(lqkSzpindizIZNit7$c!*EKJar29pQG~FRW)?KzF`VsG z`KY?G*l>LB4=edVgyP=u0;^Ae_V5kQTzlG=?tJ`FOJ2JpB{7`pgW1$#p$|VM6m;BA z;^}+GXlek!ozQu8&J=7%hv*3F8jBcw4Q_@wc0^i2V4(YG%_K@#!n>6@RCp}hzvI_i z+0!O>ZlNy;4h76Un{2t$hk_wl(%W|YZy)%R6FQvZq>gOSKGh}MXdl*+nzXDNbcb2X zR9&*PqhUf$GCwgLe%i;Z+mMP5sUxZNoGy;Qxp}zB`EO@8mkvmk(*ZtySy+wkB`P8< z#1)a;sfeH@6_LJDg-X~dTQIxrUTB)(C5>&<`k4wBUFOw57zlmxiH~XP5*qephq(i3 z`R$s3mpL4e9`>dOG~4bCE1+}9rMHtWv5Cy`U>(H*H(z&%u1oW)jv7xUp|#RwY>XBs z>VZ1-sGId}KC<=i>VWmTy{t|P9y*>`v%J|2#f&ETcnO2P3h>Uiuv_D8_-5&f?1V6h zN4_CK+jm7aWlc^43A>_mMYb{2IX(lstcd=(W9!{-z+b@-nwiNW)NPzh*)pr(!}mQg z+#b2L?*?lxgwrM!UB2{4o2FQi@E&q>;G;dfW|ixHz=IaexDd(KwN5Tsq`nn{(x|PV zQw-^)R=;V6NG9*Y&0|jGLzeg#v$2okN465|?a6)| z+rmqzVTyc{3D}j-_vpK-5EB^~>NdO;>Vg$_g~9>3baKaOi?>DbmL6I`b{I>OPbQ7& zEW5;rE*=%?=E7)ZiTk z@!Q1nbOO^n6PZI*Tv@{lZo=8Y%ZH`yjkXPDSda=zYI@MwG50m1y($oGmoK{n&~>vs z7#H)K+QAa%Zv)LABmO>V#j37AXQ`h3s&fe=! 
zm_fx4FGBL@ZFn;RZ#LYKYzcGbt;v?oWI)FKAyV8Z#Z6xG$(GrLX@5p#8FBZ)y*PsM zY&Xv7T%15mRO)TJH(WByWihV0%AKQfuWWZ4ohj{db6RFha6LB5DXe4esM3X%?h|8c znzk5h2{)RpJkE|i!Ks8_)bq!6eUwD>AkRzrHR^V7M6x4TIC;`(P~zVZaxuDd$%0{Q zw3Cdf;K0|^s?$>cmzwAr6#8X?V{9&$O2HQQWe(zi{uM@|ZCpGd`*VNYe9?43=ejUG zA1~;tguk5>qGJ#6%>cXH9UEW9N*o^ULR~66bYl51eSb1BJ%=-a?ed?>I0mh2Il5Gs zmvIYwE~!G`htSbdB;={AMRDyGExIm69=+^xMP5W*io8g!q{xfWD=qS=;7ATZuf4b$ zV%PS6sR_rd>ZVdQcd`;|Gs=bAZmF~+TT_8cXRC;qR#%p-DYqnBp%)@5+1loIwsySf z7T_pb8K-W|81=hkD7CL7H)-xlv$7Kelh#p7-VUW0LB-LmcHJ_OdLbi2h%f2#4$b6u z=jxKOkq&QLtgd#;i6up+lNjl8L;Wi$G*VkT&RlVk5g%?xES62u0UNNA=?JMk0;mS(7Yu$J|PP0wY$k*=nFvK^Vj@m~82 z1lHS5=A_cXM@t>Cb{N!j&+A?B;{z;cFB$(`!O)qddfA~f{iDK&8<{RcS6s}@Baxj$ zufYr^L*a`O@BRqta;tIH!EVXXb@{+r4Ex0zPX038WMGae5kCLGrmdrQ`N2A>jGw** z-zLUUzp>;OT+nx@eI;Ovaiy~W22O|V!A`eWhYXpW_&)68@)I6Qr`^HC=-Ai<-puhP z=5f)jzI*QI4bxzLk`t7p3?Uz}^{(DUt+p|Q7w0c;2z$>ol)Zb%`(<2P-ib?wU>R@Q ze%;m0FTC5oj6J;Czl;QT0EE!*2uHz9@Nqo=#ob@OGLv&QncWTQP8l!v;px$?$I>-` z%&Z$b>&0#DOPpo(XGpp8i;Hz+PAv-`1Gq*6C;evTl6&t>_CJ9y4?Hpa@rMrHo?u&) z^vV04zzaYqBEF2Yr6zscj*z_~2VJ7IY(+7WC| z%YEMpdKDioQJjKBe^~@C>s!EWdN_vSZ|S2wUSQ?lY4H|G!F68pERTGtfuF{~Z()3a zcca3Ah2x|Cckb9Uu1&+eQ&@S&?S9j8D=R((gr;}~ukq0qPjKLs>0^k)9B$O~dKy67 zmmThp;)SvPf<(PR+ut)CF5se%C#LbS9sc4?Hr`AsH(DISWoF@~+SE0JbbfAbeiA-T z;+8<%hJiQeVb%o0AwoVl($2g2XM&xC>!^?WTtti3(cp!z;rd@Rwmv!W-t@$gj)Af1DI_~Cqb+dbi`o)S1L(>b^W--9B!B;^WDPl7p^on*;q6o0;Cqa> zmFX|@6VDJmGde3zq0GWCZk)vRG<>rr6j#X}Uvz0Pmif?z%Me-QdEJSBS7Bo8xKy5_ z6G%JWHy)1J!_x_@kx%#+d;BB(<*kM+<0Xe1opD!NDCt_7D55`J*L&zMKowoR=28)7 z{Uej1miSwWgBvAWzbp5j!yL)i$O(TVI&|8y0`7H~p26)dxVFEYAO2JnGeHu!V_GzP z>1^wwBJllBeliOyhd!OzCLk(u6D^Vq*P#1~hqY5&MO|9pyTd*!=(!IZOD^;nP9!}R zR^|F-Q^8s3d5I-#vDrp3=Dpy)W?^rlqX&0zIH=$=K}s?2wD%1NEV72;vI+*A9?K<= z%s&uwj};w{QqovD@kU%Q>pe&21kXdNw;P3aIo9$N@&;Gf&R~gumiOlRXKf9%)eUU} zE~!Eas*L4~<*ie=e3REfME)7vF^a^RnBD2Pi{LN8!`IvjV~4a^hwj)UE?4DAKy*{# zl6pGCB$5l4*{&gZSmu4Z%i83V3b{6hdpsbf7LtI^PvgE*-u#KXC_-^}rD=YIz+Y!e z&8g#uXIRVe(JXvoZWO^PO^o4Ho-x$V-S`w| zl?#=XxPd-}#^>{D9)4mJ%c8(mMvZY6!&lYSR43x%uwGK@E%26TbfN74_(I9MUA+|L z=n(EeA#Tg)JS>%muX>@u^MGAy`eWnzNm z#^Ep|ZY1^=aA?VYMh+Kf`EP&F5nkxY0foPo#TPVQ8^_B}(Gh!}*?7R^!tCL(!J(l+ zG`76SoTD%H+pJ5xj}8n|7?tt%9z-czsR|F;4|JD4`YUj;Z;n28;q-(dF%mz3xb7IOZ;5vZA{0yT1 z0s&v=pDOJMGS&13@$wz65Tu+CKqWx3I z_0TqP00gIIFw7jqmAK>XS-j7A1h=ZehO}OMrzWN0zj32YWYj)}X3ks1oX(HsgFTYB z#Lnq8Sbqg?Rv zUwlW%n+%gIjx4A}r$RT!5t$zr>p&rl$|f*uVMpe@i^dHTGaM$MIh#NeiD8l_yc0gh zOP|Bkzz>@+$i|0nkZ!2VY-%yi^ABhc_=dDYd4|U*>$on#pR5T}7>w*Vok346p;Hc( za1Nstd15CwhkL%IaSA{908R*mJHZ=4@C6Y&^4=$uC0x)gcRC}$_^t%nrdbp^{7}YU-Z2txvi9Xb zZdDJ?z$~nx=^K8WpT2q=Up&OOJ`dja_`$Z0GLy?k<&eK{+FSEEzP>t5*>KY0{ZM&y1fyQ{PIVvj2N%ATB*CD)EQzxb58X=>ZOtc@Ip zU{w|W>-3&eaH^#)i)n0e%}0lb-u_#xh^LMHk^xGWTJRYj&SapB z!6hDM3T0%;vaPOl>ovT%RM=kuoVtaVq3CLp%h$2Za5LZ%yYvKM4gD291cp#C=i0aT z(o@uv`59SD3h!2g3IwwndB2z}FQjh49c&>=OrDg5r3tcZ*truC?x1bIq7YC|igdLZ z7PkK(Hrvbn5)1K+$2oDz++KhyI#&>uPMJu%=3qiD_s4gJ(*QqKFtMO^WTy9n*;Nwti1 z3la>5u;|5IxCt3j&A{xdqN==*&?MHQDYTVCce zqGcs|MGU5dysaJAM2A5*eZ@=_qixJX!+k2akSjdk&RKtXsqzj#uj1Go%X1j9$QfiY zwM%{noq1z`voe@o_N@whYTkd3K765`d#|b!_AX^CB*^Fk^j2i_5#%5V^}T~4ZVPsF9ge* z7c2m6bKQ5?+F}%|%efHBW4dg#?O#f`U9w!RyK39<13aU-eP<{hR(qG*eh9Ot?KoJ9 z|7U0Xf|-^$PGiE=gnsT$VTu`feoa5igk2zN!cJ9shIM)Z!kF0eL+Fm`5|Nq5~4)1{AyE{9)5B%U@haUoe z1$>p*9|oUU;oJy*hi-PRh~LUbog2q*@jmAc<2U=5b3c#Y{t@SX0l)VQb=>;|_=h7t z0RGj89|3jL3eEV}f zKK;l)30`x$$4!d-HSpROd)%V%AA#3ZdmM~-`(p^`x-&hlCj3j_TL9;T5p1s-!rxAi zA1D*&BVG?~32(X)@k2h>k+%%b1K?|N&YcsceVT6wAAHR&WN~ z7x-U-`!}wHW(dzOfO{968vxV%uagH?x?y3>pS%4+pO|0ivJu}49*Xz@@UZaP3~veLMOfV{M&^5AzAWs+dkX0xeA^a;=Op4U 
z?8EyU>Vxnw<%=jU;JAMEAurd^NZ%jC=dt=C!NbX<>E0Dm()Dqi-O-5kCN~MLZ0y z3;!pW?>)cLHH6i@^O5;pn7=6O!+Qbc0sSh|YtuILmraz1h_42-pQZdR%HLe+`oVE} z4+yjTuYV2wuJC7JeoX`QL3j#$?FHwG!WzCwFx~$l$ZsB4w*JYX6L| z+OLUuwcij{`{%{{x3;1ETh-$(2&?^z!Z&V5{qO5>?tQ_(d%z!D+vBpr-vhpSbC0VU z`57ax3I71h--yA=StG9ttNWSv2mjRleqohYF_?gOwaR0r7J+3MIb?};@9(OU~ zesE@euz$^+knihpLtv)&li;mclyBjG4ZdS=m8%H{1%umV_Zc0YQ7GbXsUVyyk)gC{VM;GhWzk}W{5diPLNv_*(E# z#Mgmy5$^;KM@%`xr|xCJ#fa(tr0{3p-z{5vTuJzg;M)g!Tv-@DUY)}Kbnh(WYhQ8h zwD50$dtbu%TX+-7bnnZ6o?za)4k4+*~j`G%9u<%It)@Q09@BN4wBJSnX4EeZcGm|yc;m>W-g%(?%7KX)Ga z3_01C$0oEQz82h!_&PAA1a0#>!5HGVnR16NQr*jf`yyTg#t_@f{|bIygZ#@1`}{tK z`GN3zP|%q_MPXn5E+RjLefdNGR2RMh=C56g`DMf#!RX?>e+ZdZO<`ZYGw9!heR)M; zZ3+AGycwCfDs3M_67QZbzo>vVZ=Y;yTG-GuLIYGHGR(skHG#$q-Wpi5I^!da7Os&z}Fys`-NWz zUt2_ch2H^hm_&OO@j5W)w@fcJKO%hN!!SRN_zLd>e`pf*H{yP9Nm#>M5!Ucjg^$7h zx)S0m{3S5!d+n;=|6h<#f%^ihdwpxtoDsegZQ1%o#Lw`M@I2)wvHm0c%izHV@=N$4 zc*E;xZ-n=L5^YQq{tG_|UUvcOH(-`uRSn@EgM1MFUl8`=Q~2lJ6U_fLE^`|A*%Md4qf`O~Pc!rx;2s_^f9!Te2+V*LLS%4@_Q0%wFDhJ4!@l-G!F z19N=N^!NeDvoE7Pig*h+FZ>nAch)ez5dItRmRB&Ih~!%#=lGlMY4`?&x8+fv&cZ$6 zFM`*-iuO(Tr@`yL3il%Z5V$1#2IX(`xKoBJ!r!I5iS!iy$YV&Kw~+tBUj(mjp*=6@6RZ`z0T#)$jDoPW?gwVxOM7R=v*!NG_zUlLaT%fh!j zf$;BN;Z7O(X=9)3!g2Ud3u}F?36H?NjYX^<3j6#XNBo3+exAhop<#D*aQ|i4UtdOi zg}(;=(DN%?Hsa0TA>sF6Y?Zu#{1To3uRo3a5-x+Uc@gWOhEECqEag??kMMWF>)Z-I ze`0(GKZWsG4f6gFzQ@2fyn^-2B=ED~br?ML-5A=>m%tkjqI?PeGI&i1b?l}f@A))% z9{k~Sdwa@oMfgFOzwUXo=fXb>USC0bApCRS+tFT}i}*HhBjTID&4_9Kg7ELrJ+v=< zH;3?C^BIh1QUCje?*eZ^d%-;v&R;(|f*fDO{8#uCcnt<4BM~Ry$%sD;E=NrBr-jeL z{<`Nee~tJ$a1H!>kbMJxy;ZEw2s?y(U9!@(B3=XTyCs;v;j@rqFj%?}CxF639&v9}cQI&f9^y-zy#;q&PKBEA}Y zHsU?t^TJQU{JJ+#rUycJPJuVKoXbVL9XuTIUhrhZ!{AcHkAW)@7r>{5{|xS5|2D!C z$*+ODF1+!3oZEE~ya_xhyzwcx&-Pb%4>^bW9`PpdDd7pockRc1NW>ompB8=z@(&-t z{)q5zfCu|g-y*&Td_LmM;0wZ=z8CIqMt#{5(u2GYoEO&eGa~#2$T#QE-$%S1T#k4z zxEk>=_)Nr)fzL%;0COFd<@pV`fBk;+f06te$S(?C^*Mxh81?;*V4l1WoJ9N}I2ZBf zz{A2vVE%>!=)WWRCdkXe{|55GN%Y?l-vh2iyct{4+v}e4GBM9 zK>DFQ;F|Ey!M*F*o<{O(AU`ksJCJjHb}{0+ zz;0Uz59LpQ^ASG<9udwKk$x{CJQ42(SB1Ywc@^ms@osQa_zlQ+yo&M{@!jCQ?ZJP_ zH-j_6w~WGk9pQ=iZg5ff2Pwz+>vY6-gU<*%Q8nTJ4fEGrK=_0|bO_;n3;x|1{3mY$ z_X~d#{J|FTQ`pzHi>NQc&(Z#K*bf!{GWh`3dm`=yj|l$`Wi?+h;rzXka<7%!9~ zUJpJc+&_l)rLw|RBi;k9348z1U!RS*7hD%E!2Y^LtOrH>0q}*0DQ_9(f*;H4572*% zFZv_i10E1o|FaR({~=-Ze>h_LUyPXkPa4KIP`&?}pJm}+LwK&n;!h*ub>Q>D--3ML z4UFd^UJt$?yk*?E54^d;wS@PBGq0h4-VyvGuK{O-wLj-NGSh?1{yZ=IB+ReFVo)RI zry$>z4u8?`q_FnqWnt~lPZ{$SVeQW^M7*w!{%tV$r~P>*V)o}r#O%-UN|=w2_UAQW z?avz#Z*QUhkC^@W#faITyWPP(+Gl^BM9lttKv?_p;YiN@JTI*Mc`0J{=Zg`uKR+EY z`}3->_UH9T&i=e1to?a2V)o}3gtb3!Ma=&EqOkVoeS1QDwLi~9%>KMzSo`xNV)o|) z!rGtbgtb2(j+p)VWW?;x7lpMyKNm6k^QN%Yp9{j;pZ8&%mF1bt{yY;g`}1tX?9YcH zW`8~sG5hnPu=eNWNY4KJl(6>aXCh{QUW=Ihc_U)>=jVmBKff5s*`H^ILVUD8?~j=M zc`jo1=Xqi6&#Mu$KR+w1>32?8`|}GCvp;V|%>F!cR|pTyvp?^TnEm-s#O%*=!rGq~ zBRTu?Nn!2JPesiB{B*?Z&l?f5KR+*=Euz1A5&hfUA$(-^=R?BUpD#wt{=6cr{rTyL z*`J?{nEm-VVeQY|M}mK3_U9SlAC&$aUvBi}k<9*lL|D_mD6IWC?vwZCwLh;%%>MkG zu=eNYg|$DwAgulQMPcpF-M-*Hnf-a6u=eMP;Q?Xo&$AJ;KOYj-{(K~2_UDtr+Mkvp zW`DXUto`Y!h}oZ3BW8bk#_(BT?a$9g%>Mj>u=?MMnEqcBR{#6%3F$+o|4GF3f57k% znDg_0i~e%`+Fq9zJ_k-Ry>3!?;{@azd)=b2Uk~f=b#>vpC{KFbIbpn@==!(zy7Lj= z1ilFN{k0aLdjsqEAs;cv43#^)^j-&w)>LBxB(HR1J#F+RX% z$2s9U!Pj83qbd9txxUJ^gf+Mqg?;)oSGmkbgMU7K-dyDpVV^!1a6U!Yr%wy#Erk7k z)q!4rwv^#5Gdx4+KSfSn1FlBA9$brVx(tXy)h%;cCQF%W&iJ0T_frzgMXCtQlobX$8e+26n5of@8 z!$slMM_|5)@oU5xaLI64IHCDT><>kp0apxHh3}^M66$lr8F0;TU3i4%7m*(kXTS}^ zP2pLZFC#x9&VXBn-F+c_pQrg#NdJg4;Edr!_&?Bm1>?<#GvJ)C6E!02%j=7nAAort z;tklxtJQ8%_<1ntY@p{M` z!v1`24e@&**#GxvSKj+7%9G(7nB~u(zb&HuGV-#>UxWRPlfABHxNgj!6ZyAc{<>+j 
zZ-$$~JJC+uKi}(G#{R`f-V1qle+b`aZ$f%3_PR5|{=DzWUf1_vkpDL1{pDVlM0_nc z2WEa~{D(#Uf586_VKcL6xMa*PirmNlGyR7i4~)(3sN-fb5h8?g?yVF2?5 zBd-ek^Ol3?-;Mm7u;1SvLi;HEn+U(}zlFaE{vg_xp-+bRsXQml^NCw9mb_r(?vY?# z?N3I$4(=@qe;9q)ttk5!g&zfP%%eSiG}!m&0S}^oiFh+OFZ?B#UpIpOS=gV)+>iMw z++cpFstWt_n3ylth5dO<%n#2;+z)n-1^aKp{WT@b_k`8G0b$?2l`+2tvpjtMS(w<0 z{ZirI1YeWIenZ4-z$M}DLeBlwMd3}KNBfw=dY14LWUMdbAD8+M?jOeZHR5Z*rHCmn zM@)G|*tgI56|O4$D{znNy=R2q0JZCW<75+FG^TP{9KJ-Kg zugXse{~*oRk>0}pgZx#@Uy-LwKW$%!gl{{E^Bd;2!+NZdpZ-+v|G&}vTI|n!IjFL%wElh2t2M@%81Wg7qa~AHJ8JON9OT?;7fZuzz3S6?7-UzXSI+)v-Pu z@j7r(xbL5#z4|KZTg2Ca%Mnw~eN~2s<|#id{1Mtehw*8|*MZMQOnE(G${WJduz%BQ z7=K2*4xIUH@K58D75;IUzkU_+JK}ZVVd39mc>1tD74dc8$%rX0MND~Fc-0TU{#vYm z2=4`NdegaT#OuIkgpbku1;j7n>%iwCro0g`?MKAx zz(d08$~a#!jP!{3dhoFDJ(M3n`4>J0zW!#UN5t#ECE+hYe)9n8N5t2I%fb!Hx1#+N zcA`!T`|>*38THJJsXgb&Sn< z_ppwo?s;=)r@jvJe^S z3IEtWj3*JbZN2{@9~S-?xEG0A z5$3wl)eY1~!?lRlL(cQGbWiQKg#G-6nX@X$mHWYL?{t2b7rELW5xLIqP8mLJ%vVLO z^E)gFdH0pi3hVstoUz}C+cNzhyB+O4DoaD;K7G$)elG0a|9ow=e?QK*pW*?}+s65q z2>bX>WBzTJ=WS#2Ja0?(@hz=(CBtQ5|GsD$@e}@Kg#Z3in2!r zi>uu^VgLSUWwmPx`}m@9xghM{AANqcYk}kV@;q-GexBzg`}l6Y%Aenj`Lr=#754E> zu5xD$pA+`)YohUK2;YzJ4`;9P=Y3=UFGTWQ$on!OJU_$u?z_rmgd5=N2d{Gd5nl~X zgug{O8n?W#rss&TkKbD;Z-!49^A%wqzt(DZ#_(BTA3rp{b>YqTqYrh6udw=mK9cuB zeo^=&%x_+c#&&&(@1|`Wza#z;Uk&E_jZCk*DDOl3!Et^L2v0$NEe1zJM$YrcH2)>| zH*gT+f8oCauN%R5SNO&Uu%7S~#=F8#g0n@8cO%{oPBsMhehTs}7<>%~|1o&oxbx?S z>7ViiBd=Z^?5q5Y;o`=$e9~}oOEj2ffM1c zLH>c0NDpC6&qZO)&$6(lXH{6!^Ng^jXH8hs^K8US&*BF{_%uByg*APWeiL8AO~V%q zS2v~YpD|p#E-jxF*7Q6TG1D`5eK4=-nU9$1SrpdvtO-BxAiDY&G5#0U@pw~M$Kx&G zLp1*q&g%(31LplCna#m}<$mE8D1RB{U)aA7_*Ik#;TyMOy$^%?ys%&IdJX6Ig#QHg zH#D)HW8@b<82tMV>}oe5%+^n3s1nk zHSb{kNLb5TUHIoAzw;E%dqunk+!X#}$k#oO^EtwrzL^_Bcr|?ogs=S=;vH<7tFhKVspPreN2kfJwHFiq=HwETg>uT8flrZO7G1rAPJbkNzTzN?NH^qJlO!o&q9Nb?N zR{u`-Vh#xZPTU;i7li-2@X+cYzw?$L$8aI#9)<_0@MHMou7i93Tg>OLN^?_q=+@xg zB*yAAugtqv$eNycjNKXjZ;1I3VGVx;d;`pDdR2urJ<1sC$M$(wNSxlh+arN}HQ$G^ zHd);p0LS*T!s`Di@J*5ZGs0>=gK`wx?-y4499w@l@~NNA6coad0WOuva4$n?fvF?x$3-dm$N1-=~zk8wo_-;HvmGDRY z=fV!pvAj=M<*YxHZ`3c%pBGm1n11{6 z^5u^Q|FNB#vd?!DnV(vJTF6hjr{O6gVPdX)G`Oen;aLurN3AbBt3g&d&ti}@KU^*? 
zPlPWV4&loQtNEg^x?dJn_p8Eczb<_6i1Vr`{Hyx{yZeIwKO&q6|E!oV-5<(}wqF;7 zH9R8^q|4)JVQt^~_ov&hqT!0Lme=!!xmV8iPxE5{%<`=58TX3g_KSPPWNp8=S4`IQ z;$AUX-Ajhk?bDF3wnud%ucO?=?G^WaYT9F_u|7Ly*0jNVJ%37jE_hRDo;$BGHesgbwtnnHCY`Q(>UO^Jcxfj6ts_DnIeX`o;T7BHUa&4Zh za<09{?I+jT$tvfXHl{sZ?S{U#kA2V~Yx}6}lTz0-bP>s<@-c-%0Ab}r^gVRfJH#8a-^6xQ&%)#-Z2 zH9VFJ9XD_dkF52MYj|WWHy6OEa)Wj@X1>Es)^fvsI%clXk#$@!436trSy<~C``Nf0 z4GFi)iS(mdPy5kt(ml0*795we^TKNXqL^3vS?OohKG#szB4xSZy&CJ~2!QCRJB zO(HJ0iLmBR7988>JIQhQIp>edHRt$oxeldt`Ttk8N6Kk4E4f&2)R$3qdmJV)qL58q z{Us|2%=7-ASiOm|L;bUAP=r6^onGbdONh;X40-D}2~cKYwaGK?Lqb`J)n+lzb;(%$ zGRiRBNsK#xfN-q9KlbHR9J5n>Q^KDhuVZzGxWh6TtHVAVh=lR?qQt9Y!dXQfV|Z%D zo&PNHVwsH97m&^jC*QxN8o|7k3P;MIR{3=qKB?L-;bGp#>MWqgZFbj*=79YEwD{{x zetk~N-!JCGdrvFbFc|d*K7X+;YxLhQ;V<471YEaMg|~wAvHkl$y*qtN+@XP$Y%q*^ zAJUoWqw0)24*PgvA5R7HyW6K5&V!h8xKhH&b0bt2B#OV~!xQ!kgCzX_E8-5vs#dME zg+UCgbZ8K|s4)9nDp=`cf*Fa{`y|`o=hu<8zt0jY4Xzq%;+btc_4`eHza;J?H>52b z@bTW@8fk5}&6n2=OX}|?pCudId8`k_>W6*0ZSePa#`^2UeJm-b)JAdtlqnLg`Fga$ zudl}z4v4>;tHo+Y{H>e%_$QifXftE=i0HF#iPg8oeYW|r`Vn8QHaNEBvBDN1)CSiy z{_YcZxLzKs&q+8FQ%?saJY0{B)o+M?5$k-hnv?MK8~s7?_l&d`YUeBB4%fqC^))fC z?TgAjEatgh7OQ_L`N(y#SiL6haD6FOUj(rIc}X<0^81UDzg(Y+)eplRntxq1?}&se z-NuN*KWOc1L-@aYMEO}YJ99}ei7 za{Du|%yj;HafD-7st3j2+SP&e|L;mTo5tU-h`)JLZl4o>2aG#^2z~nd0puH%7T<>@ zUgwPY8^!!KF~2)lb7e8#ce7|jKP%g=Mw)oUZJcQhp_T34u_Jy|!hg!Rk1Z1@oKunU zgWC8TDG%jaR44pCBI%aJ`H5H^74y8GB36Gd`g~6|R^Kn_%=;x`#WgR6r(yKD)<%7Q zAcv2lZjtcw9)?)mB>pA?u`b>V5%;rqruDxr;o*G~vHD&yUpM+E#9!X85UWp1IERe> z>*7A|Pl#2I)W;E{|KpNhydNP}`^0_Tdl0Kp$(O~rfq{WmeFU-k9Sy(H zzhC0Z`vzk5$KpQk5s1|t;tt>MkJbMp`sa=Q>+hi{w0qlQ)40P(^!c8CtVYCNzV9Ea zqQr~u)yFC$?(lv5Sbay#^S$v{{hGM%jQ&X}SA1_iRvWZD82y^~%lFh{_0v)w`2KsW z9u$8EjQ-o=KHo==)t9u~-W%&;E-dcwee+oTxR%3Rv95EUk#x9d^#4Nq<$L3?nv-)>GDsi9hGskMR#J6uK)^+YTMBg7Uj6_$7 z`+UzhR(KT#3j2jd|2-1lep$OwJHH_A^Znph{b$K9?tjJV`!xJU|DdD;-@A>~&q+A> z{%x#&U(%EB@y6*J z@qN%(ZIbZwz1CR$g6Ll``d<BcqwSQW(m+a5uD^~YPIj?^#*2S8hq%+sDWAzi_4&Ud9)lW(|TaU!L&awD$Tz5*eH_Puq zai8yP#Ok*IG=Eq$Z<5~ysb4&oAFJbXnIFs$2LUkLaP4>ukR!6O(tUg7m;u4qws6UsxZ&+Xfvg&1;TndY|&?`5`gwSCIYl&8QdaQJ&Rb{rw;kfcl!ls(%IvNqvo(>VFgM z0QD2`LG_13f57Pf5Dd_~4nNiWuOI`dujNzq&--|N(B+IfIP&Yw4;y{%Wz(I!(Ld(X z^MmbtSNAoZOy57m-y+hNiv0iL?`*`BXONy$l>aaM={kyXMJgQm0{)s2{~0*6o5B8{ zgL$@tiss3Uh<_J6@P7OUMLD??@$Z2v5&usx&xKOa{GZ^D=RB#Db$x{V7x?2nrBv#E zGh({$WZi*sy3cz?smO2PZ!%(nrHlY&nA^}?&c1+@HiRqUx9fKB^Ay8 zHU4;y6_qmY^-yMfD~5TmMJ(re`IyW1b(rbF_fo0oKGWMD_=uSAeNs`*`0_nZDl+5C z_c*E4KHuY1_US48y_(l@NM?D`bv9Ht@5YccgvvbEO{MPhoOaASHyg9AOVK?2*L@K6 zpWnHlQup|s3uT(;cP*3|zkz_>Ji{x|^X)9%xSU+$tMPfaoHPDwJ5Imf#9u9vvplp6 z^Q;;bfBqDIm56DcXN#ybe7W_3Y0THhYtvjdJb8Uu&U2Vlv`_bS-I&bu<+(~KGShch znriir=NhThJkK#I(>%{1D%1UUo1WJE;1j_;He1Z!T*Ti5ABgx{VBT*`Me~1yzwwAK zf=dy98+FB>uQb6>=4Plh9Adv-A53t*llp`v>%tJ4v) zzulLHEFWhKzhby<_%*}l4Zmr)W%wP#8$X3WKrz4G#@|)~ z6q#M?J^~b(V+(11J+taOg+D6FKZw6+0u-5bVx9n{d@^90pELZt;TH_QX!zxbZ^rYh z5#NI6*CM_Z&u>J`uIsIchw=PQ#05OBWh9^&{v!T1M?4D7M$Gie8P@iU_9-tKdC73u zu(p3{ziQ+)!*#%_S-;CkJaMp0naNcmyaLI7laK&)daLsVt zaKmuZaLcexXqham8X3&vsFV}KS;INQdBa7+CBtRI6~k4-HN$no4Z}^tEyE7;+&I1& z!--+Ms+TgKGt74js5JeGhD(OahAW1vhHHlFh8u>PhFgYpfU4om$N*G1F`PA=Gn_YE zG+Z)VHe4}WHC!`XH{3AXG~6=mWPY#l)%5`7#K^OT!+LuszjggU?H7&tlHs!9is7o^ zn&GoHd*?oHtxFTrylXTrpfVTr*rZ+%ViU+%k;I zw^H%T7)}iHT^lM*ubkn$;iBP^;j-b1;i}=9;kx05;ilo1;l8z<_;9?>Gs;wqA9=tq z&n#1^eAw`a;Yq{gh&i9o^RRS}Ts88V;kx1T5pzCr!LTk|(|rzo`7RQbvYw+=&Kh~n zaNcmyaLI7laK&)daLsVtaKmuZaLceRKx_OmvLLOT80MMrIDK-4^M;FtONPsaD~79v zYliEF8-|;PTZWyiziNClh7-eC!+MTV?dOfWXt-p!Y`9{$YPe>&Zn$B%X}D!rHx4xX zJi{O7Z(>;2qt$%Q$n%DahD(OahAW1vhHHlFh8u>PhFgZQ`H+gQuD5FZ5+lzV*7Zg; 
zujdDqi^hD(aM^IhaMf_laNTgjaMN(hux>H7_J(w z8Lk^{7;YMF8P@$0jc?}aba_Y&^E+--YCdN;Z@6fr)A36TXAS2J=M5JPmkgH;R}5DT*9_MUHw-row+!ohBklOhc>v|aaMp0naNcmy zaLI7laK&)daLsVtaKmuZaLceBG1mC<9uO+!#BkPd&T!sv(QwId*>J^h)o{&l-EhNj z({RhM`#?Ir8N-R;tl^yDykXvBLZ#_hGF&!XF^Fx)iUGOPy#G<+Ej>Z9-` zhO>rqhVzDthD(OahAW1vhHHlFh8u>PhFgZ+rgVHWh7-eC!#Trw!$rd-!)3!2!&SpI z!*#>H7_J(w8Lk^{7;YMF8TOl$QFt?4B#1aMoHd*?oHtxFTrylXTrpfVTr*rZ+%ViU z+%oLsJc8y|#&BXdYdB{(Z@6e!&qt_#Wh1W`t{Scxt{ZL`ZW?YG#?iu5e)C=^Dh*#^ znD;`(@|@wk;iBP^;j-b1;i}=9;kx05;ilo1VV9)ilQEna&Kk}c&KoWoE*UNxt{AQw zt{JWyZWwMFZW(qrI>H7_J(w8Lk^{7;YMF8Fn|N zqH{3AXG~6=mZZ`2ZoEXj;*7KmN;7kwx@T;iBP^;j-b1;i}=9;kx05;ilo1VRw5vz8S-b;jCeO|6J2A zZ{$V8CBtRI6~k4-HN$no4Z}^tEyHe$iNE2*aMp0nu)gQ6=}|QDlHs!9is7o^n&G

Q!+FDcPlLK&GV-$Fis7o^n&G z+%()W>~@;?8%_*o4d)E!4HpfU43`a83|9@;4A%`e3^xt847*(>{)Q97S;INQdBb}5 zs+Omck(Uiu3|9@;4A%`e3^xt847))Sf5VC4tl^yDyy2qZlHs!9is7o^n&G&Zn$B%X}D$B?KSZ?oEXj;&Kb@dE*dTwE*q{Gt{Scxt{ZL` zZW?YG)))A-yk>^d>5&-D8qOKc8!j3y87>>H7_J(w8Lk^{7;YMF8FqK2o1~(7^4W;Fbal>fGh#07v<$nup$SFzD9;#93}+4J z4Cf6O4VMg;4Oa|T4c83U4L1xo4Yv&Y3w`4Fe*_S5VmNC!XE<-TXt-p!Y`9{$YPe>& zZn$B%X}D!rU)XKu|2`9c!&$>Q!+FC+!zIII!xh6-!!^To!wtht!!5(^o^*UOh7-ej zzon*E&dBxt3Y8a)ykxj+xMH|!xMsL+xM8?yxMf)HP1EpZ?oFphVmNC!XE<-TXt-p! zY`9{$YPe>&Zn$B%X}D$BeKZ~4jN!y^)^N^n-f+=y$#B_l#c7Y&yT zmkn18R}I$;*9|ueHx0K8yZcT24JU@PhI5AVhKq*z?Hwvjud?Ba;i}=9;kx05;ilo1 zVfR2fJQ>4@;jH1D;k@CZ;gaF9;fmp^;hN#P;fCR+;g(^y-^AZ=VmNC!XE<-TXt-p! zY`9{$YPe>&Zn$B%X}D$BJ!s-@I5C_xoHLv^Tr^xVTsB-WTs2%XTsPb>+%()W>>e`l zH=G#G8qOKc8!j3y87>>H7_J(w8Lk^{7;YMF8Fn8t@i&|p&Kk}c&KoWoE*UNxt{AQw zt{JWyZWwMFZW-o648LXg z9m8us4o9I@;g9ZbHoVpFKEnqLKV^8@@JYkZ8-CI7%Z6Vy{F>o648LXg9m8us!PrOf zH@wyGKEnqLKV^8@@JYkZ8-CI7%Z6Vy{F>o648LXg9m8uMHt{#S)$l&U2Mj-Dc-rtu z!!Ja9J%aL*;a4N>f&4YYZy0{d@H>Xr9_YlE?rk=_)$l&U2Mj-Dc-rtu!_OOj(eTTL zUp4%i;WrGwW%wP#Yd>k?Z+NTWeTEMhe#-E);gg1+H~gaEmkqya_%*|C7=Fv}JBHUj zV&ZRjtKofy4;X&R@U-ERhMzb5qT!bfziRk3!*3XV%kVpf*FI|EZ+NTWeTEMhe#-E) z;gg1+H~gaEmkqya_%*|C7=Fv}JBHWhP5cdSHN4O80mDxjo;G~a@biXWH2g}$w;`xs zHT=5aHx0jSc-3Q__|mc@SBF;HoWR_ z6Mw@u8y+;g-|#`h^Jg* zhU5EBX@1_w|ET9rdj7QMYdwG7^A|mD_55YeU-kS=&&8f^_gv_Cv*(*Vf7kQZJ%8Kt zjh_Fnr`7Y_o^SR1anGOiyxsHlo_Bh#?f>Xa zsri}Nwq~%Q?zgqaMyJOXrZpT4Zvhc(iy6D4rP-N76L-!|7D{uY$4ZEm{FFw=j!qzc zQ?nCe^FAJPKC<+G&W#>9GCOeuE{}_dZq1J^7N(9vFgNGZYkth&VxcrPYV^8jEfywH zj-)8Kk<#)`sLmmHeq?&)(CBnwY-Zv3{KPB*b8NIU=jJ9DkI^G@E+p46gmd)B#GH$h z)E%Cho-RzxPfm`{y6)*mBXG6=M}z6;$NcZ?4BCNF&3?qN-IBP4;|p^W<9E3O_us!`Zz22+ zhTm)lRewky_UglqJ^pE^usi5v3%lB)@HAA|sXFqIRh^;24i$xmtZK9$1`9hnX0mrl zc!zv=hYI0$Z}=S)#Bb0aEbP^X9qp&xsO5|q6R z%22`o?AQ~2;js@Oe)VBT=V`a<4TgtoK^_JRyHrOW5c{283n2)i_5(y-2SEuErqo_H zRYE(kN5Zxzgw6l#7%GI{z4E(Teh1sXcLf6owLa|FtKMLEKuA?*M|c`4>{3y9 zK-Q}fc|cse4w5@a+7BoKL1(u2cANW8uSHVcftP-hM%3`XGi!!xTKKWr4RT> z8tWk=-G18ZpOATNo$!P_QJwIBSgD3Qpe~B#ohp$Bgk5zy51ps%pu}RA4-Z`Szi_V| zKL3PJwROT1LZv$4VW=>u8tsQ2;R()q5uBCZoxvy)U+wIWCnUd^!EfAE>NsNiE(!8Z zAH>}S|2r6dkfK2wEt@{<)rTE>{1XDII`V)F63H%=ga-sTXtW=YW1_XEu+z&B2eq`L z^OW7$J@7jv@H-@ZhxB1b`)RM&LW--X{jeiEA?d})F8_>8YuRJRKZcjRL{!>)o5B=8Re>CLt{{~q6%-~NO z`~!zSc@hK9VpSdyKZJdUzhdxb$O-<)|Hk1jH{@SC{7r+;OwIoyjfVL1{I`U^zDSJ~ zgFmECI;O9lSOf7i*s=&@G_>F;rTfIpON^0)Y>ZxIX*^Lej>KlHxC zA2#G)IQ%n%KWFgQ9R5i784{Hm@k3s9_=Ukw41U+)hs#L#kKb|l1A{+r@b_f@m)8W+ z3YqZRG=9~153kxiwevlpZLns&t9Y5$)bad7K9~L1^Vd8TPIxAPHSJ#r{}CXm_52-4 z{;FHZ`%$@jski8O4oR@q|4{CKUU~nfG!*z){|lt)oS2v%N3|>8E3(F>`8=laHx~%WSUHpCVA&g znx;WXpNz#?wtTHuFmA>tMO}Iuu#acJ=zFIEAT)cg7MjdluC%RD z=;(eS-Xxf{`^6Vu9IAfwMRE?BnM2jjOMIyMS))%IJ!15*(9Y^Z7S9=d$mjz`?=w1L zbf3}PLX%0eYm?b^!0ejfd5^-bn^BMV9Orq(^{cX0e!Gf$$)q-9xNDE-?W}%8=m6@& zrf_y(6rjgfcfkmQB#?{Z8_;oh$H+R^kKUw0}}MYW>m%VIM4(Lz>)#4}^I$ znG10mjxd@unpH}@bMQ2KKeKjF`$uoo^Z|6X516Wbx1^_bX}hJWwcS$N-zNOFNh&uT z<)gWTM-2_9@)PCWqjXZa_Or8NI-MyJMnjDpgxPE_+V?6v72dZ*=ja)lK5BZWTYD%M z_1vMT;HSq6gL+_MKAp|s;~6mO{Ip71+4ESBXrMlZaHdk%-($%!tCK7E@zMV3{Cvp# zwzK->`RP#gZPE@y)x$=&31$7zWZux^oVL?`wUgs&n~WzT>W>I5__x+0^L`zVQU9hLqnp$&8Ba&~`G1I>gVvwL?HZh< z`Krq97BKh$txvFLp~TzJKg=53xj^C#(}ZAN()vz^-$g@axyEKJ)B=OIpDT=D?|h+c zu#G7V>s2nZLTS~6n0G#4Rjf?t#^t*uA=q=C(5l$Y9!+hI-R^OQ96a!-)T?U3E=eC% zbG=Eh_m$wxn|7=4GYd};%JGJO-UfVJ2)fGRQ!JnBx6`bC(f<1#+t=oUgQzbx(l4s< zxOroA2OF z1p0aJYe~Ft;S{04-We7@(c-VM_$w@~>rZIy9PDMQFu%?Z!CuZXj87IC?453L3>;R`tO-(ajX9x-{MyPJ*Qb* z2*x?RLj9N)1$%V-HmDk`v-8dGbUrAaC*1tdS-VjE@orHk{x=`~S3LjRjAUBU>}kJ6 z{gv|3arpEY`8ggLsE0MI@U&eE^MkfqbbesH=1tS~;rL`cvp^NiE_>sPFQVx=U(yq! 
z_HdpZ&;Ba!5TvnA?9y>N)S=@xu)QPa1(p+NH+Zf+1I&92oEPbtK}_x9yvvc&V1DV^ zH7M8mwZUA`SH@4s>9FvE3F>EgeP!y0EMN3#w|PN@`cE5(=$WAPZ{Q@15KPo|YXcFx zCa6DBujoms9Oo7GpW8nSn0(lx^@{SDFKp5Jg}&zJyv*`LpIQp*Lc=nNdO^94-+X;z zI({2YxAY428`eYHPv?Vt`>k_wI$rbjov7orA#Ldsb$sXRTdCu}4aP}*+f~lZ?+wHoMfppFul*L~`@&~yp}iw|<_cf^ zBFblbg!9e+Bl6M@^R++2soL+{XHauERsCXczwLJ-`P43*??=tw29RCq5!ZQtRHtii z0BXdpMxFoNzH$J?`YflHR-hi5>zlT%RAHo)!*UNO>D~wxR_uRN0yj#b&i-%J+ zz0i)DKiGd*=D~2P&a;{QPgwjsv7;crNaC6OPg=ST#6tRFNgv#QMA8TE)_ErMFBbl& zR@38nl78|pD&b2%`AbXq(oa5jMd*)+9v|`0e?8ep@ui>qx)Q$hlm9y5hf^;QTCks1 zLC@VnWIf<~dQjc7+O%CY=ruNR-C^3io`taY!rUKL)8)T_80XMb?dhos+WuhhF% z*WqMVipM!}k@7@@XwXKH2`VA4)0HjgwnVZ`h&zTkOBgcO$YmKVrU{@HERQ3S*DX90|_f ztIA&ZE7?qcG?GqzoEcsD72Y33&dmp3U}GXwcz+OnX8!})u2XgX$+zoNou>x(->2<7NiU(xwc{J__FILg=gQ|Id_--jKcuk(78FMiUk#0S9|qb6Xp9TFZA_%6XmPDdd`XR)m}UAOr_SSy=O{%aR0t> z?8Rh)5Y?ylo>9VAduNyMeQ-iwH-b?;YHv-6yxKcU_~BIMj@mm@;)DBlk7Ms?k{&Hb z?R{+tU+tYy!uP=mecdNS^{BnJuZsAg+H2?Osg#P^s~e}m{kM%{ukE8E`qWVA4~|F&`L)%|m{9JN;`#3*0wtq}cD zzRz-e-G4{=lG@t5QCh}3f>Wd3MoO+Sag1(C- zp4>mbr2eXJsic?c(|u$(RriMleY%fK?$`ZBG@t5QGmgI1!Vjme6k5=?O5(}=y8kHE zS3i!v6{hc!B7MtEpYDrF^<6oRzDCoxwn$%t>C^pIslF@5(RaD&yR=B(Wu{N}X{GwE z8AspMrthjEeH%=l?h8xxHIJjO$@Hx&(zo99HI3sh$#L|x2tShdu9DTi}ue(TJkLlC>cB#HCC19P3PYW+zjqcJQd!^Unp=Z7axQQulKK{Lznte{!raqrIV)%4fg83u5eGU`?+wT zu9v}H-N%Isb)5|M>i(^8PjCCS{5`$y*9!OaT(z*yEYx){*sJ@s!acq2(+cCIEr=OweGp+u6 zUSn~q{~q06g)?=35bUvi)lA(N1bcKp70%TCK(L2ii2Bd8`tQ;GR5(*RDA=R>sKPzH z?w`V$R{uS^Zwl=_aj-}COW{nb{~mStaHg*F!5-Zog)^=Gd+1Fp-|D|d_ebviXK?32 zNzdO;?O7mkcOO-_r$1Nt!Jhdl5B02pyHEaG@97W199mfAX*&(7dEVgN>X)Z!wY;I) zBjTsgdz7)xsq}xgfO~)D?zK8=pOpOWo~^T1?+M(!V1;*rj1PAoINmw3+0L^EQ3k_! za#dw|F+Eg!SjuHRS+!L4(;8Pd@KW#mvBXpF{EN}&jsDK)vqFPSQk)kgwSinex&Emt zJIVY0E&85;?Fl<4uw#Ox`mK`>HmTpbe8DF5bKWb{LxPl!gZ#a`4l}Po-3|9MY7gt< zL;fJ@!4Yr}+gEyGg{SjKuu1)$dg&iQQvKceNs!XzQQC8odC+8K=``dh4>ku3V-rq5TJnr!}H->XU0nLk`xX#Zh| zvfZgKRs9R`lj!*P#WC&~`92D!V%%LQ4mHXMLNL=Ok@wEhPBo`VJl`KRGc_H9OzJUr zmc}70^;jZNuYnN+-b=E8s>(7ck8u_iB+p~A=e}PNs9LY8Obv1G+cqwkEB3i~8t;`@ zZqgn#S zf*Q^yuy;1S8|{MrXS=xfT2&P`zt^Z^1haL%E|zPy_hUf~It(G0O)X>TQ@;q(I{yYK zo0nmRlxzMxV}X_H)`Q}5by#>a&NsW<``_YnO>X1)e-?WxJnhe*M#raXx2g%Bk@O&` z18?KWzn8d6pM>UQiv~41j;kg-X!7R^-FWh^gif;lclj#32ZUc$rsF)AZS7rS?Ojyw zstG!7hw2{EdC}Tw=-Qmdi`r|b?vU^Y->2z=2Q==!FH%TX{=6%n5PJsqtNwXahx2wz z{oMLJTiZY1&(@AruAkjLphouxoR6gcbozq7y8g5MM2+q4k zs?d2jc!_p!eo!?*_2>IFsr4ADwtg+DkNZBBd#{-9S8LyVzwWnwt$MH6Ie5VOb-{Zi zPJd*-Iy-_IT?c~GZT?5Pw4bdvRpqW-w4h+N`fc71)ZM+pyr7N|%(iu@sQn7_g07Fj zY@Iku_01G}^ZlE){w>lsqe<7DAf+Ad+M&>nHlLVZ6A4}gS(72E1mK7=cIhMejTe^ z&S<6Oa+G5p_#c<6e(2gWsImPZ>PgMP>9!wSF2x{Tv>%*h`$06H^xq{a=f1B~h41mO zWrLKq??(H+nk&!U8@TU>d>LXsI*hW)N}iCw;!*Hoh;`Eo@cOal(uA! 
z=GZ-DtU!>|^M4!Cr9PYV{9oAr&XWSDSNt)h=K!u}QlAzDGA^wPq)Gf17O=_pUiPu6N2_DcCSzXh$QD<8^5Pf88TkE^u$Ip<%k z$Jpa4Xz^7KK1ENO-VA#hg?K^ROaX=S#4L?Nuh`SCa^s&Tti91LqQBMJ`x4XNYVD1B zh@CCg-bfbxantWBec9&;l*bV52kQS@&lgqR`Dl5JC({?y_>9W;(xg5!t-W;EU_4lR zx%$kUrSi}s^;ss-{JAiHuE6_z7U9knRTg*W`j>ysI8E#w-2bTfe!6U;w5y9(m7ROk zKEch`RTC=h-0_SUSdg!f^G6{*=csdt%ilcVr+OZlEyb6fN3PU*=g%W?{bg+Tve zdp2y4s8=<~_II;&A`9l&d8A0Ld6K2iMgl@Ghg!zcXXlYN9Y8_a);VOCa?OutTe`kx!ZY`9e3I{FKnr z^T;QJpFfYx!GwZPa{jY=S5@f#c<9;(#BTy?r=jicd~$SqxpT_ky_%liCz!rMy6!9H z1z(f$2km5-b2!UJ`q|n$?>94FA?d*!I~O>=F}b|oxbw&y&JJVg zQ-08<`}iO^U*+KD;x~ar3+Iv9P ze#v>HN!Zv2yLPdC@EqOmjz2F{slGY3Z+HD`?U(Q0+1la39L~Ox{x!L(NwZat>)+z@ zi23u3CY|Tqd{;P^*uJ^V#)sK4c);520#Tp;yNnrqcAh+4?H{_-`k}_^fniF&a{K$i zdu)6;ealr}AzjOv(WLkFGx}`)I$i5Mbg9`_bUqn1J%jg{ofaRp{qT$?oyRx!HA{gd z=ao6uPsQg(Yp<#b?LRjU1Zf>VL)Th;+iV=U_IjW4-Td$7lX-n=$I^Z~ujKDZoSvd~ zA6L(XQjd8XUN6+mtFAx)_s%Q2|8eEN*ABCFXd+fKhc1Z$`g6x9Gg~a>}P<$@%4Nz9{~mme2eA3DA(L*7p(7d*mM< z;~u%${yw5uKKgytm-lxTx>0oRN!<7D_pf>EFq@_fVzj-+WQ_t-3wKy|?b?_;$=?Oea8km8HQSw{e6gx zV<^bH($d)ss9pIW;T*r<|2cx-M_eV_ki_^hqw!hFnyTmC)qv_b>qzSWuxh`33C3=%mfp`y$|V}rg7JwqjNOw-j~u37&@!>Nx1E{-MQ>82wM9`hK^wTHot- zRv$M0<3_)2^f9A)&g-oHhQ+^T^s7d7{&#;D-dX)cj7)OvU}2ZV$a^J)gIF(vL@P60v-J zng7$|hst;DG_<`%${B4_{^0wc*Z6*obDnj2X|ij-%zl-dqT4`k^aCc3Z4cXfRJYmk z{TQBT-1^G=><8z!qkA;p6!UYA6Hd)8|C9xq?%oH6_FgHd(FP3bzAW~-_ljZNBO2eU z`NFy{S$c&q!aDV{U>2>w_cCg~qUrW`gki1vSuoA?*Z!-eqaj3I{V&*~3rJWyYU!rG zR{b?N$@Eu$LDDmK>bQRrYQ)c~9~M5_f$^d3?GktWGkCA|i|eOoKRbVp_6PMm1U)Xl z%a{2;uj<`T>v69Pxv}zR-ao$FSpGD4K;^5Bd%b*vr1`izWqzKL(sJD21Eo5KrTn1| z_3xn$E9X9yn*`z2|IM13_Bh* z{=Xa5`6lepbu#SGc_-}nH%r%fD(ukTQ3yM9z6(3_@A|`zBPRC)qdNbE9pAUO&Vymc z_bjgSX4vr~i6?hx|GWF*0o0R2ICS5KG5;r@{t(vcJ}0dHj^rOezp)=en_rVVD24ux zUUfqeQ^g?E+2=~oOx_InoN{l3Cbwf3v)|IuUg z?_B7M_Al$@eorGh53oLNA6J+MZ~#LH>$Jav8XJdo+V6pVA0VvL{txVZc37u=P?$&5 z9}4q``bA+LQU3^TqIE1!{UosWvSFRBcY(b>4(qhPf*Rc~gms6dy#sp>9oA_-1~q!_ z4eQjOgBsl*hIQ)KL5-dp!#efHV3w6%t9}_wv-;Pne+JXsJfwaaOmp*)`fD)F&2#Fv zL4KZ7|1HdO>c_z}?eNgPw-wyTUZ6j!UkB6lycE`|p9RzOzACI${|csA{cFvyrm5eD zwd!}lG^>BDj^ALK)xTE9aWKv5U#sIMILYc?t>Y+T-{a&w8i!r<*TtY4KraS$>n8p7 zlZY43hc9OyJ%oHy^g6=J+-LLEelQWD^CvBH`svTySGalcrRKjWwmy&Ei%bZU(5*5L zQV9b0E75r{V}Ee&#`Tl=dEy!IvrJa=xxbrx66>gp`?~*@^k{o6foN#^KkA7i&Ur`p zwWZ%@`UBTXx*1nm0b^jWb zf0*T3y;$y_|McRE8;}=8;qT7gici>$PC!*XUS~fI5O(OkFnZpip5pHdvV7`se^1AF z1)i#jP54A#M!uiNes3y`{THA%8ydg)8-YK2iQ$;p;gl%J+rO(ooNcesaQ<>flj6?F?7y zhAy0__hAFzv3}u1Jx7IS==)Xri}26T^O-xZ1e@w1jeAQ=4^k^6?%t;csg;_Jc9V8# zU8Ql@C+)IGqMrL*fHv@bu-xw=EZxamh5ciAislbbQN7_QTE1I12GHI<10T6B<^(RcLX0 zM(?pWFBXsAm*1|so{X{)5ZXX}Q0Eu!{T}tVQosV+dlqo<-;?Lh;pV3YAw$*f`#b!8 z1y{s*HQ&J}{FIXlf;+EI(QTDCsBVh))hZD7>pB|H;Ss`qT}J~>FcQ~wG~mFJxUQqY zW|j{5wB4iY@I_qf&2Jomc|Q`$t_3$~h`w3Z)iwQ|yw#(Z^((lD+3V5=)f?F+V= zooOswl;3LgNOP3np5qkT?^=0JLQKY0za)F^yh(l>EdHLv640<;%c+`pfp^RIG0#uD zP39S{yX=p7Gw;VI)){L*wm%&xtk8Ketk8Kdtf-|J_-6{G979;aQBQiRQJe27Y<{!% z0AYnrmOoUisq^Qpdbq`W9!_38W)R_MKdLEqUT zAL!Q$fd_LXV%gJ7cVE7R%^!9=w$Ol$uK6V*?{wE96X5e|qD zrqv(Z@8*Q|J@;Uu^KV@jgNe@n)lY(nZv5zhESSjFp+5DSKxPU0f%Zo*-^w>X;y9&c z`SxjHZj3AY{hlzb{#CH^3`=)*sJ|8LR6i`(q5fB}Q~fentnCq|)gOb2=s1Kh?F%jR zgZhQ*uQ09tQ|JfvmqI_8zvTNt$6=wLbbJ>2LH$3l_ZnfvTq!?uz7X$8G=Cg$0F-{U z0rXc1RKIq9$oY|By!RW;u_658B+mbKzc|#P=g^^!7H$u~*ZuKOhwht)I#wG$Zgh#! 
z`9|j&^^Hz9I@M^UQQ3Y#|MM?sInNq>+UOCZhmAgJ^pMe<(T9vaVDvttBS!Zb-7R$R zWL-a^_XoRiE_dI*bLVyLW1og$H{;<-F&JvQCf}^{3eN{zx#r!u9+H?(JMIwuvneNeOPCQ z-s5(5*!!%G!pDC1Uar&bn>+2ExwAv>6Fco*xwAvxGjw(wvix6ETKyNHRb{iK zy{pPj7r!lTpUgDAtwDdCd`RdNUEaOU4m-aU-K$6aKz>&!kZk$)FRou)zq)%Bci$S= z->-(%cCTuGpA}Zyz3NPLh|qq2IxziVwcV@Q-$RAf+CG8(eQa2*_m(V|LwNjhzb5VC z{w^%6w)@t6x!RsVpc6q@ZSQUJ<=TC2zFfV}%>+7672dl%V(ovCeDAus?051#>(cgr zNE)!X{rUHM45?kWiJqZd!$!9WJyt((J&g7PGlbPgBwtuAhpp+SlDn4#2B9!-RH!8|8C+hqZc-8&>Om^#J^WO&Z$! ziey=->-!h^y&N%1nyk2LG(KRAFAFb@-OvX3 zO3wE?>}Gx4{5)l*n1yw8l7RfZd;VVE-Md#uY}%?)BY!2OU>ot$Xia=P&zv@xr~nd+%W9uUhrT!o9xw zCD#x3^Z4zeep`6oq5fNV-=Th7c;DgPN7%V6|2_f*Ar#(6*u3W6M`URQ)7|?BJ)eg5 zy?{cysNWUZ#r!S*zC-=7@V-OWsY1Kxy?Nn%hmNa4yXg3G^If{^Ox<^v_3FO6%X=cRu9NvUV?{B|rqj?P~hoBxNZx5@gR*{1Cpy~q1v(R+h;z{TA?Yw7!l z=f&@ygj>qE-u0Zs2k%z>qc%=T^Xa~z@IK`Cl8^1dl_ISEqr@}!>iS#ZT`TpjDx0qR z`?5v4zejuO{(hmf=gVy`{v9|&YP+oi+qX%1(f6OS9`8^&`Z-5Vem+2HEH`iG4r@o7 z=L+@G_Q>3*dMZ5KkpHcA4!sQfI5R};<2mSWwXfuyQK$V<+JDyXsCFFXa9&@+@@lnT zE4rjNNXJGvmR_i|0y&ta-ule7x2d?pw_Jq}c zR)34M&m%?e&CbHcpQ|7BHG7Za6MAi58Xd39SA5S-KVUxh`}ob3P0XJE{$XhUKf~Xy z=Q7bl{S|nk`FTEY^I2ZMPUG@>%Rb~&b-ShBsXE<+TbN_|AKU@_R{ahg97@i4Pvob4^(Heu;P5|MStg{T_)YXZ5o9 z!v70m{~txF`g%zZt3Ph^V@4k^`cb1FG5TSn_Z$6?&{Xw^#XoKIQ$`;$deG=6jXr4f z6Gs2V=;w@n*671VKO?lW`W-@3_I@X<9+h~szr^1TNOk3Yfb(6k9~Hl^are%}a)sXw z-60B_?LM-?yFucV*A-gJ<9s?7(7kV~@CGHp`BkuEro{Q)hy5Dd=u15R_melx(scAM z{UNxqM&r=Sa@>2%{CnSqI~iH)u#wQtmNvB@q&l-3M;D^mEU)U>E5VT zQ{GQ&K5e%sU(3<<3+MXGF7vtQo2&hhe@}c9Z)x#94(%@Ocq40t_SWoPuv72T_&QAc zMempS`iteb-)VRKL%vVGt((Q`Me?Lxq~iXN9=5@WXw2f9dwE%%|ygf4uE%k;Ct0%04NaZtq>RIi!4Q3a4v*-9CqUmIz<#9p(GN z*Y=9?$qA=xyGHq15B19^U+b;@8P3%X=K3r4&s9G!9_Pj5)qT&D;~QCJ-#}U{R~XlN zevke?LUG8u!0}8=ir>4Awy1==4|M0sL2Jh<`~MRL?fX(yF)e30{3{xQ^E9>rd^9oc zxA+OW(MXSCFLzL4h40T7my_3DrW?QiTjj9c473B&=ZnR5FrAhjD?OT@eM>#$F}QXs z)^jX5=3{wW@sDNCc11D~ zyUp)T5BQtAdItR2t&_b{x&Gc(l`E3T_VxLFStu-M8be~d+~e_+t*x{vuPN2j-__bZ zr_ax(!*q6Q+7D5Ow4dqr``Xif7P6cAH=pCLK#h7`M%I5-zY;N8H~xexe&;St~u8@>;i z+2=YIse9!u3Gi|Hh^5fOKBv!Gh>4#e677?jmdN^Ww;9L7I-dhNjE8kz0Gh)S?UR+P3{2wA zw@`!^J73uHaa0?lP7)2OEuH*QfMM`4zRE8Ec5TV@JHTf@RN{HJVa_x68m3P?V0a1erwp$Ke#G$A zz~4074*Y~+j>#Vxz76;p!~1}LZ}>joKO24s7=QXF{d@>`vf)R8uflT?p2K)f20sq^ zJf3RcOF*Zi5&1KTtossR$|F>AjX48+{F84f*-!I&bzrdFmw}>quK^UHav4500^^^^ zZ&AM22TJ)NP|Ckm`IXCo_W`4SME)LN%6%A=a-Rf+kH{Xr67&$_^!aas`k+sM#zB7s zIt=;?P%8K%DHMoODnN+(`4VM`Z*%>O4o}q2PgY$gt^gn6F2q&= zy%uTk>vA7>3Gix5y%3n|a~#hK!^6PzBi1E{=L+yI1wDeN30QV!DBvG}`HywyJVbeh zCZut`XPoC+;^}z$fuYv(@mOHg8w7u)HOHHQ8QufRI(*Rb90Fyt{SXxYq`}Z;-mCC| zdU+RsP6BNNr9C~MoWtJ?inY?)56W@&X;AJ}9tY(< zuK#ZZWj!7M<+%KJQ2OI9K{-AvQ8?H0IiRfT3Q)F53Y6>jFeumV{h*xN9s%Xrc?6XG zNu#;uR)hMW7l6(LT@N}7Gz6^y{U^}VK|cdJ8}u;f8KBRB&H;TDDm@o;G3c3~8$dbN z_ko@T`ZiF`ogW3|oc|S2&YwR9l@mT1Yd+$y1m#?ME-2^C2GH|B+d(-O?gZuhc@OAf z(1$@e|2z&#|9uve^BupgNFVk=Ik&wYlzx0QDCe#$D98S8P>$yZK;xj_09^+9bI?mb zC!o`ogPsAp0(3cOJ?I8d1h*HX%P`PYyvy*2uP}mu#8=Sau0x)cph-~hPPhU%0cSwJ z?0g!Sp;3u^UWd<|#os`ROl^>F1&mvXY2X}`cYxnaJ}l@3rY{Tw_Zc1mej_k_EeCwF z;YWerV)zK~yMQy0c^3FX!1O`jvX2}7HSj}*e}kWH8#Vkp@EvXsqYKzX9+F#>HO^bAXj$kPL$#2j*8+iHCuY2WA~}z^??}0el3w8u(^l z5B|*n)%J~K&%07t9)B9t=5p_B;H!wg*!^6N9Vyx5c;G=9g zZ_(~+ERl8-$1M&LW4+%2K1`6gl=A;jNGFQ9_Hgyt51EZ7a}O~1(Ga`EVov@+$Rv#S zFmTH7SAh{KTk!Fi;Uw_mhBpB}so1qkS#}kVTzsmk-7T z->?t-DPWi<>kWw4g(r^Z^WdX=uLaM)0i*9^Y=6u6BZxEf;K>mngm{kNISiRTP!6Ua zn!FFp(2vK>*IR*G!2da9XhSbBe&Sy6Fz_jccLQ@Tl!eR)@Eqem0KCL-4tSN}M}ZmW zZ}h));BOI*esmk&06v1;0T4jGobqolJ>+E!!$b*AXFvG8#v=&~|4M0z&No4x_A|UW zqSO8h2m7C63pU>c8P2y1`y)6f9KAOoboSg28TJ>$$0C?<%HZ+70KT)~F<|Ez0`d&2H;KLG1$Ap 
z%AFd`%S?w1?2m>R|IXks@UO#|<)QUEz_*&jkAOc29LGfVjO85$ei4}S)d=uZ81*6G z9Po5t+K|ISI1`v<9RZ#P{1spx*e*1j!$Nok@Ylf}#)Npa$;VNr3^4V#01pDQ4Tgbt z8Xf_@!{l?ocL9F`GHw$3cfdGoJOKXhm>2lNz^n@oeV@idJL8~-@X*c{(8ml9gFc04 z2lykPzrw@)OAeIb_x!-3`xnGnuQ>4Wc%CC46X&VG)ae7y1?F5D$H&FMQZL{};OD^~ z2EGpX3E&)X+R0;K4h**d4+7JMVc^>g=YZb@{73M47(D>|XUYJ7445|LfIkb&Har6S zb>P1M^KkS%U=->3z&`fWHOIgY<4-+RsDuKN#`+yeY1 z;J<=D4E#0V2Z2X`4;#J@_!q#m?EzpO@;?lc1AYatwADxxn8t%*%rx0GHEF;9mgq-9ird_rUynMqVsbpreW7z$Y6X2A*X&2fPsY zILe=av$~T9z5@7o@P~mn0-pe!1MUKT2I9P^2!UTg8Q?bo?**IqcHmcG=-x&-1V@Q) z#}g+&cq^V3Jo_L&6?7QS0pL49cjNgmFo)khRP~_YccXj0WcWS6j~V`F;HQ9NkRQRr zum?{L&u_qg2WT9&{u%h6fQNx6V1T?6I0wxC>XbMR1v7!)OFoq$?B~Y>+_`{%05A^B za1S2hi@~S;Ip8M4ybEBs56=v@BVt{u}(P$^Rahdf8qUj$~XA5*9RE`KK*w3}T z#dsvhD894}^3BG-5qP8FJAm&tY$Wy~Dr!N>MSDw`j)}O`_b%jhZG1QI`%K_tz>vxJ z(ff_hKDrk;ZgG&9%R}tsXfx~hdDSD*-vaIxgn2K5$#U|)jPU;$!IQCGy1b?$Mgw03 zK0@XF#zdLrIp8_qbB$n_2Mkg9I1l*CfN?Mu10Mv=A=5>`2$dhf#~NVzNt_&nk1CN* zWUGh+t%J-15&oA9Uk!eR<>jA+f6VyZz!(mmkEhRY9QaMZTo;D%WcU{)^6A>`pCNNog#U>M{$vEhet9i=H{`jlF?_%~=_#~{P? zmx1?^#PoUYVL%^;sj{;kWJ?s!(fzTGm|C~eMj{54avkuGiQ#W)1pYN;PV>Bv03%d>TgcJ#`F|MC&F68%-F)5xJl=eM&~!cy znJ)pO-(|mtsP`RyAbcJCkAeD(Aka6^z20}gm%b1#>TQ&SdMNWGWY`W2KL+MJPQS8% z*3)##GV%;$Fg?i_d>+`%r7r+CBYhap#2Gkeg68nB&FKRkDCc~xoAf0Mu_q0H1#j2z zeFtDB{b~yIxq80}nB$n?)rNiG8NhIZ@+H7$0UrTnSYUV<_yXW8nJ8?jVXhmi4g0`r z4bKOrUvRHVd#?c>Zs0Wn^JiX&uLcebw*U_S!%jI{+yMMT;1Te50wYvDB}DfuQ-Sw_ z&wU-kJAnULp?4SXI}N|b@=izmBf#jJ@;Gn~Sd-w}hRHu-co_IMhI7E>@KuqAJu?im zUW*M612-7X0k;FA&&z$}<#*B~FEGFVMI6Vo-Q@+o$>jyU9hm(^n-2hA4a#sYFnw(p z&;7vgr}77YKL`93P=>Dnv(Ix-@NHoFR2&ZXBrrnd)A)Ewv3p-k*JOzO2DbeSX_y+M zjrjef|JJm;ALJ1KHDuWStp5wZ^v@RHa*ScI5BNC4{BGhD!@Gf}8{P-(12gXk@EpTA z;ByQg0X`p?^<-N_{cTEy%e0IU=mwL_EtN&o-sqJp23-l%WCqUxCIM z(GcU@WFhvf0hp2>U(DNte9*ho2adj1hw(jGEo-^pt6p=7kSqi?|$Gv82&UcY?nEY?{YU7|MONB^&C>1 zf4BK{#M$O-tM35+$?`l2%()|u=SPNHfGx!Sh=QI5AE7dZk3Sh^-+4Yiyvi4Zs7v&i@?cd5z8iDQ~~HSjX>k*5)uf#1Vz0bKS??+lawA~DKkJ0E`*bO8In3>bg1SDX(1Oz;_I z0dt*aI2*Xe@cF=}8@>>Dw&9h)XBgI8>SF(hNI9NJlDHo0K|cJ(g`@nFX=|aAenosO zF!L~^4YMxYz!g}-DDy^O+ZR^cVi;*STLD zX2L>VOHX9%mI~TZqp?rU{oO@=5`9`k*KZW=(8nzjjKhu~4P5}3U ze*~ESZ;4iRp$v@BV2f>HmlvCy) z@IvE142a>4~>1G2>GfG<4{V;kyoU~o>cF(coRBKGmPci3@Y zexH&27Cc->iHGqtK@af=9)2H%IERPt(TR`Xaofq71%OdOcsA0lo6=JzpQ z4?2v;eNV_onfyCQkq74Y7DXQTdgBiR^W8l8Bfvbj5$Aw!Hkl*9yA5-8dMEIOD8~nW zpWzYUkD82+4)~1WIPfEeTi7uOOKB%Czmq5a0{p1Sa8mlN(*w+KG34XGKQ#U@@P7kO zL)r-NbHJj<n*ggmmWTy~7U?M>ydc%j4?HzTJof_TM_t*Iz?1 z?}UGV+#nL(f_1JPhTI7J2FrUpa36%?%hYd)8-d>iKG&EQ;P(TkfQNw}1m--Q1OBYx zBfwuZ%rARCX4nV*zTr6VPYkyJKVx_p_z#9hfXm;YW#xdU80HtpUk%K1Y=DdT7`Oz8?5$>Ic5ra1MB%$;9h1Rt>iR-)DFj_+Jc< z0DsnS4*08vj{tuM_?9vhuoCwfz-;Fn@Q;9bo_-YgH--7?j{q+NPJy2TZZOP0kVzPh19t#RUf`_Z5#a5HbHKM4J_5YkFkiIo z1Lj?j4?N;zfbVxQm*O3kVITOA;W+SjfZ5kAz(;`D*S)|$H@pq_&%k0E@WiCz5#W;y z=YXdJ|08TT0(_d2xeR-BCj)%G;UmD880H`QTxr+`P8f~@cN=a29yB}*e5>IR;5!WG zfbTMV1o-`iy({p2FvC9ZL16Yp9QezIhk*~9Ob+;GhK~S0Z`fOlOq;asKJaA2ap38O zTYzf}4+EcVcm((Y;M>dK1HdaBZp8i+m~&wcA8m%c_1H@S^PVXV{8r-+1HTLSZPWw& zLEyIo^CIk14&x&1D<;oBes~->Mm@kk24+9D0RPtTFz{atj{r|b<4`6Cd*I($BWeu+c zzS;09;C+TyBku_pz9Yap!Dm}N4Sc)dXMy(`ejfPUhVe&oW%n2^1HRvI z4ERCA6M#Q!xE%OPhVfr6lzr3iMBu}QCjmcYxDxmehK~cD*rv8k27aaCsb}Ku(lCG4 z=1gFYM;~~h;W+T?f!S6qz{`PK*(Si30jGg;z!qYaP|yfILggl;U8R`U#k^MlM?&m4 zGBm`F2fhw+Am#hO>jd6xlD)v~ECWvn{0hh{feiPQ(@nt}!KZybo*RMNkQN8N1(-6! zcL;eY1 z)`j6GhJD~?4ab3B0Oowrf@gBO_Wdw$mEqffXBplNe6C@(|AoNJJA!8o@Lxf5c$$Da zX%C*1$;5#NfkEV6`4-?V${@T27(u@G_cq{i{ELGTt*^U>9!4H_4?P0>F35L-=J32% z^U8hF2Y`FPWB4d=ui*!Q=>rU(1rChQIND0? 
zcn(;66*9+V)Zctyeoy2kq{o3@1wL^L@M*wYtA>H+8_oeQ1MX)<;D8ole?|N<@LgT5 zHF>7D8;%3J@1YC>vs|bX{aYbJJc1A2v2BGumSrJk|6K>8u@fSEIdH2`Y&&hzFj@Y1 zM{(@>2>--L-Zw?~QzCr&5(8M6E;MX3$}wsoc4I`&%_YTh$DtpX#_x?ezqkQ)B}N~4 zHyfrOz11+~-v-S7^zr212?Osl{xI-Y zID%)M;rn3jQp51ovekw^B0~m(=4rH-Hqf9eA%o_Zw!Ip7m-WT9z^JdxCGEh>!@%!Q z5c`a1kb1qDN)TtH1M--LrCv9v9+AAm^!SL~WjGFez;Fxjhk?1z9EMpB8QuZ>dBZmX zA2NIs@MDH|qUa|K-wOUyhHryezc73|@N>Xyx80C&=i)nYg+HNF`TLOe3SinWg69;& z4*>hX=r5T&=K{aQ+G-v!`WzZDZqYt(1&!n3`nd;m7*8E!SZV83u<&Wb9|;Tz%S(ewDWE9_}(#KI&3cNTI5S;wM zhz-Y*cfR`Bi2TjEmuJ~7UwM{N-U;_uKH$ms z7VvTGuM7Gq;+^<@@C!-aWb zfR*Fqq)jz~=E6N2(&>-bzj=3Ao=;ew5sOFiFpK2741oFg+;4IRk1i|rX~Zo|UJme} z$$hFw?z0iOiy`+plY6*GE*FtI4RVi|+-Q;97b0@l26*2vxz88LJra?lBk+A0!iirh zlKVOzvw%a@1jA z@;8D10y(U^GH*@WswroKQtTp7*yA;VvU5_P?3Y1MgcJY0NcXoRx^blTc9`6^isZf% zky{42H=Eq!MRHF>CwDp+518D49z$*x zel-g(aCes~Ny?mwyr@PYRG@iF91fxx*Y_tRs@oe#N{Cik?-ZA2Wt!)=!L zN#l`x#xUki?{g9SuMzyk2>x;e|62rqBZ41`;QxqVuB!|z>w6Kr3ASOsEw(X@VJaT> z2Lt=>EJ2>h`Web9QtQXJ}~=`<^BLT4ocgf0&WGRzx)XJ7Et;G$MM~u%=0tgPlK{={|oq0Q1)Lh>_=4W ze+Ynq_Ve$Y5GL_s8)Og!-x|U1j^K|*F#qn>$v+(7^LL>f|B(p)KO*=@1oL}&PW~qm zet^A~<3AVS$F>#tyeo7vQzHC15xgLRU+Wj-mqhq0Be)@gH%0J31m6(ByCV3m2)-wR z|0ROI7{QN6@NXjcg$O=zuwe6RBiPGy_j!rLvUTg0U6WY9=Gyv1(>0CtiGtKe!C!F=J$@!Kt&S%8ymViCqBq;q*OTh$GTBT|w-;tpi7m;L z*BbT-Wjxa>Q4q#@d$N5J7l2=%?GA|w$}*`fy$LZ#P^zaj?PWG6GTp7|ZC?L^#W0_r z{oP@vtvlW7hwaqZzl8iH1$L@Enf3b5TSzuO^Wtm(sm_)(Dw?voSp*f+-`5SjO@n=C zg|IIP+qU#{_ayo9?jU?m23k$o9*sF#ZtXqZ)abEzt4+C><$xX#-6M- z9e!}v>tDQxHsLdGQ&$guFP9&Mtgxgj2@BW*RBsX3)0cJ;mXSy$`@?jkAY?>GQM@}f z=yj$Cy{*Zve)eszsY}{lh~8VzR(6s4?p6mEIIxheQ;@U> zB@HfcV4+8EQiVj6AzdJ!3qAT&6MdavIejac>Z9lOrFvINOVeG&_rZmUYu2t>SHFI} zSFhxXWmhLwtXWsTylG9tT2~!YVU^byvMyV`yuPu?TUUQ6#69L*x#sew`gMtAl6yVN zv_8orWLvprZDQr+%T{^ouQ9N0eUow;*RNKvZ24shn%AvTP`|uh!SW636fk>Uj!N?p z>dQyWYWRo8--1pj(pxbHB(lkYMCaygI-LkRH^J%qJW%|!bJpwXN#WO|(Pa4b=@2BL zU2B;!eGI}=(&;U|ed-rpFB}q?WlX@!B-8rKE=jCyXiBVD*U;Elzanu>eUnGt@`knR zo3E%}ArU4f*3~at->_DM*EiNLU$b(}n5l{N4a+YxCDDA1mo+7t8X6MouUK~ZtCy`^fl8rI#+(^AgO53#B4O2f%)<4hqmqR*FK=kP zCb4oErmB+0Tv2~jVyqdEe00Hz(z%PxnplTQl;~QEaacMpL3^SJPPV*6zG)d|3|X|t zOlrD%oWwQDuNZGax4}Gi1Il#2Y{iOo_3PF(tSfa~Ngvm(-CB3an$nuDfUmDy$6ZCy zq)4VLvR5RU)?87K0W-D;$Mni&r3(@EHFyPy&2}=2eQIILAp3#d9_+Wz#o~W%A>o{J z&c(k?y_?U?Z-kodJM?M#2j^AR$e{kg3@i7>gPw~POS z#L}hye3zC;Z0g6RG}E2flHL-g`{tejYtHuPpY4a4?dhJ)a}hoB%-4JK>lTPE*u}HR zrZjgdttRb6)vB-)IyY}V^K73+%)1C0xG{nfOYu^hyhQir6!!4g`*Pzen_wJ_gtoV4 zg$DiU{xptB^50%y8b1kN#O7=g=Y`Fv zRk90PGOxFnJNUs=mu_jdqTROP5D;Rg7J7ZjOczfsl2t6iX0JcZdU-fMVCNnY&Ti`F z#<(jJq5@qBiF6~K+q$h@t3HV^l}+}g+K)cPc-c)VpGW}pCbQg1;)vLjWlA>F*4{_Z zmEH{1(4SycPza%v6L!j3A^*rWDNcUj} zKcI{*Z0&JcM_c#87cf%LHQk#t*)5)&a9h(|nXTz8j&3;W;&6hNLj6)*$uP`p9wZ~# zhjTbik74K`f0&X!)3bW-){x_-H=XSp#9;2mDbSr4^)xT7k1GYV6`D^}r&U$R+l97l zj~dzC!$L3s!qk>TrYF(Q{~?Bj>kU9}Hq(>MpbU0nCbQX-#+D(FNcHx6VLG)XnS~dL z3FsxeU(o(FHq;OFWIM(C(ph72c$!p6wxSFXQR$;ZVj$mD@K79PU2kwCONYTi1=1=C z`%)SB+om+GGrE(#8Q!pbS)!GCkCo&u*;pTEQ zk3#i$CWT(IuV+gpg;YDha~7WvL0Qd{AWNy#t&mr_C|YEhR? zr7=Vbj7=EWT|I3~+uV~KNMFU?u4kf zrz?ZY8z7F=R5H~rmi9nVL4DZWY8eN}7aMubPDnIwMPC@$9Rrgvaw67qIpGWvDNY>hlYZQfbtYg3=ZhXlseR=9}OFIOt4KsN2Fj1^%sW{yq$;UFETi%Rnmpw>3dsW2!FDd+T? 
zFfJ0k?c$V@pm#MY1goX-Fd*_hEbA}T^o6`z1lW$HO+a6$SC!y}?freNJp(crv2#gv zI&-Nf(Tb%OjfAZaw;9y6P59X~Z|c|*Jy?hf{QhqAiCDrO)LO8Y^xlcZrBhfXWKBq< z+Ol@nhy)CblnhRpLeT3;bPYAjo{Sqjv0R6R{I_IG)IqP-Fm7GJi9zz(rhzz{h9P1# zwtN)D?lJ7^!RAGZ$)@@}+?8^#01p%aH26Z>>Yy@(Nh_7uoZOP>qCHr6<(ijB3J_kt zD2Ziyw=VAKHgT*@+|i`uk}j3lgxlCwwDMNWdm>C96d=ngv~FFD7T{eTi$DZ5!|vHJ z@^fP<(c079m+I*q#3I#}VRvG&*rsz$e&Z@;j>#;Tl-I*OC^N9I-XynU1xS9utb$Ek zB5{S!WOMcuUjY#FiXLqCfN9G^(Z-laV674=4sSb6%|}bLL)r`5P%Nj|=Jxhr=YuIE z!3OU0*m{}n#bU64eq82D4DTHj@Wq587_6)7;zh_xW8CP#ApqOmG{cP zXKxjiktOp2&6W890~o_@Aiy@cm%Z(U`2YEsLJ5PO%#|K*iwi77M8TG1TPEdEjqYd! z;hdOi&kI9Ukk5$yF>jZ7#BmFeM%{i@Qk^xjF}2mPJBitLAenJHcdQ@TNL)69l38bP zsf6U0?W4T(!p@yl!=@S+*o0vsxtVGdZ%Si<$Dr=tr1@w?Te_Qjw*C;q9SwxDi$XaY zV2cc+64+4l2xf)zg5B%R=HIfhG`?-a&K`cqjjmKePlnu#qJ4@uq6u9qXHY#q1w3Xt z4xP)(9<^0K7b0~a;HAE|ZF6>u+hJ_$rEB9|7q?5B1~aYOWXp(o1g`IKQqFp)Bd;Gqz$oB!zRw zzYQ-=WN3JtHn+9*c-aBzl0J2rd@N9;4MPbJGhgV5FkB~*=+gO9>{pH2tLbgMc(a4I zRQ=uh(npMt{mEARM8CxHm9v>hX2n*SN90rcS>9P<5OBScj_2x|*@k{dz<2XF)OTih zf2r-qRZEMZS{$R$y~#yhaxte97|v}<8r#H7>v;>2BnEY5Hm7BIOl`(eh8;qJuO?Bg zEtx)eFLs=fxTM?ryokh69Fal!xF3+_;PTni-5RR0u@WQ)Y#%$vSP4az_v3a51+p5v zZcrdHy46k4eQfAqe}oAKXV_@E?!eo7x>{}TEl?elpV%;Zn|kmnI&ldwBvh(qy}Hvgq+Ho8q*3RcCpt2%=eVy4=wVeZ=6FS*Wt3h7W?G~7 zfSFca1JWAoEqwJD9y-s)|743d@P+dy&z?sW1|k3Y>-{3$0-jC_cb)xxnXVA`ob&6% z=tUPuQN>M&uT|t55ST4!2XVx3C4|Q0h2^}9=zjiLzTm=bgkEGW)CMW7uxKybe6j#k zSv7m@a8s-TSekqKvsk6<`yDV?F9eQKm+Bov8_c_i=Tj7awhtxesD`us`li*1WtZ2l zwdNc%Bfkm~$*5%>l@$#oO~hiLJ%5{oj$o^G_w}@gbG2AGjag~hJH=U+icxqM*2rFQ zeXv*xK#!NWa;7)m4Vm6ZFJyYh>jVsz$d!_FCF(w^{&E9N?S*bY0cLxNv}r%e3Q$%62P#T>f!z<;KEY#d>{xUAXDT!I_g#{}$|Zxr&8o z&evfWs+YV(73i{h?XFcp)1Hy(Jx9(Sk!mS}6;s`MH9xLyt>8GiWgU;|7KZB9K}fx| z?nDpwfi8bAb@9vh5gy|pt=~xM+SG-w9}V&XR~nvo?o4dQ*L-v_pI^6_ zE&Xy^n+~H*c@EDl{pYyj%Go|%B=HL=I!aY7ZvI}X6APYnj^w_EXod-3TG8|6+%u5bmMlwi^E5ZB&>_0C@tMM@#ZttO zLQ0_>T<0LSgDS=?7uKLmFH-bM4Z9m$8o(ADH1(TBIMmENgG;R}qh?caM}?_ASZzJU zNTjP6{agBqj!^18TEhIJ3xPGJ-P_uS*J)A*-sIy9!7B%*`%nz?U|%zT5-n2IS&Us%n1E>iKoRO-DSfpk1#)ETaw*~LIqFH zdRCN{o+D>7_(TAjOXX36h{h z(IbhHVT+Q-B5l)&fRVr$Qq-bRCV)RoL1WNIv%Zh>EBCE!;kii_G4CYAK$HzipW2o1>Dp6BEG=- zDG|2VhGpYZvuBn}j)`s8Plt(Ztpo_4oG@6EO$RKT81gswiX!Ro9uh0wV2rH_y5ZJ0 zi!jH=fctSL@evAhFD$2=8n~}%O19;q^B$8rK(CwP;K+uDlc%xEMFWHF#0gen{O5Mg zhsBfdksJ&E_+mbwj`SXAJ|XmAV0X|2I@ zWXA8Jh6z7U!C-QWcL-BG2_RG9OaYY;YYR)Hg6})-)4*&=xOeN3r$4E&LDGaN<0&ks zpdQR5_ua?2WP%iGZUSM*Lko;;CMO5(_AC5Jh(N**e~JG>gRUxpN9d^#>wCwZJ$?5U zIwfAwk@j4`ECY2r-W@K{>Xh!CtS?T?2auW}FRpJsBrHs8%HD`2*+kyjH;&+j+4Q@x zt+QeOd>8io9G*pibNQA%ziZ=N6$p+c+Vde4ipP(9=+NOPuUKnttY|Q)uU|ZT8D;TU zTr_^%jp92ZMqPf{{dibu43F)qi-8(GMV`R-im_-(0H*OVCo&ywjeZA+yuoIf-4gc#<5mxTG>dOVv;JANNpS*W7I9Y*hPQLs&E`n(yoy4H@ z*tiRij%m)#WlY;Ak+_dO-b~$Kqm`KNl)7Q!#s3F61rBMiyE^=S9IWH|JuS^02iUiw z%W3|bBfo!Pv(6M!7c*d{1l9-n*3{hH)ZDF_9AApAfAucm$jVVnC$J%jlXQM0pJAr> z7jEkcyQabrXyQfLL*;W6-)s5V>{!@T6T({E+m0{}XS?Qm>oSl+^9F%%-^pJpKPMmp+4DAi0!=xiF$xFuVCG^M=J z)le$E;eD;{QJrJ)r70ExWL&O*FROl-AAI(CiTc#^p0#Yzx^i3ccoQ6Rq4=Ki6X9qa z7E;g~K?<;@A_lDx265<7Ut-jD4{d5mlYRS=ojW8GB+S@fcU(^B;G0L`%xnnveNq?0 zVKbR)Bj4#W+PAReM%d(IhB_Yh-ua|&tM{@e?;Wan*foz-pBi<#?u#!=jT&LWa8aiJ~bPpO%K+*Qb-Sm`BQ5e^}|jOb3qm zcZ@C&iD00$l+6gDcRp_AAOZbKNuQ%IuWWVOrc%JqF%CDYY>|rxU=ISX3Bx54<($* zCpcu2U^7axBMGDOspE9v`Pqr&D7LU8&-~_@(@1Ye*Qwa5F%82vJI2j-B~gUXu$k@! 
z9>dv?pLBx}#fD0qO0hNbOUFJC(vUX2KwAL+noR-T*%It)pCb7WU-GewB{6(sf!VgE z+lL>Y$QXX#W3?M?&RV@W%u8pNDAGs))dJ`@Z|TX)M3FD?I@pI>CXlxzQO3+S{@bsaaF zOXpIPmUM&eXb-h$kwC+QoMe7tsQhUkvv@6l4ya<*ESh6?a0JfH!p-)%H64*E7gi3F z&3Hx?QFX6V5osZ=h~!p91TCtF8uU;JTV)Gox9*3gboDgGWTLUHT0h%DOP6_26W=+W zI`lzpT|&bi&VF}Tni*)9Z_)&Ohou?}+M;l^&rY}Q51sQ&a{bBub*A);sC5*>4Q@V( zj;6Prdtv%mD;NtJ3v9fj9;j81I$7`J%-XD+wjfO4cQ4{5|4aCgb)2UWK+)9Yan4)bZ*EtmY+^f!7eMJe{b1(_sb!EeXKnN zJ5^*BA$AsaS&SLY-sl8INgqEtiBJu0Niaji%g7^#MxNpUxPu@0#G&R%?I$1qWGlF| zO_$r(S+uTEs)`i}yTYU8x9hB_s3kuAS{;_}{S5;-mWz&c(%eLV1`Uyj>Li_FNH-Yw zmI~{lRAa?Z7GahonaSkM_|3)-$Uzl0=u~BQ-=8HqH zhwp0XPQ_cKri9Sf8>^=@8OI(KZ*uJP{j-?A`H&@kn>?BeKeClzZ%Ynf5gl8CLjv7x z0)~mxXHH`S^f*5C<3Jw2qKyrhdblMSG`2!aWMHU^A`Bjt3NfeUEiR}Mn)dC<9j7h! z?qbg*T0wRgi<3_#sO?#Hoe^C;D%8>4f6J&)-RtNEVzr-szUAbzWO-tHjnFp(J+93bRz!|)}i!zj~aaMK%Y8_ckPPB+c8 zO%Gb@7rsU`R|TTQ3(3vUfW}Jz9hWZJoN2CTIEG-c3#~Oyu71c8hH)VZ$zlhZLAcJI z39}~Q7u&95JTxgtq|N+1(wxX?U&3sco`}K0oXa0JV}-@&TqIbrQS*%W5~Bi~!fP!V z#E!|DL0gUS+uhI+$IF)NmrY4%v%|R;%%I|jZK?ba8uqDRpUDHs<}hd8l5B2G24vhH zBE`e9I9ENJY@R+d=~oPw5O*Hjiz6t{ejFqrWdhQ;7=%564!EZ~5tLqcx>n zYEH|Hp^un8$h}L8#`nx6U0CTnF^)Z_X(|TW$Hi~o%wi?~5T_D;WavMhdQZ`!QS>-h zul<+QW^hC@7%aT>lJl;b|3b*c=#Gw~PPNfWMll|O17F)#ofiAQ*hI&m&>s^VV{@rg z3bwe{;4rpJ++ZY{#>Eq|{|eK|7fl0nu4g7^A9!GK+e#Cy6ry8~ai2z~VZ1uFgq1iv z+}3ct@X(2+!}R0H_~Z<}&`6!Q(Qmkr$&kTIAb;BRK@U=HhCIUDN-?CLFV>lWMcMgOymDQ7+teN~J~F z+7>wGUaNT!5!33%vbD`E$yVrvh)T9LxtXmkZ#o4y%2vjyQ!_^W4jD@A8_7+YyV0y{ z1;M0s)RH$tDMrxZX;!;VnMl2mks-v_b$L)T`Q5p?sBE-{w<%UvJLSZpqSH!@_Hsl0 z8!0qWTU*ZDaFG!oHX#;ECh1bHYZSv&y;D1*u6IfV7RS!rFf(R}8GfAWtws>v9-i@m$BcugTE-Hs)h5`vR7jmm+A`H0ifOL}AmW-;eur*cS)56_`N@ z&56Peyv2c~87d#FCB0$O^N!v~SJT{QfXv}|uX!J&^|qBcZE4}7rH)uT3~GDN>s|5V z11xCs6NaN1RW~qnW~p9s=&bHw#Ends%L{Q$)+2`>8_6wN>yyFp-`{GSby#5--vNG zL&V|WoDt*L2F0DX#hGb38_&)L_2V@UUB2TMzSapuDP+Dq_AzIUZfGqkGh_)=`z9WK z$9wD}&pht$$7}9v;aNzT&ho3-!~S!hG1i(>^W-MKDf^R#9nDfA?lHhL+&YTb+4+`S zEQY1V)^%%skDy!-jv`&IO(ZJ4q*cx3k%ha2PHnwcH@EZJFLP6g3SXsUp_O-uj9Dt3OVRcz2Woh z{$$Aif8D@p(Q17+2?s7_<|@qo`CAPe^o z@oF=C%{uLGoc8YXIv(uc#NiI^{O4ug{&Nn$u<)`e-nZ~Pc3Q|qf(m-a2X1FM0gL_= z8gBtPgKIjl!tSsCpgo?9_nYOsMSo77H=FaEIj?-+D>?`GGB$%}ZtM&%U4jE=P9OEp znNyRvj{yhT@ev8vhD^$JPq<+mqlzi)52h_{M#5Q)Q;5S1uIll68bDmc7Y?TLCMtjP zieAm2(|lfa~r~ z`^-ci-O}KNQsH_r3?F=|;#|h~^DP6TlP8e+xbO#;GM>i$1JC0^fY}LLQ8_t_OE1Pp z5s&AQ;XGI9Z?!o-?e+PtSC3&wH9xBGtfAk&=wCEQZNN(#+BI-#qgs=^M>t23?9I3_ zktT3iqc2vdAH4Jji~BfRLwL~_1zy5P^7lJQ4w1tZjku2;yW4R?*eTwIpigEi-XVJ7 z=(N0rq6@>g(FVs~dBIaCwUSG|1k++H^Pvrwt+3?tI3)`?&H|vg}=Ntg+;yKaAO?qf(Qj(3l&B5$Lo3z{RK;+tJml#V!wK1GE^3S z3tDiafa|^Fenprg`HC5CTx9Ljw;axmO-|wTXj@t1K0juHByP*JX!z3E)J1*Z{&HT) zkF{{0&TJYG6}jmONrr0{ef`7N09AC?05Vn*CSUUMij1D8^Hi z;7BCiWJ(g6cCX{F-SVMn;*M~cmuHE5upJeiP`11g6mI5zq{Bwhi}2zbkJuaOCwt^0jtPhOIb2rYAdIkoL1JI#gzqWDe#Uv(`iEHEtIe%v&TkG8YX zwzzk{k#DIZz^!xP8ip|O$FQt<>YK@P16xl!TSViG<-6u#amJ|qpg8s`g|U>r;I_5a z2_JtsJo*ssjY0z$wn%ZtA8oeceOwN<`QuW0p0C3yVS&W6WMZ4)=8;8q=?TIb`YZg<07Au_Yu*k3I z+(1}bWuo0hfB#byHGC}P_(omyMMHKGW4Uk0pSLArl~PjIm*7dr( zK4h&g!(z8%;)Nt&;z{)0`Fg63?zA#NBK6MhnCR~QZ-}m3j}^yuQ6=%)>Y5kuAw}c; zb+X%yH-2f3#DzGtDt znHF1N8gq=-R$sl9=wc-_N#`_H_$Dw*&0VMQ(0)ddG13) z5eQzW`PQ8~-fUDfY0d@>{nR_8ocD=~xZ)GPk~FVB(lXX5NH7?}q89_mFC^+!Gq_#W zAF0KVLSM0mR$tkt`|$!+gH(_GXSDg&_xoXT>~D6&9dcm=O)HpYVyKKsWVlrWr}e`N zf8>;_jmm?5QpABZ*48lGkS+0IYNtGh>~D2)N(IwP9`eEOQ2HOM3_oAVtpKVMKBdPx z(S8B{JD2=THvXz(I5|YCA;`Pr$)9g!6!yq-uOuugw*MWDZ8@mpr?8eM8hTX{4-d$F zK}^b~xz68Qn_^_pJGtOTHgZqlISjHZXU_@#w>#axaa`*jw-tm2n- zj-6owgg{ec2d+SG>Bw>p!{O6M*7g`4UE1TjmBs6yZDE&$a_)cqZkG!`0gd0U$ImMB zZ{uG4cJ_n#oqGK8buaiNeuP_oBHKL;o)~QLRtVnQ(c)q7Jwq*i6#UP@{ztT7_W*bS 
zKk)rDp7{;VeH_p8`<(kBJm>d2_bi?%c=F45UOVjE7xBD(xaHmx;O8SA0smse9|nIV z;)CEfM_Tr?;CnvQ;%C6)5g!Kgx6v8r{|!&7vv~h`{t)?*E}tIcZ{TmmSV+5=`5Wcr z{|A3Pvt2F)_sDnlIXCzSW3#vCA>Mq{1*wz0MhMp#H+v!;qO7- zgM6(bFB!fK8=Si}qS?8{T3wT%65dJ*$K3rbru0^~b?3M@fls^XU z6}}4Y`NA?cAgu1CBlA5lpBLV9H_W5pT^25oUtQ*g@!RYS&#!~~Us>ixB3=v53V$2& z6_sUf0?hcTDu_IFk8@Afm$|u!KL9R9d;nY$o`m@oZ!U8caO~ds$ovYJzbyRoG=F88 zyDEH{jKsWVG zn18B!xhqBd2>4vYPkW_otRaGlb`- z!98=%4S;F>i)3W}h_L2QR`^?xe|TuQI}-7Ja6aP4zy)J}E|T{^UJ@?dkNPx>_zM3P zdEatZG4{_3UxK_Rv)o-Y<}V93AYVPQ++8*DhLO9QLVVT#l<-Xhuzz5=ON@NL$cKeD z!u`kg*AQW#Jr|wQTQy(Kee#j%}4wQxD@dd;Ii-)nD2RIxjQec z?o}i6JuqJv_ThUS=^?y*6VkVW{C{tVzfZ68%U!SVM=5_5@fTM22ZYZ+{^7!MmyUQp zcsSz6z$3!nfcc(Dlow%jFCUrjf%&4a5ARu|hj7nkgl7)%7xv*TqCN=UNBJzu3plP{ zy~vBWeM|{|2PlD6JzXtO?-$(umYxqYZ^F1&>A?(AyvfCAeuRehC z(cSIlBEAJ&6n@_p$a}lpIbn6bEW8`?kD#$#i1;yZCE^iqRrnmt_gq@;YQpN?<;Z*w z%wH4s;k|gx|jv^7?Yu502A&K$zu!)z{F^3h#mW6*bfc z;SYk>UUe=ntl^sg)BjP(H*V~9^TNLgUNM050nds1%ax6M!N@DZ&%ykLq|2Q*@~W`9 zpL$>LPu=eqR{1$$4R1yGC*l5khq~Nl;je>N40pLk#Qor2491v#|B3Q_h!2?QJ-pqy zEnB+Xu<(oE2hvD?;a>%B8tQhn$oyuQpIa5|tNDw=-24(8V!=ynz1E8xD7Zg(-__2A3G zH)BlRcd*-C1IOjttqFXHWA@iEUc~bsNM`(Fy#E6J+)Ef&;i23EP6^i`UsGJ>`i(p> z@~rS}J1`FY{4#gMnCBb;Ll3X$&-cI%<2Ut}%<-K1E6#mzVbEa&IvDodtSl# zTiB1sUj=jp^WJ?3*(A`ucmH*?A;K`}6{@(GUxIrp(Ebe@&ItSRb0Ex5=w50!+^VB} z6nPrF`ih?-dG{z^1$jZ_BjDW)=ZeC?cg*=B`6|fI3I7wAUxD_aEbQAqWJX2!*C4-l z3+5NX-vX~c80ISsA9)41F7h9M`wqiblmW_>dxf{|!Te+n@e=+Bcuf&`7|Cyed{}rI z^3^Xnml6IM@SVuaBN4wBJRz*{EeL-Z=2x5zbKy4-ulHll?OwxN7;>^NkI1jf5w8W; zBVGr_6rgE-2N*;8CR6UvMXGygaBsvbz$xM9;Qy^v#8=qoH|9UX!Y@G1{xL7?%iksB zr||a@l*c;Cqwqh4dG?nVBVGeW7w*G@ka<-X_T~F6#7Eed*S8TLVPBrF!Af_#eGEao zd%pale@qOc3-)qfUQzhbMR@*mgl{$aLo~@v`6|e>!oIwsKIVlbxPJb}^rU#NB9 zcmegfB>XnqUxCWAV7LOl1#2r0;P2)H{cFUl!Iy=7{1KjdB)=2#YZ2cMW}~X^?F1(g ze*ioX@d!90d80k62>&$Ak0HLo3*b8^P=6!t2N#4jyd_}`Pg(dcVZX0{_zK_haH#JUbiVZe z6XbccXT5>dz1|h=oD%*8m|r!A_!%A+u2cRJ)_;UIVJ&T_hWrv91FuGXT@e0d@cZiU zU-&EFzN@IOV3u!HHR1n{?xA13D(uInM^OH53g$oc0gM-)MfwV#1>c4I;CznmshsmK zvdS~UzYg8YxtK&`QiM7;URAZ7cibC z(?0WWPIyBG=4Vk~g^z*nLV9z&P4oW@ys?P#8u6Xrl<;puzV$51YsB}1IX7O;5#I?e2#-*HzRS%UE(!k#<*y?>g?|-%*G1&N@ZV7WwJ^SC_|`rKu7&YD z`8c?*f%v6D_!qz%zK`$;|1tRXZmho*7y~Le+K67PnNlPBR^;C zbH2gws+thK0Q+n9VSQ2fZ%GU?@jcl!e#I}%p(f@ z!TsL_-&MlAH zUx)g1S@>_kt4b&z!fQt`UPpauM0`K^TEusQdp89C)O-8sWFWD_+8SOvDMe6!AUag@|dsD!lQ-NWXc^M zvK;<;%2;m^=K6PEg7p(TDfjE);I!~RrF$4WF2sBo<~IiO2)$PWV8%yPTIAn^{e7=u zJ|Vp8NyHE1jU&c>UifjyZ>eIu9C07`qVS85-*XxBgNSbd*CXBycK3$xeFf(G-o$uF z_-*j|2KxVqw}A^0?*W%09s!?=_@m(S5$C{F;g20ed-gWM6UlFd{Hkyf@}1W(UcWDd zkGv0@iuiGGI^s`(hlT4ff7kbsev$k($R~u?Jq7>K-poaOE4V29ION+mqCJZEHgHL} z2>D%GmbtRbIDExoXeg^Z)h_`_kBHjbO81V?W7V$^HT-RlNpnEy+ zHQ|wuAbk&@KOShwZ-qP&o`HPl26P z!Ig-g1lJ<|IQX*g+K(dr*q%o6TOoIwL-_VXo<{qbM7$R~5b+`KT*S|Ui^5-k{ky(^ z@I<@|d{OvsC@&*@BHjh=eIWQh@(k7kUq|_i_(5?7fcZ0LS>i?05>3?2W-7iE; z|K}rS_)3P$!kV88!aF{O{_HZo=SJKIcH2UHjzK=~Ce}+MUIk7G|19M1zk>0Ya0Q(D z8v5smSAd6wwLd=+@e1_kbHZPNeSY7(7W1=^r$cx&{PTuO!rGrN2y1^{G3L(;Yk!^^ z4DOTrs;J|_+Mf?c%>H~NV)o~GVeQW^3u}LVEn@cPZb$Hsa`xv*#O%)pB4&R+5;6Pp ztg!ayg-Fi+d`?*V^K%iiKQBkj{=6D7`}2#!+Mi#IFzjto?a^ z#O%)#VeQY;!rGq?N6h{_Bdq=TNW|>Vv%=b+PY7#&UWl0ec`0J{=Vf8-&l?f5KkprC zFMlav?a$K@vp>gyxMu&%{`^S9?9cNNvp+9J%>H~{So`yZNY4JeBCP#+En@cPmm_9> zel245=WbUBpZ4d8Fx_K+J}j*Lc_w1^=MxdLKc5rU{`_LZ?9c1MT7MhD+MjcslOc22QckA- ziLm;gj+p+BL`?s)hVx*~XK(&E=JUNhZccazoUH6|CE+6FsUF95b(;6vtmB8dh&g`XIy}wm_+dU`jvq?GI(|3@rhgkh z!SzP0A4I$dd|CL%A@6&o+ckuL0etJL-L4nw-n9R_bMT6W{NJefre# zeO=h6&lT*S5ccWw7QTlI`|ndDJ$~;f{d4|{z`jF&T!x(d%ivYBm@fSRe9`dbh`GLhO*n%zTDO4tawgdC{vqtgc@6!!;UmI+ zzb)R&M!q0?KkT#oUNwBpn0Jo^{|>|ax`W6c!~McP4f&%ep98`gzH}t-fxIZ}_xB>t 
z>%xA2FUo7?(cquo-_p`w^DAJ!DD3wSmr$RD{r=#W zk)KB1F!IcS5WeR&qCG|VEeZb)`3p#IFzeeJ2;b^LmvfH?^FDm9A^gJ2kQa}iNBE6A z6UkRVJ}>O|1HTS_BDm-G16RQx4E}uz?%iBO{fT%bxCUl=s=6%v>oA{OMSEem7iBg! z&o;A}A8#T2hKGgye&g?>JPE%I_aE)XcvD#YpNQl=kQashe&LnapD*n9hps_gF8{zYT|vaw$`_OA(R_}qs=eEt69evJ2pKk^*b_b}he2&?%K;rdCm z7Y)p(4CjTP9N^R%>q*A`TqIuwc}dvspXkQ-Dr0^@*za%b$NI66SB3rl$t@VK8~IgX z|Ghem@w4!IC$YXYjP*N=jhNp%!FTLKe1uh=7v_G$%|n=fVx55IRh|f|{rQOd;NCgm zqj3Me4AS>tu>aHGH6vK>754l44x;=-ydGQ-{tnFdWiek8_WLV`@I4gs{J8vb9#8iB zE017(UfA!id=}{!aX+{&eD^8T=RDS*gf+i=pAPQ%{;PoTG??}MJj}ZRtRD!!_cX?P zTkt(D;uYYm@IJ`%rMJo;ZF@5elu;Zym9 z@bA(5>!`oNw-mrtl%J0V^V(ka3y(woKFkjj;d$`ZuVFn|*oXfu%rAv~_*wsiFVOvN zq=&GN&-XF^Hu5>lQyJcspN4sUFFzDm+tX6SY){V#ABOq&twebe#z8%|72nergtfd} z6#h-fSKxba1AR38^X2FBC||-pd<(Gku^{*RxnITiE@6NEpn@VMoSMdZ$m{4oBJKlc zgr9(XeHHy-#OuKMh$)|lnDRN{&(Qt_^luTb1D}hS@^ZwKbDfp(`1f?;hGEbt`Gk~A)6eUNtdkl==TUyen7Yg`DeioB|%;j z^IsQkAON)glN*8@-h2Kl;gRJiPq4G|?jSE2t_o{-db@*p_2Gdy71cjf_oFa zL9Wa*akQ`LnOzawe?!b45mx_7;M-wd)2l43=~29?-9FEB#o^@{to5+3=6l~8Slt@{ z)4cl6J$|u$o{73Mvd=S6WVO#dda?a}VGVx)d{<(EMM0h>daG=g%O@-y<>j??vB-37gio#l+ ztHRp8Wgls8&nASM<E7e*?NdRx*&Z4BwI|x!s|p%_`mgQ7RbfpJ&WV}c8eh(V$y%P04>|W7_**9# z8;k)_$>R2Z!SH!ub+2JKjljg|#W8!F9vrjB>A^93oE{vbleK(hz-{gMGwqxg*7Uk; zm}Ba=J?EG3>Ju=YI8Ajf?pb`j9py{wBs94BZf0%&XCcQL(M2`eSiFkug!w zb#4l6DZ|r`HZxXr^yxmF#@~ zzbNTeHtr0HJKX;rtKXOKXN*3_#ZEtvpXjytr>ZRzD%;Yoe(`PtHYr zI&6qdhxg??EZjjYcj{Za5WBnEH9*}G{)%ybr}$fuc2X_gE&ALy9;+`&`W$&M*2VgR zq$l^4$7-{-+~L05 zSp6@e?~MK~NeAw$jn(TCPVTRb)t4o{+@~9>Tg5#0!^Y~jM4$U;V^tP??)Qupwtzt4 zT#?cLJ<;cW*H}HQ<-zFRCh_9_(pddK%0Kt5#_E?OUfk~*tL2hz{iYthN6d47NUVNN z^tn$oR&R^H+%Fodm68t0V65xhf01}`zh|t@OFD30XRN*}?w5`Jh@=DeMaJqy@t6BS zV>K%N_U?^!ojW1vFksy274zKJ8LR(6{N;YfSWQU!G>rM2mRqbD#Oi{$&wY)tdQsEC z=zmJw=RU+(eM8*WH3*fBNjeN@haiGO;y(8W#_BIM{6_zWCBEGM7prH)eeUavRlm5y z{dTeXBMraN|A&$e+%Ffazm{-v-(0MCu9o$P`|o0PznD*({JkXliP8UK(dWLZSlub{ z;(oJO{h{cWjsAW~AMSsP)h8uh+{YEGM8ePgXt8=(^ixLvZgIbE^#6r~hx^E4^*MFN z_`6c{7fiprR`i|Ge?rTr(Z55|tz`6hhamHXYdo=fO7tgWJg>5UC+-hmO(s@cgQ7cm zqyK+sIWg|w>J)FD-!Eeo#*z3Abs*Mt?sHlWKiIC{BmOQJ{ofRS`8_sPrzQPUMxV*f z@!My`@OpV-OB+=9?kZaT@tgRc=Ep=+hewyB-Q#z)SaFS-?(lnItbS75uO5nZF_)L{ z^ZQ+_8Zs~7cRH$hnV0;e=HthLzW)EF=<~ZwtV$9de!q*=LlPc-XNlEciGIQ8|ETJJ zJT~pzzmV`dWB#h9zcK$0rM_G=`p-!?`5nlrS#Wji1{7Rr~9hZ zpAB$_;n8MN&Ht@;XQdi#$DE z{|V|N&1<<*{netMG49O!_}=bDjQ$TK{8^)qqf6fXBSv52$@G04e{;wWD)R5*?|j6R zr{E41DApjNf^~+%F!> z^M)6s-=}*_5AFw}A~QXbgQOO7zm+o6hx@CP8DH+7Ql@?GpHilI?w3-gc`fJkkL5|% z!PuTrZ@3TtK`HaTKq?I%@BeG#DF5j{@5fU){g2B7{hyEWi{^P>92MQC`O6X0`~d!_ zG`w2RG<@$Q*Z8XW|K6OE@l7}4!1P`paSc2WG28y3h`#~mw;C$i|0ezpMtlj({Zv$x z{~7+qBK~u5A>zLPzZCIh@O;F734S@^e+zy!V%ER&5i|cTM9lW97V!n}n-R0VT#fka zU|bUF6`MJ>cPk@idFYRr^<`tkzP?7x_HSRrtlvB*MMd}79v+VPPr&@vMMXKw+oWOc zKciClS;Hm6UogC2xGK#2`cwREq2o|wHvQ6Ec(L6^9lLRO->&Pqt zO8KRLO+IV*^M+qG`~}0WM!XU4uSa|@-oF;{eRzK};`{ObR>W)yzaKF^OS_ef1Qf%c z!Cw+F(V>oL#Z@6H%Xt-p!Y`9{$YPe>&Zn$CCXFB4=_%ePe{s*O;7)~3`7|t5b z8!i|w8ZH?w8?G3x8m<|x8*Ui(6)TFbj(3{zXT^);`aMVG86(dc&KoWmE*dTwE*q{G zt{Scxt{ZL`_8A?;FMMZ-@}Cin>H7_J(w8Lk^{7}g1w#y9mI6Mw^L!x_WjyHBh9 zJ^h z)o{&l-EhOO4#+jWDH)(DCx*lKsaE=Aj67>NZ@6H%Xt-p!Y`9{$YPe>&Zn$CC$^28} ztMgT5-M^xoHs&*ib$+Yn^G04UTr^xVTsB-WTs2%XTsPb>tP4pR-V`S)P|AtnwBd|l z-5;d(^G2@wa#UV4@{-}Q;fmp^;hN#P;f7)N-uC#W3@3)USC>k|pD~;@oHtxBTr^xV zTsB-WTs2%XTsO@7KQ;Uu-}kO;XYM7YV*Dr{Fg$E{#PAWr+>1;__c(vxUSuj|-8ZJp zy~(k>V)$ajoPS(4+=$o@b!EMP?vuHfnMzsrn<=M_JYzU(IB&RMxM;X!xNNv$xN5j& zxNf*%ShqfD{CK}=Tl@|4e$`l>G0gi_V|m_i!En)V$#B_l#c!m8s7J^h)o{&l-EhM&7Qx%fYszq9IBhs%Sofc4`s9tgV7O?wWVmd&Vz_F! 
zX1H#+VOT#cX?Rns+tVX4oHm>>oHd*`TrgZTTrylXTrpfVTr*rZ+%W89J+~Qu!@8cY z^0blb`o7AuMxHm!`+2F^paM^IhaMf_laNTgjux?S*@aT6|tA zh6{#^hD(OahAW1vhHHlFh8u?6t?ltk8BPqR4QC8z4d)FP3>OWT43`a83|9@;4A%`e z4C_`@&F_?as#Q)5rwwNeXAS2K7Yr8-mkgH;R}5DT*9_MUHw?S?oBTJN7)~3`7|t5b z8!i|w8ZH?w8?G3x8m<|x8*UiZEytSQDGutPloP{g!x_U_!+FC6!$rd-!)3!2!&SpI z!*#KhO>tAh6{#^hD(OahAW1vhHHlFh8u>lMZzo1@08)haN2Oj zaMp0%aKUiVaLI7laK&)daLsVtaKo^lbVlVp#Yt$yiQ%;2jNz=|yy1f3qT!O^vf+y1 zs^Oa9y5WXlx4u2TDZ`23wBd~5tl_-jg5jd!lHs!9is7o^n&G8==Yv$P=Yo{8#(dsz z!En)V$#B_l#c9>Lgt(QwId*>J^h z)o{&l-EhOOyQe)o{N3?5e2L+-;f&#|;k@C3;iBP^;j-b1;i}=9;kx05VYjh8zA3|r zVcmb&%zq=#8qOOo7%mzv87>>H7_J(w8Lk^{7v%Z4k4tA=Za>xLVKbpxyBPimk&Jrcud!x_U_ z!+FC6!$rd-!)3!2!&SpI!*#KhV}fn#!t_QD;JFUqT!O^vf+y1s^Oa9 zy5WXlf8k(Mp85MnR2tsIFn|9jmS+rS4d)FP3>OWT43`a83|9@;4A%`e47=^^@ktp@ z45tle3}+4J4Hpa-4VMg;4Oa|T4c83U4L1zCw28mr#Bkbh#&Fhf-f+Qi(XgIp((+d} z@`~ZA;hN#P;f7&1XyR{J?^V$7=zR*x8Dlh>HJmqGFkCcTGF&!XF^FzoiW$2Vn|zwbuH^x25N zVZ+&oxwMuyJQp#Sp5_hfJvVfpOEZ;7PUi2mQBl4hf0qq6BK`mkKFI%qd2)Zm8SsGN z0}(HS{IKCM!?T9Z8h+XED~4Y)e8KP~!&eNyZMgd(1R9Fz!|<;$ywUKG;RA*b8y+(} zYxu0;mkqyS_%*{93|}&Q#qisPyZ15nQTz>WG(2SZfZ@Z2#|+OJK5O`8!><^A&F}@o zmkeJq{I=olhfVwqZ;AMJ7~N<1aKv2C9y2^^_^jcV4ZmXeHNzJSUow2f@Y{yF*$6s^1@ z^+wn4b^TG-pLG3c*I#t4?f+ueFL&*J=wUZITF6b0zK}aUetLHLGj7}T_~f?P@wwUX z_UPd!wh_ipA3ZiXo|~SXbz`Sza)qhsSqI2XPEDOWQ^*}XJ(fFD7&|&Uo;yA{_2RZ1 z)bUfZu5fZTH#R+0cqenTlsh$b`o!$ibkp+e^wH7roI26e9z8ladS+6+Vvur(LsL9< zVtRaZ)(2z8hm~&6xTDWMKRy0DY>kPC5uF{K%bhq4!OV=0?(C?+xm;oNsL|`7HJ2N2 zbEJ)e8*N(N3Dr3U&!3;1I(Bq2H#&9Z^z8UF0(0tUVaCmjGag5upK&26P9dB}pC6xb zadNriCnhIzElywbYgt;q|dvtDW592U=T^78SWD= z(uTpAqo)d!kr9>$hVLZxwoOjHa4L6nY;1bw#PcXy&GZfK7miNPj6++Kv|U$|v0Ycv zwq3L3E;DG6v$1B=)mT#e#p|AZEx16soD|F<(kTP1&v$ zXMAQBb>qa$#CQ+~dmY8&bF4TBu6MF<^fQxFM}sAw!G)<4r)Rg%oIJMuiX zkl@Z7ZYGsL>dW9BecPokJN0ESyrpwHnmY1^a0V@e)4w2i!xEG|-iuv1{~QVrCQHyq zpj&Udyx!j2kjlafQcN|PFM~UR7JOD&c!AGN4ZNKwpfc{3pzM}Z3D4cR@Z2TOA$jg> zKKBL#2&}#g?ry&A@^1)iQzyJ3H$^h&p9oOXL|Q|#%ZFq)e%&UdzdnccWw80Sr>WI^ z+uhV^z3uXP2#(5{FN5I?sVGKvsFCo3(iSw3mf;1t7+!|-1=$3xPy>b-=AoQ_49atd zV0fg3P&h0FIUH%0@RgLBge%Opjg%GKz_0oDv4@n?*(lp%hmiEXK zu^0|Ic-yUSyYyv9Uk1ZlI=9p7AV>WZ1;1%R-jHM>!ZRe#j*vY5f#8Pc&gOHkYV6UM z!QK81S*JST1)*1s@PhED#-Qr#XzIut@~)|s-qF^VX(zsIhXi%drw%elUk00RdsJ(; zz6^#p1VVLo_!p!~&_G(K1TS$X+Vpr(JWi{}`Z74IZ+rA*x4sOv-gc?pPJI~+ZwN$M zBjrUX)WHZs(QwV$~|EuC}iUEw2+E=ZA4Evq@Fa2!rzaj?d zKj#{1U;W>?g#WdZ!QzD2*N56y-oAwYnSUPqPsAYoXBw-0_5YbA{2!hS7T?MLrseQ~RnvvxNWE8}xr_3I9tS z{9oC@fBrrc{w*s1b@6`yvRG;ORS!Q6-7)^<4*uT^OKM-^pI^fN-c!Mcck=()CH${+ z@SlEK|NEBkKP~YlG@k!KeL4Y*&Fo#=_UNHb?~1}iS_>n z(1R0;^1pC{>HnW$w^RAAckq902mc>iBL3%Y(EkrC5&uR9|NA@m&wJ(=e`%)i7F1RI z@1ZbKY(r!96Oc1a;Y^#lcsgLdO}CZ)s(q9Azm$FJp9zj*GfzEhzb5`)RFCAT`LEi4 zB>pSw^W0Q;`|Yo*PI%hz`~mED%71q*gfxlzzdPZC`mg1mdNxkWzTYrYS9yrnCtyNS#b4(=?AZIXP+5 z5FgVAQ0l9sfQYDRK|n=Cu2SELf*@X0R8&;ds;FE=Me%{kh5z@v*4i^CGnoS3dw>7? 
z`~UC$X{@BgbD_lhtCx#8uIJr!ek>b_3yOQQAUQNg z*+a)Goia|T`%DIk4&iocm^*o_Fs{NUMO}Ieu#cfF3Pz> zQO-|aAq?t)iP>~khmWh*sPoebsb$Y&Iii937{X~vU3-rv$6K9T&W}g>tMl`I^V{~y zm*%Jam3yQf`YSU=w+m(YP-R~K_+hQ518OJx)ppc(f2H<^WcMmbNCxWfLlHm!6y)fC zw;fgJ=Z)S^MQ<`N|8Bcd;_a0m6`J#JEl2C^+8?9-O*@7*s9ntGS@f;7h_b#Kz3+&g zL)M=8^%@+j_f?b}6wvoUEl)78K;kWEAKn^VH(%oQlZ9Yh((+D$-$g@ft;VJ<&n}_m$wxoqUb(TNfNHl>H5V-dcQ|3wnXYC)$0k-A=ahMceOBtX~@s z4xzl%NWZAW<;IPn9f%;f@!~;LzK^)`@6D4FD7ojg9{7vcjq&$+jq5nnU+D?o`CH#@ zn%;M-^83d>Dd~?x3!2z#ubC(D;5x0JuyTRKqkiGj?t{z)eH7)Lg!tpIKaTi=pllJ2 z2Mn9*E*ATO{jZjIxL~5tVEh4K|b!v*?W4ED2isZR|L_8(_)6bK>MuMHS3km4Y& z4jwK*1xPw;kN2~8P+rR!njZuMrwKotX5}B4YjG?8z^N9u@(;Ym;_7F?K()oK`~#<0 z+{!;N$KqE0f!P+f{xYBgaX8J&KQPnMt^5NsENYTGTd!*HoITq7PRE1%al(xc?bQ?19}kK;@xP+8fnL$kL;<(G6 zQg43g>NP0U^0mNR(O1Gx$Z5Cmf-&l6S$!qyhs^CZK1v+ zdgcgU{UXX|eT4JO|0D9!4)e4&!_!d2==VHdR_eV>i{8EyA)lKMl~WaD~?KKF9{R-b#W-}W8U z{_W!7Bu&rNquw7JcudB@aFUL*tp|Q!@iWAZocx&*Z$0q1rE5pbrJpV7eFqLpdf!1E zXF~sV!XMINdhAcqPX74?d}$~DyaK+olh0WZ+9RUJM?CajOZEu9w3A;`z?XLN|3&!W zq;rJk?59=Gb5Mvp4>+D4Qui!a7Yykz;pLC-6<)v0n-0NW@#m%Nd6a6?c~t)RoSWaO zUo%}QP|c9}8O!7Hy9$^lMeXGLFJCX^n17?!htER!=yPkd=U#rf%Dg5iSAIT$wMTH< zhsEsta=ZP3ROKY~i&6Tm5BpLzGbBA#bCS|Z-{=&f?QL^}X8pDDH4!PgP0g|bz7Kt2%>@N~={Ge?3;5D+YU+d^R@Vy6J{PN(NIX^9WwgWS zCZo+pn~Yvzbe+)#pw3e=qZ%7CG0y?qj1MlzG1ue(Qm|wH_zwIMxeY;&+pDyz4t~r`B&|{1kukbsUV2 ztKv_-j^|On_>-^Wew6RSj?h=fj`GEye0`2Y`QlIh0-BHKIdjwfE45cACVdA6i`c94 z{ir^*SJy>RzS^tnqbT1;exa}Hq$pqQ)%8-8ulCxyX%e+Y?L9@}eFttTVlM_Igs48X zcUA#k?VVY`_rVGM83lZ`x2k}z_D&amILQ~9^M`2??>n%sh`lFEdZE3qF5s)ZQw#V$ zIH9leji?^A*XCytKU8~dT|S9Y(fZX1Sl@v?MeMctSVW)Jug%9Ie6?5SV^O<&A_N)Fyk{-=h^~Z%DPC8#`PXFs9o;;xY z2+{jg-+7WA<*PoOpN5lk-kH;<^V8&kDMjpCUPRwA;fIr!3eD-eK;p>*uPmaku86)R zrf+eczFO1g7twb~5q+yn-^x6F^`=kfanX9x`n$M@z7?kL!aRK!nZ7wi?7OUpzICSW z(mZ`@O`py;3+-zxqOZa9tB*k|jjg7J2{>D!j4uh;bHyt&Z6t&$$)Yy0+? 
zzV1AIq3K&y#J(Lx^ksw}PI|M@od3N=;>iQL4vE^K_4g)8kMh;NH=4dH^YrzbzI8?H zyS9kFJ*MyKJbl-gzJ?3gT?({)a?p0xhn zUPRy9OyAx-eQz~=n~K=??jrhbGJQAZ>3f&y({)*)eK!=*cfIKw$kVsq^mP@n??4fK zgTfCdy-#S)|K2b0{?kZsKBM!vVEKicBe7Jr4s zPqFxTi_fw6Q5K(VaWyd5Kg;6dEPbZM$69=b#mg*SW${vrPq+9Oi%+w7%;LVqwZek^ zCtF+y&Rbrs@$C82REt~r2TroMm4Dz>7Ps;bR9f81KcMsbaGI5WfV~9uJI%^Jp!551 znw5V*=k>YsC!NoS)2#dhI*$+S{fA&c=kMV(EB}BF)ZsKM|A5Zd!)aFj0eTbjxAG5Y zJG*oJ;JO8pp51pHm@jd+&zd`bI$ij|z&w?QdX~VQlf2yXr$aD@7FKv#Pkm}0_K(yr zPu602{ndxHA0Cu!k9e-d8gS>rZojs@`f<76?H{*S>;9M9?=JH`F8#&ra~Ip!Xte#6 zLu$qm_CNH%rN8C&|rgR7$miVTsyh; zsVF(t`w<=~JU?T7!p<>lm>{Ws>*RwC>bLH`V1xQO&r9ebK}!2U_Pj~EnOCpwhW$*n zhvo4he+cDZ4>*KwOHVBGbQ}pbsJ~M${Ub=KzdJt(QrbVV=VeLlKeUr~x^teOS^YkH z4wX_v>d%A_LXg&el0An?X+O%^zsBU6)c;!$6?@X^|MjaZy+!*A-}{kzNUP&fuh^s0 z7q4E2AFsYv@5|Mr)w3&av(@ur)8B0Mj7k#yO;*oH7X5M4=PON(H-E7A#Zooq50~fa zKLb(LJN2b1pAtWb?lV4hg#B~pCl%hg;!s1dn<1FylgRrNQcqPUOFY{iRns&bolMFx zXS&8AEag}zQLmm61n##pgNpJJ@k_>;RggS`$)0;}C{VT7zf?oqIr94XbHqLuPvg9l z`Bsd1jp!X(Bx(>Zll}2B@0Y@;7&AtmgB4?Dc{SiTxyCU+llNJIDp8B_YJRSNxN^FE z)S!x^3GAIo??%0#{aG*Wytbmu#`h|FF3r^OI$y5Q&V7R_G#ElKlUhd8r+yKnb^Hxd zHZH>q$=CdO>U_)BJrDBp)o$TUJO0=H`Gp)X>tiJQg zT`@-cZGX+DbX>H0>c4zg<9YSfU-K2=_kBRq`)<{^dk-p?uKc-|{6OsKJD~dKRy>xq zTgvC2-!rxTv+Zp4SmD}vn#}{Mbbi3`NZL<_FZio#Kbucf={zG}t`UCC5V(0s6}?~I zTNtTN`9aE_&&hcz2ctz!-N~C;;r!L|ooV^z`SsKa%ePAFJD6$t=E<$E_=MzBCZ`Lz zepXSY<8bgI_2B%VVvOp~wrf($(O+rpnpYn8o}oK0&$g@8Z?;_zSi4rdU+nC=)!KFb zdnHbPWV<>$f+~Fu1T$>>N4nIXJ#Q*XUA<^V!A$krtRJYmd%1Bz9V3`&&!xQj%Z&^A zd<(6YF@|Vfmvpf&g z@hPZ!v39U=#rj2mjTEe1; zik`V^E*9#>P4|3j95X?lW5J9WlAfLSR8>jb)oUA@V$x}r^3(}=L8+84joBz($bmS}kn9_AXb{<|O$;Hf^M!(ORR8{dNF8On!p(UVfcvi&Npe$Me% z%Q1St3YvV?gHO?urZ>Z$RYJU=WtxE8dSbf9p;zqLs&d8G6IS1-7t!Br^}X2iH(Pz9 z9Aama)i;twf86x@N?-DN0{JmS+kyID?)sv_I}0_B{$%=sDxXn#-%`pm&FV|L4f=!C zmn+Y-=_(H`Ql3Q;&8`cx>k7Q*$t>KuqQc^CUH{V887GUqeFq*9-%po}m3npYijvbu z%oE&rT`{KI)*ZhU19S3avi``$XOCEixceK&{8HB=GbQ`N^~fb!?(BNR&5viJq7dj` ztk3$j67?#^+5B#%4rIY>TaV<)HIB3NnMgngW>d>(`fNSYq8%tm+j9=LOTOmEQ!jDz z*&_MsuBjTI^~jLen_G|kO5$!F=f=g{dgRk4f0oeo$Nf}j z;dC|Lhlxhu+ae%ycg-QqWa)l>hKZhbPczT7&c?+#7R&J#>uE?wsp zbAyK^|2}&UX6^}JmAIR?1}}F#GMl4pq@At4vwkz}7)cLi+q%H{jmc&G#;r$Yb95L@ zpYnqioyP~sc`64t7rzN4np=;|ERye9lRLrrtJZgrpRdWSKjC({;YHUYMe4!%PsKR3 zzyC^W*X(-4+BL5{Zhr07jeU1$`?>Fn^4Z_Ae*wu^8gJmO!E->p_}=ZO03_l>6Z*m`n?+TVYnwL_Ja1KpH<<>vQ& zw^{#k`f62QE?x7P+Mw(7sXaD+&Cqi9UugE_txtwbPv323r^Vg-k5e0T9ADqlC>a*4 zS7uu~<*ys9zADPJ{oFVZq_zL_UvA}Xv3}(0>jTPnB+12 zqI%Aia?D-(TA^-Sb?y0oZoQ)OAD91&u2=TIPV9Z@>lN+qZr#v#o6bWgTKm4}y5)Lp zmxA?!`SHXH#2hc^HLC96P2K9&Ejq5fl>8}KuztDT7sdZ<{@l+W0}ZK4J&%a)BR_G3 zedI>_-cP=K^gZmC_WdE9D7teJ_Z}#}2f~u^`v|1mM`ma&KSSqzz~q#9>d($U{`=Qo z-**lk{UK zXnm!nvl>vq#!?;kj(8vBM%)KIjiqxW$0Oc{JVxAycw^}_xsUd+JR{OYuddgb&iWsb zF81nv6w^79j7SHgv9uzqpG6#zF8S+op7--PHX8$$n~JnbjcW5|F=#96TzK_v`^N!YtR1bWzx>>yp(>x&|azEv(5h= zd@xlziO}iwZ)$_`b(qQ585k#X7R&De{59GciSuVT(`aQ zG2?&N=%YrzWmMOB?Ui4*_`^oOYE;L6_j?EJmG>L}OGfqi-(L9ziL?CsP{h7lwO%IL zx;FZ~;8hq$`TjfG|0-OZ0w)iwlKb8MO)t{vKke0e-q&9FM@g?J`Kr`gMae^0;bNZC z7SHC}g?Qf`)*c}u2roDP{>p_}eZ0TD@(PKkD%T0^uT(o(&S9hvZKo9S(dnTVE$6dV zPWtiCE)mO?m-pXGex99uw?8ZSjHU-wlD)t6fXYqOX`eUrLDfGF(;U|CkWQ=ReHgA_ z-1C(6&3bo!Iy9j7O*Fp_?7fN5e!sw-3x<^sie14pseUiC-@gbfpRn@Fmag;guu}ae zu=g^;%7-MK`EsZ)p0D~$|m|yLx#Xc8Gsf zelBYlSG*H-+B>2hoxigFU3)pdZN1(6_JGzSpMy@mSiAEHRB=>^clD2Q?ZEy({p8Di z)*gMgs{V@D`SNB+W2{Y1h9{;pKllB$R9lmjqrc7kw9V|g)9iv$@rxaHA2Q<%JL(Tq z5LW6q5mxGRJFL`kC#?L1=nva;+z8wB`4_f5V*GCy)o~F-d4ZHGLc7m_-e|kC zyzX~l+&IAJBg^ULZ4>7sg5b{W!y5I2pvwATjrv1i=l@}i`bA*p*I|wNhkK78tWiG+ 
zs#t2&ca8dsdmkaJQNIax(K@E9{{(gp8`kLaEwJ>&ZXA8F@p7U)kB9Csy=ZP3H#tc`;5Z!}H(Tuo72G)RIMQ_-_^tR~Yq#Dv zRHgKB^eaiP`HiIWITS|{$BBiYq0IxSNBE$^uP%JA=`VcFn_c|fou210uYw-7P+0kU z(a(4j7^BTk+;fT^nm<2D-l_H5dWX(qqw|r^F<&be^ZonZUwC0HbRa8yAM!?g>N*ug z^|-D5BtY1v^TX)6i+b|kFJyaBkNch{<7K!i#x~#+<4@#0J$j~lug{&|1*t_M=iVO) zl5vT<_x6J3#hQ*nvmJtDt;Ufldz$;*hkDm;+I{8wywVO!_59N9r}QFy9_|a5>ia%X z{v6@!dML{Gh0okj&WL_;!lgO^iSlV@xKt;4;aGYf>K7NwACA@aR5(k|S^2yZ{w!Um zxphmhp^mp@&pU(E5{bKW)gZN0)6rH^FU`79(*paXUe1(c&;9NN`94_gcR$YC&AW2X ztMGWeKRjObhR19E?s-Cg_aPr1uXaTFQV+*#yF~d?565f!g;&RMxB8is_v%Fwe=+}Z z^I*4+MgKyT9pU{|-S?~Tbl;Eux<_)-4~_?{`7-%7FUGe63#H$sDjS4`H+Kqczp^9S zAE5&K7TUj3vKLeUZ3-+z57JY?f<0qUZ2$map>z&J=yPR>pU=3 z`9|Y!H@e$s#^~FO?lF3c(VL9kW^~Z#T}JOT`U#_V3k@r`82yyc{Pu|MhjIML@Bc5o zUGzCM#L7e{+*jhLq3ya+kbbberUT~x-hx}tyZ!4!kfCb#9uVKt;FET4)%WlTKUejh zTlY`ARM=jhx;f6hRUq7|&*y+01|i&P=Q|vrB(BfrfG;9QT%XUuMlD#F*5`Ae-QNq- zT5dka@aMVTHJR8Vw|l`h)05WxgKd^in$j5Wj#nC{l^<+#`KW!tHnTI02Os6PSvk_| zW!Nt{-sabq-{TOIez{eWy`cy{4i-ZJgIc8Wk%lI>m&M;bKlvXppX-0jcCmWq(bhgmsJgYaYey{Q?F-Y|{=r!F(=e@m5R8QbB7|x62lu;Dp}j93jCKC4 z&(&b8^MCb|V65vulSNN3*7Yy-n?Obh`hm7bFwgQgKVmW4Wy)c2%Kj!r$Z9s z^H%H0#lx}%k`k89H>wU9mg)X^Sf=F)%UnOua)f1CzWi~;*%_9p{r~y#K>K-E=K8hz zyX)VDpeTNon0mh*lvd6jS?~s}!e)8?-us%tu z+)$5F_x>E;n`h6h7~{)(NG#`W#LGPOi)cAHV$ZEofBw&wv&_?REUKR)#oQ|Oi~p?t zms^izGMhpD=`bhnp{hMBHJ~GWk1Gn$r-?os`1e^oRGdjnpZ*+>$Nk+?!%Jc&Fx!+6F@7eXY zJ^8%qJ8bkZqmLNUhHSKA%`~uj_#%#?iK3(N8`@h5e2q<80#5_FsCrkAS;* zi_0fCpn9^`a=u}{kW*KqU!mQ8H?6%*&v)9} zp0@Pg8P$D{_BP$`YHxee__}}9Zs)@7wvXIy`^W8Vy1&-mru)V1ZFU~k_L$wT`+x0i zk6K*!Guv%HyS+{KC);iRy1h-$Guqp}V)FMZt$a>sMafdB?~0N-@z?zNX`Otp)KA}W z!$K$OW8G_Sv-Mxz{(aOB)v>h@#Y{%!6Y$M$KbiFw!;vVGd@InE@0J!s^Ae*o44`!@as+9qv^>4foo4%wF~D zaIc+*?A87e?$!OoaIf~0aIc+1?A87f?$!O@aIf~8aIc-`?A3l2?$!Ouq}8w6&+LWU zaD)kMKRC)~{|>8lUpTDP{qJ7*1*id({uRUj&kLr8I>hBLibNY0A z2m-erSu1*-|MypJ7x@=ECwZxUain&igPgPN4!c=iH$G3CCT8LJIZi-!A3u9e;P&y$ zEZxpAvgZVLj*&elP=Cms6WBRM_MAZdBj{E~4y)}PBYRHpgw#v!oWS<+v*!fv97Fq6 zm_5g^byxNr!`5Bda|~N|xpR!{If42m$0@dR@p@6e&7Gg9|K`q5)Q@xLC+>X1)??Z8 z4P=CnJKx|_i*fbypj)4hVfK7O*QKGoSCFe0^}F2piTPXh{6zgRcYdPJsoeRA?ziX8 zPqbg<&QG*|x$!Pta*EEkOS*KvUE=23Z;}EyzfKi7?<@4kev0dix$Svz-FJ=!>zE0@+s>rT~E=IKQI<=WZ*671v15Veo% zpO`+oiDmtld%UIP&4RzL5Dg9<*Y## zuAG6jS6Hd@&|n7|3ZYEA=KqX6%<;wS2`m4h{uZg92iWhr#avn6UX6)ApMKQWi2nz~ z5Zb&n+FyBJ{{A}sfcLro8_`(4mA7Z#Uv%&NynHYPGJmMqtMwG&%Q#bWZ2_PDAJ*&mgOMUE+PW>bfSZxmx1wSN0g)Ei^g3i`nD<<-AeiDSQ7SW$%ZDl>@>LE3Y%U&*(di zzQgF-jlRw3TZN|V|GtEkHyi)mMsG6uE~7UZy}{`9M)w;%X!L;5L8I?C`aYxYHF}HD z_Za<<(GMEE)94*WZ#Vh@qqiBo)#yD&KW_AHqaQQ+QKKI*dY91;8~u#YPaFM|(L+Y> zHTp@TpD_BbM!#V6^F}{s^s`2XjSdNIuiPfoz26u0Tj_VVO0&!U2*>^W{*-@S<@Vk4 z<#N9Vyh9W=+J0=A_ZEp$UU#@PKR#FH0J`(aGW(rx*S~@t(?pQxUi9x^r!VpB_fU6D z*L3(f`$e#`O5@PW{^QPlv*+f!W@hC@-wqw8v*+f!XK6ad9?`dBw#Jda=vySw5$D-5 z?k_8HUcO9A8$IV{dBbHz&dZlE7t{yxXXphbCtP-d#G`x}2bSr6e^}004&-sMTw!?y zqw>2+Fx?x{Vv0V!PwOqp*L<}8!Z|)~m*<-3n=?(~*>mQNET#{W8kea;txabF_gu4-xz4sGsNe^Zfqm-iKlT#;r1c zBF&e}^=sY7i2k3k#gKP_{h5~JpF0mVsf61Pb?fUstH%oa|0;d<9$H0A^O*wwiiY3} zjjaVARg8T;e!^~4(rZxgLlVn<520_eFPy$(SzC}V(TVSWm423&f%#+Dd?7!7rqjYB zrAP0lmDEEXgR8H6J;mhuPNX+tzjTVP^f)ay=OwfwW&am|aWpyN3ni~&?OklYx+3;H zX_>XA7vF!8IRC#GL-u<+N9j5=>W2$azWn?+-qXHD@2jFGUq1Ri#kr&0pZ|O+^X?Np zEGKJVwEf>W^sru856quS!{mXu)`zt#SQ6a#8K&CzZ>hHoDX#wV?JZuukBVJop6*Xj z4}<$2MZf)C-2c??J+#|<%hz71^Og2Wou{-{>io$4UqF_Vp6TYdFSR{* zAIHV~`;xb7J4K%pu01*ax%OO(VT27@F;-s?nK^bVzQK?FajV{6F(y=hI{+pE&;9xR zHFwT3v_bj(HFLx-sF(giyGSSMV|>Iq&)LCrXScJ*`B%Gr@2(emFm#~(bsvX%yLjI& zd?38kdUAUE?f;EX4?}z93b8v?xlCw(rIwrN!$=>xmQtga<5}w;tgrlX@P6*}=iB*D zf3M>XT7mk<@XY#i&tvvao&&h~VYL1!@79ZMUXVKvQ2)$*ze4BPx$kx8d^>j@p!4qB 
[GIT binary patch data omitted: base85-encoded hunks for the rebuilt eBPF object files, including a `literal 125880` pre-image section; the encoded payload is not reproduced here.]
z?VllEboh8pE&g4Ew}s*)K7Za8_)`2Hk3X**IcHvC`_XMSdh~BqP#m=;(2W*#uHzT^ zb$zA3kGaUN>np?cmhuMG=w2DFxA0}>Z6DHghF{lL`15pcr+Y=%hivPVcG4XG{>Z3{ zj8D4Ox}6oFefZGMif|q7TIKtX6+upUNj0(CC<<>Bewg#5ww1Stu3P2%W^b=&y`-!+ zD~-yI*=y_6-^IB42g^J4_sUx0^S!D3UBM4ZGsel|JkY%?tgBGFLxj$j9(0zd>xpl7 z`FN+}oH9jCe^;kto!SnSdZV3hbsF|X%lz+CEer2q(A&$)_jEed=@h4_Q+pqS{!HiX zim|G}dD?5l^5i)w~yr1 z-&ZteZ*TEkCxr3o3He*@J~By7K0o9YgZ)bJ!>rZh0LJS=@t*H3eU?q^e=IAo*BSPg zaXe|?`hk3;>cLyT;ln=D`wKt9%dMaAmiOq}9Ws2_XRZkO03Y_5E5i80TRx(1c>r(u z3UB!U-|Y4+FW_SyTYkXDJhwc7kMm@V>cNNkxFY0D7tf5$-ZFn5>wm|HI9ne1d~C19 z$67Z#(T{0vH~Kl<`RIq`8|_9vEe~^kSpLCBKP@l2PWSoX-;1F<__@Nzb#NRkUnmcG zzar!Zd>n_6AA{p$c|>^}2b+iRahz;^!pCv2{3lLfoP1uaJ3+l94@15+uMG6CT;5rH zw4dadwath<7q|FCo;c4_5!7~a&<^MKb87jH-1-TP^T7HAzukHQzumlrxAvemPSC%1 z9k9Hop5;3<&TGqWc*|>OT;Kky*RlDEL%fdF=Hm(Bdpm17Y+mSn!2;AuP(@tN4QyXi zxX$g$b#9^Tt(&4VaX&u9yC*fa1>WLLd6*~1hw;gk+c@=E6j@I~z1DcW?7P71_{$L5?6hFV#^-OfLp&@$ zi~YyGt%G&!+q~-Jh5^>}wQtqSI$M3t<{w`{2 z#ZO&jBx&4uV2JCgoBcgjs}(fNnNU7msQZrR&lipRa`M&Id*{d0e~$6|`P)*}N=mVv zu8F}8e?PTT1>4_;Kfiv}==#6E<-NyWq|e{K`jgY=oc_V-vrd2O^cknWa{9E>pF6d2 z!ro7uf5PdHoc25Yq0`5le$VNnPQUH+VW;16`k>RVJN>HDFFUn7p#6J|zjfew(YDsI zl|bo7dbQS%151*FwJB>d!w1-$z({&~H>d${!N-cAH;!~P(?$7^fV-#uYp-&&>~?;j>gE9@6Fe~9}%;)`+LR*{sr z-{sGv?;7Y7&-;{(+q{7Ph4VH);D6@4%@g>aI&bp@{z>O;e!>4zJb&MA zglOTtfZiAUxQ<-!3v9m8KbwC&VcpUDhsULlJ(JY2_q!qN+5D%ztp_=KzmYz9V*5A- zd8K%dyPktyEdIsbFZ``}@gn2C<9i36H+9}S*t)~@GvxQ+^Y4(~gYN~xyz=)v=!gBZ zzu$om`W$3HqZH55~vF1%9OSHjePSIB(+zZ{v=>jT`*VUfwSX zKiv8F{6Flcw+>i7=AN%xUix*TJ+4=E;$nG=-53|kU-%do%VYQ$7t3e(7#GWH_!t+< zZ}=D&%k!=oejlLF)o&m@G&lyhww2jA^!&B@=PHvHowu2aj|)oi;K;# zxX$tY^F68G%DS~08o%exzPEI)?8dz2x!0NE$?Hz1VO`pBefy-!i|gBq$=%rw>iYM@SN=4IG*9YXxJ}VH=a|)o{Cj(f&Zh>#)-iW^Y{Ban^{Lef3GjLzr z|Acwbc*RO7F|U^=iLEQV-?#l2a{pdiiNAwzv3UL;hE7rHT`kIciVK`>5?wVl3I1&zsEB8s>~wmHDE+v> z>06v`aeBSeH#>ck(>FT(CsFKO=KL*A-{JITr*C(9lhe03z0v7goo;n{tJ7Ym?{)ef zr|)+9E~oEw`T?i!cY2r8JDuL)^nFfmcY2%Ck2(FQ)4QF1#Oa5f{ znocUtpKX~H$2>3hbq2numwS))25BtstgSb%6Yu&Hg>`I8jm(Vi=~$O+O2qlZdNoxV z{=TP$?>p%FUi;{A90{$g#PaCvx6T#&bRU+&M1 zul473wp#JN<=XH(vwL%Re%xc$d9SrOJV$1q)hW-ZuYFR|H%8~&e(Um(M+JF1`ZJ|h zkjsx*b>uUVPp>0Sk;(aunM+@FO0dcJR*;(y=!ElDPuai7Eb|B@Wv2)|JLQ0sVms?xzDK0j_t z8@?pz%2q4mKC9u2LOlO08?A(OfBBYhUs$-_EG)Knm6_4LuHhcRqJH7}^pawI&*$3p zsVQl&>$3N|z5a99fz0FE=+>JPb3gi&DEyC{_B*xb0mvV7{(DXzb^2|m4?F#q(+8b?-RW1Ie%a{*PVaMiuhTC$ ztva>+2JL^wdE39h+jv1g>2lkrz<Z~wM+g~SY;~DSc zx>k0o^<=_~!QUqw6!c$!GqUc~2Bv}5a3 zu3ei)xpqIH_Sg0JbNM?5?7qVNxF=rEUt~VT>%4xKkjY*l-(xF8Q!*D>v^5u zC767>-zAv+w$#2yH=OUU+dOhR$?`POI#%y@3d`gd^DgA6zlZF*MN;-NTkQIn#r>V< zMeRA8+t+&nThE~Ps~*(WN9cEq_xDOXH?Zp!)YduZ=e@k@)b5**+qwt+tjlekg#VKB z4?BIxsja(|f7AH~oqofqt;3Xm&G~OTeMEHCgdyJ(d`0DPf76qSV_&{C{EmkH*}C4- zYI40N_%|W!nWRup?;XCc@`7CUz9%{L?K+dQ_g%^PJ}Op+5tq zr}jPS7arFg-vdmLHRk!4KzSZ)`#Joz>lM81IN@zv;ln)M8pans%;T+LoZ%Z}3H_=l zyzN`yZQS9>cPqE?=kK<*IB$7C`6%Zt?(id>w{e2s#d*8V!jEv?#sz+7=Ph61hdXb1 z3vY48AIrDg^KQ#K&;Rx~AL_)#@)A2SE|#D0F)o&;@G&lyukbN0mbdURE|$OWF)o(J z#HHQ+wtS{M#>MgnKE}oJ4L-)j@(e!4CFIv&Tx{M_9^+#37e2u+3l zcpsqqB)t!~0h)Us5dMys=kJUC?jWv@wRP0%<$rgODGo!fs|!_Hdrqj|9c1F;{Neii z!t>=UX|oP?`Z_qy*u?XowhrcgchE159-IB#*ZOwEj!? zm-i{*ev`OPr<&d;XrAZZCxqt(uK(X5zV+9k7mjPa_Z5#TPJe5hiPIG66Mw4*|61|? 
zUE&h=gSB<}h2s(T)p7r;_alE-JkYP3cf{o-|AXS9_b(QgE}y6HKT`kTOQQ65lIZ`C zJka}-7wj+PKeqmE(V6-1{_yvE3hT{>hq`a>t5~(9t&+S)sO!w||KnU0#7*&c0X=cO zyWF4020pHHdJbnhiGseK+u3!jAlGxa@oJ(V*K<1?*@CR~=Q=R#JPJ8%&$m8=*o@>8S1^#?h&!JyoeCSW@@A55;`=OcP|H-M@TNwU7o%nne zd&h2PZ%O$7gVtt}zWDEb_?^pBQk=9X{68)Dzfydyz4q|`?d1GlVdWh<+jI2g#t-fP z309u!Ogt8b{bkPo<5f_L$CB{>3;hHBTO9G<*5RE0owA?v|25)k?X`#hKd1H{V5~~f zAB_L2#n4>>(6%MHOUSPiT4(PT{|~I2X6OIkbu)RIZvXGP zx#PO?zw0K6G1Tu=bJwl^T{pFE%;Uu2-z)i_yl%#Q_Hgyr*Uw>p|NEN6uA}@vH~%a< ztj8^)#eHh-dk^+p%*$_=u8wV~k&Y(+y`8V~P41WO4{crN`#&d16S_>aN2kQTA7pls z+jWcUW2%DFw}$6yZodSdNzVT>{RdHgA8?8&{XW9@Tf_H@crRerb7-f_ZNCG5j`L?Z zJ=3Yhf$|HS-{5q;)AO8O=JXP$7dySk=_aRFIITE+z0)^3z0T=1POo-)qthFlZgKi% zr|)uVPZ;s{X6J8mYGZX>vEI-IgQmdAi4-4z?yx6<&T)%m(MUB4?=(KIiz&lO9 z)v7iB1M|e^+h3OC?9~1q;Wjfn_@3VV^7r`32}6|I`*W%eRbH#Fo3|Hxe=a*i>A8D5 zkb!|U8#bK3Azi=f^t0Dokgncv{`&Ov4Qp0iu3M5<(lgFEzbBoxdU&#<-i7N| z2fc#Mh6^r8FW7*=K}&TshN^wunhSc)-*9%i=At#{UC^D@)Ye?QuG_pS*edA1kn=TL z1rw`QuU2&A1@lu%>&>hKtsu-GpaNy6(L6f-~2o8_+%N!ZY?wk04m< zV?~eb(aUW4+9A&WzY2cwUQ2tW6~|@O@OjUp;>tR)XXR5eExuO_O`}khLu!qDcH`3& z^}s2gXv5~XJ(!o!>$t8e%Z6sIo^3?&9$>8r8K2oBIdSNniIux=c5N)$MaR|>*BJqS zU<)UXUHBYEdy*Z@=NLY2I#~f8%}0K8^4XP-;&P^AtqbeQ&FQ| zI!d!S?an6!cR20@AMUspd?Hw5(GPwVRDL}J?sQC??Qg_Qh9WAR&nblx+tMXq>?=H_ zD>+fOoPlow<41W3_!Z9R);cAVq3Rxs|i4y4eyCiG&*$Gn8RtY>xPh~<84OEFa^^J7Ac z?NERA<&#lYYEUrbLle$u}N ztaV%XX0X;!;k&?kW+;3QSYxd9;sLO7RQN$YEBGwt^Cl{J`zmd zOADbZS~zhWj(iD8+5jd54O&Mz8cyLv+#61Z632!v*T+V~8*=)JEl0ymoTUG5sPyl4 z{iDI3MqlG2`}eutH(U>YP5wBPDh)q}GOHRCuN10SWvD1l6)16_nWT~H1BK&jqzGL&_v zX#*60n%)B42`YPBYnr|YWgMFxgN}ec2j%+IoI(j(vtmvTH?M@^ceDCQel~A`vJN%h z4W%E=nlH@D=HEccN4sleY=&tKChvzG1EsIS6ie3cVOK#XLbpO0_hIsbF&p+2l)2HO zc(CTRs6UgT@*{<+KU1I=LMdx`8nH3wR$JTVT@;s> zL(d7{*xuzDl1zv8`7HtsI)!5?MEPb1$s*M9S)%^Pq|GGa7x%ciiOdCHXiJ64W6 zp;gDHg(@R&hCdUWxqKa_tJrxb{Kq@ezLNRXj}Cby+zhe5>OhE^Gcb9Q2v@+$cly{|1v8#{EYsW%W~`EHnI{me z1>@-1u{@AbE1<7(th_qKF)>bF6R=fm08@9=Sy0vC71RIj>RL_AI-B>L1WY-P*r|H0 z{|W5rl41XsoYoplM*Ufx z+o3*jv?}j>oLgsuYvV8&uU6KlC~wWcXY&z_ojga_=d+*=%Zr>Zm5%3Z+1Kpe6!=-z?uuPvpaa3WAbA3 zy^i+)KjfI}6DhD#=9Z<7@&h3s{ zp>dAOCVnIrO6JE!PV>)3@-{z3IepPF=3Y*+kIgY(8^Y&@DOapH#yrN!@peA^6uJ41 z&eMaqrbBn@vY-=b~Au3USPe^Na#*MQ{1 zSjfOL!PIT6fDZ<QwIs`W#-3YLGLWv^KQ zzZHyrO9s9jjQy5Q@W;W#prr!tbKDF5GFbXmu*O2TAN&OPbKGA+?4Zt08S-Dj{RmtE zKL@5?T)LCCCQkS~JOdM7*%{sm)^$g?0^SFF6}T6yzMl-PLUsI@dnQQR(VXbl&KX$O z=qI@slKeDI*cp+6yTPonBQo%HVEQB<>nQ8$O&r!W#<<`%^Rq%8$ZH?*&cYq%% z*%okI=9oMi_X@|@8mIAchU{oOMZd80;W&$XLK)k zPsewG4+O7AemD4K;E#f<;FG{=s~@}?OdFaKEJ5(2JmL) zA7s47R5%6S4#v;18Tcb${2AK`{#VCmg6|V9C1Yu4{0|*>f&U=foQ&PTunhx0LS16q zuCe4dv*0TDIPmYu3rPDa#~Jv1#}%-y1JbF2 z-wggEF^06io6~=QGw{d2=!~m?nR8{0e--?7F!O3W#qB@f#Q2TRz)v7=21|!n>ikQ>2XX{Jl|rF zQ$qwS9zZ#)Hu80kfPJfEe%u)7#N+a9xo>JqYfF#ooB@?x;#FJKHNm2NC6{ze zyo^)CZ+4sL*u6Pk#Cyg?{yXVtZ+&Gd1!BtPt;m>tK)~ETLa!MwquZCHpC$pbHzz=b1^5^cuLM^ z;t$>b+umvM4j z>!b8vJ1F1ntQ@{{5XW{Kc1Qm@kKx^vtApxtid;GMQ9c~Iu{M~iDfo8qHa;2nPOtkh zaLf%%*zqq;6@QJ3d{fSRJji$ZJc20yIM(UF`v zyqk2~XL~-V`Ihl0U?e)~7dm$6dd#uA#&|rRO0I4%ynL2T#?B7;%`xg!PMaXz4=qs;MJoIaPI3}IM zNw@-j4YVgTHEFfuR^;b9PQmzRIvvDAv8TVvuSpf~81ORi8y#bN(#?)L!5V-3R1QwM z1N;qeFZhEvu0yfb{7?d_UCsaNB?Et!(^iAYq$*UlI3~sK47GzFp^q|^ft8!K=Yagt zG4efzIj(>wIIe>C4p_0@;|RwYSZfJ(|wx!uY&N(Sfb)2DdtK$mz z1CFcUf1x6AP(tpZvDEs0Wql66$votVApC)U)-5DmMp5pRdJ{+~2sG@_98Vil_;` z^0APa(p&IG8N_-*^H$G{wuHGV(flkqVH2x~zN|4_Iv*Qh)B(oNxoREoU| zP&r9@$d%{)d@`{7l3ZBh&REFb$#+8^!kH>`g5xg`2V2jO-;2)2ZD9B=8|BssQ5~8H zetaHEw}RVEfl_VqeaKb>oyp3bCmcVHvW6a=Q8z$e?ed>_TQoiS*HYk6j%4u9Qpe}s z59LAa0u zt8(Sfc{zP{Q?>nR7zY(_{7EP5D0V(RaUUD=NAk3DldWYtp_ZP_=`VaZQu+H=J{9mK zP>n|wd*8(Q0N%|ef 
zlK%ueV5@V;UBFSG^b$sz6bJ*S2M$q<79IMtm^^|HCLIx@(pJwPNwY1sTG`p zXFKiy9}LF+unhcCmv@4-Zb+ZlOnD9XXW$BWt>Y@VJ78thlq(%)VBIgG-%bMI2 zNWhB!lzTbR?oJu_tByONnzyvOQw90MVDe?BE#MzJz8U;1n11cl3uf#~OwpaH_7#q# zL`+6iz&oR}87%qkj{Cv;p+DK0NTwbFo&wI09|a~xtrhSx@HB80%y^b&@Qf${)A!OW zu=F<|z_-yA@MVs#0blJHpQparF+NXaT`291elPf4K|V%Ud4ioWcY#0Y_-?TBMDlyU zUkA5>t6;6E!ViLJ7iVu5!swaCka3WS?Q}8f+*aw_}nc~8`3hxqdoA7w> zobb$NPvW658dpJ1+zj`E7lI!N&wutj9DUXoswI0K3r25z20k9#0j_`*!@1xpcr|#Q znM(FNmrAbyOTP#C{$S}7!_oq<^sfgW7}8=-;#i{X3DPI8O62YY>Ax>Q87%#Oaoi8S z4;{vMBBDLN!HIb~F#|siCMFXr;6H$=JF%Cq{3uq!Rd5@1U&5Xg(OxMh{M;=A)2Gs6 za0UEQF#X@H3Vu2G2=-~H?sY1dvEE(!%9$gXi^AO$z8zcvzai935$$y)C)!Oj@LL>r zf^SEMyi6(O%?4={Rr({w^3lr&Pf|0pr_L>CoTO31D=l zl@Q#|SDS=e!DYVQQ~^&0UkL66&jp_Xu7WjRPGpOnfoZd}99#jf2s(z(K+bmunI_4! z^_(8zD^e^F(=I{2vNf4SJJYUnc?y0P@{`cXz#js?3S0qe9N!79fR0}B{nXHpg&!WGe(E>g2_i+;|{5UiL2oU!NlHl`oXkc`gQoT9n;SR9}CWq z%ZJ~Bmw{go{vBV<5+)|4XYK1*$@Ck*zXwbI9bm@nunPEA@E^fdu;N2(QAwtMj#GOn z$%ymxFFW1|{1BMOKdqf}a7?=S39HXaxTSjLwX)mIBG53Yfl^OZIiJWX2p6 z`@>g8XB?ixj9X29KKf?A2ZX#K>09W!p^ zBwPXCLPvF6sFRfGgOTBe8-|IvT>pW4zl#TEQ5 zkN<5d$?5mQM*Uw5>Tjn*4o01s1Txyp>&zssH68k0S0~Ns%pOALfSgV?gwD%zIxC0J zS)J1%&+GbgSx)EbA#|A6wSK*O2%Qh)bpCY+ov-I~elUd2uW~wnsjo9D7w1{zLcM;? z+IP^-Eap#Lomb{`UOR-&hMdl(A#`re=@5syZE3CZ9GmsEA#{G2)A?z!IH+Po_vYR72DHLkc+syH+|E4-*DvYC{D7I^Isv{ySrwbLdKDQ z(%bt=D97Hih3&mR0F{0p6tjCj1eM;8pK2{}LQ$j9DY6t5= zEN^%TbtX~C?d&r*sO+O0G&?k;F450@7{Ah~UQVn_uW^jM66%iTioBO;O%s#FmL;g1O$c2Uq_%_E?@JGmQFtc{NJep<{DwRYCqRVO87S%(VUU! zzY&~)+YzuH4zGZxB7akOoI8i{E76CYrK4-a^^O;Vw}7R840Q?4&Q&nuRC)`zAJH)6 z^nCqK&A6i9v}N?LzUK*MV;aOXn)^E#L|m+ogAcrT=#D zyTW&h=DZ*L?(jXLIsbxzM@q@aPIT^dybAnP$ESh6@A!1^Q;zX#&Tk2<9L^GZhNkh= zVeee)_CsUJMq@#Elq z9sdY?gySEBH9w^P1k^GRJL`lsf*ebAD(mF=>i8TB);UI5l!)8k(%7`HQ;-lW> zg~RKfANwvVD*W)rTEp}fp;SR z88Z1(0sj#{S+9~RZqtX-8?0+dht{eq_+-c@g&3Fcv12xYpsL2JV*!-@ha-Rn+KocR zk69R5G2u7mqL)+u5FfiIRpRw$=kVU>=x+(As1>Z3UZryOW(R`Fi6j%}b@$0(jfu-k z*9eC^Z(wYqT$X{bf@DhQ1YPTG5XmAo=Mtjy*d-j(eX6+y?_j?+_@cG;U_7WF8v%_1&;v#f)8x; zrPM2o-^#IDg`sCT-5RLcpKD*(glD{Kb#Fty2mVkldEOtr7c_fWPG5Z*s;xUz#vxnb zl$7qo{!Hu-LzIDuRf#rrqzse&4T%c!LlC@zPZfMP@(=KVor|pW2|gM4c*hkm^S<s^j6q9*JVn+d?-N}zbpWwqW zPyKo|pNx}=a{E){|ul4elwVSP(IGP3950;!0$vrY)jJF z3LVC$3jP4{_Rz{a#BEQy2}p=gIuxu&Uu!AX(WrI&S9~&bR^!8$z!mVhP}WH0=lm|mRWPwP zm-?lTLAsw{dcO7t9P_W`M11Bm#?qUA8zjB%gz8hSHZsx;LYcB=ghl%?U zW?;tE{OAOK!{rrl^r;ujc$t0`%y^rAKltB+9CwoWPjVtYr4;-O^ecQa`w4fz74S%x zSHX;#bTktCPjf8&xd_H0uYeCl{xxtFOfCu2)nxxu9cSQk9HVdF$3m|GeSP1j^mPkK zvOlpa(YHqFUj=>;Tmioc{7rB*v7)@kf;RAD;0ijsfqww5^5G~c_n-@ALVv_31Mi36 z6g(AvDY74ftKg%-PlViA@XEyE(IB0b$UlX=0)8!W&z%M5BOeyJvfxVaPr%YwzWfwi z0lx$M6u6pjkTr6KG0|avugDPn^08XLIv2-b0ec0*nUth}4?AJ=GoCwIM{~Ke;7hPR zhPClWbpbh6>X$BC=Yc;{xem9P!=^wtj<5?G)tpy||j({U={yyA8+W@PRqJ^B^w$ zhp@+OPEP*kocyGmocT7Rk;L=k0R6y7P2<}ElZodcq2PeMzysh+y!&SPk?R-nw|<#u zP+sd-nyYJh;C7D9$*1PD>I!WZ*{EO=xy8ou|e1!2mRJknw zISyUS$;a`~qro1xLy5ED44o6eqiCZOEFXkdgHIJ^(K!>m%JDkz8pmgW;Z5gk@VSo9 z0e3k@cIbtU&jW9C%yr<<*Ma5RdSViDrh=TVn0yQ0MbP(J`KM=sL-p%0=1=U`VY?A$ z@fmgZ^5hy1j(^V%(NK2m-Glr|bN(>yneNx+19da4*BWDv>={nNlK+`@mGiB9*MViG zJOP~YQJypZRDTg;Q`Y{GW0B@Bz8anh);^nK(OmE($K+WVJspb}$1>|wICA->@sw{2 z({_2Nen?&}$YXxWpJO?xTv+SA>ToR5SPQE!XKf00q@%GLZw=XZI1Ixsy3Xa;TJ%<7 zI)ICd-bGz4xzw%A9oR)Sr>IEW7TKHv?*!gPq5c*G%le`(2xE|#noa%v4c3T7Uq|k~ zE&eoy`6oN}y*?k8#a1`SEgyVb7T-^Iryx)HXk1j6&*Cp5pDG#n8=MqVj>X?WzGuMl zA&!w%!Zp>ct&OnxxaD;gYo8^bIi?)U$^ECc$r2l5I!D(f()GQ(SCSlkI7yGJJWXmr zw@j`q*^h>mQx8Z^$*e$^90)y`maBm;lS}ln_FSQh3 zXPeMY<&J=tyh?$lY;T?MfgzE;{+3dCAIYe@7CZ-#fzJl-2d?rlWl;{=x7gFSCF|Yh zzA8{aA>`eXi=fiuSaK;R;fzl`JLv!Y{ty{&OSQ}Mbf&Eec)meS-beX!Eo^&`FS)^S 
ziu|2m_iagKQy3S9Z3#JE-rw~<0@WIrBHt!V$20I395W_Mly?^r(8bURj*mnB5H>Z4 zxU%GN==cz?@Ou#KsI8w$PG?A=B~J?@9|e9!80RfMh5VX9`5&o_YIy;A77Wg;ImhT( z*i=MSwjH-Z$MfN^zk?&3GBvGDaE8w3ppth|U;6;z3U*%axEK9<9aq7ha@-Fd7BFrk z$NZDylxyL89A{kCo^{+wTjX2$Al$A{SK~oj<&?S_2jPsaALzIf`5lfc=s)bZm;2d1 z$5nJRKC0Uf{&c`f?PKt{%zSM~!HkdLUhsE=PNU>cI?lirZ-%N98AtgLa0P#U0>hHn zSViYgVA^Ve9IHlzQz%E7YhY7`oMAC}1>6Q^U2EzEPXS}6sS3@{18gWi%xFEi%2cQz zlRn2Xx63b)Oh?(y4cy!1bw+)Ql9yT$C)YVu{9qm(OMaIR#NI;Bqr<#h>U?6k{cd70 z{0G%BiWcd-qn%gU!)?kBAENwHZ|BG%%0KDlM{i$l>*);sJmL4e{1~>T&k>Yn^vL)8 zcTpyO7}>k|RQQu{D%ZTN@|^Qr$NlUdWFNW08NVM<%Fus&lXXirkNZ7^+J|r)uajbu zf^{tt&cGVCk$gJAdqA}Zs8G!H&u|sF*7ni7e}o)=fMb~B<%8tdI)1t12JkxJ#-v2| zj$eJmze{Xb$R`m>(b++`H-v%{Z^k}S9LwsyEkaVdOcI?LGV$ipB9>W!!jsi zun)xMx^OExS|iclCk3AcUJK5^8^ITXE8xo=_k!Q-xC*|>aXMsV0v_qO7o0k- zf>{?#ryrbwiS2%n$}x^BU}VzikbJe{6nu{34BX?m6YMeRsDM2t9lc=I{_?fhse<7Z z^SKr9onZPocMJHF!sTQx*N@75!lh*HEk0i!YD(r}qoOg!pSitYVp6^iTm}C^`sHL^ z3jU*TDVaAlNiNr=Sa?tH1jl=UHQA%6ciZzg!9>!5pir(*{4a`jzx z8H~%j9TT|AFBaxmKEb;DDq(E0&RniINS}5tf4gI@JD1_uKllPLV?no)D>pgLz;6T7?!pRK<1X9_ z{)FQy__L1t!S@Gz5ah~79H-!Z#~IlBeo!a)S>kg&e+st3UuP=?e2DP4#J+=gsPJBn zt6(|!g#Ag_WYf!n{G}Q4lc-CMzO(|SALX0DRq#atA0~WtVm343w}Q*uS3x%2%87n0 zO2KzK&cL5?+zIx6EvkUY1*_W&?g!J}qAK`TVC*c0Z2F7il&|14g4>a2;IWQ7!Fz)7 zVQ~elF~#oUUa*{E-Yvchyb??vF2=8`&H&4P6}-W5KbX8Nza#wK;i|WQZvj`3zYEOT za|Eha-4XPU$iN?U+zI|Pm~lFy0w#yc)H$LGemKaFgk1F)@?QR&ZU%k|ycJvlYwT_V zSNZ?rOW+>+Q@hF49vUncJa)35Sn=v2V{ z;Maqz;HSaJrYGS4;KVvIJ>xapFyvQ&Dcj5#mg&y)Dp-nRnvxli&HD?Nk{M0lC61fH zuXc=$&F2a8oVW%22FI=7H#)}lX2#HTQt$^H&jf3XMp3r|{1tq-8k~XEm(BK7k7V-? z(IG}NE69K2xR*b@-hj?E$gAL1oS)3@{wd#ca1GLN%|vuolcE`T3IgV1dj-4?dl%+d zdoOrCa(rkf53gZ;FRuYt!AB#!Kt&UFwlj9~|4!+YdYu+^;oFOMq z%8UzP=fDU@!)tVQ{MsB|h5mt!q3CAtQ9#<)aV_IeK8x|Ff?tWwE(nRI?M>UI!96$g zM*buzIwzs8_-5c!pwg*;88>UX7kmy_F{*;sLm$E(BtCmD(N~S5a-h^oXogY97?)D^ zoFl-az>M#;8^QFql%exeF#Ro6ps|mg=xoMk85e#NC1-=}XbQKRoyhUMET1^8WlYQG zJAN`)gAJ*OlKF;f^7u_>@*omE3P152vZJ4AS2kwbuS-&!>^&^zi*`&HnGT_TFo+{a$;2`$IfTo9Uj%ud2~~->;ZE1^@qI z?hd%oM%eZx-9`MOd_@m`WA4-7xo++=&OHZlgy$^Weh-3r^Lr491@Iq7JVnHR5$;(C z$FTVS+y~$f8lUIB2kw%&KZIYz%jY??kz`rj5{SA&Sbu*9DgMq5wc~(D9{xvt7Cqo6 zw0TMdEJywic%J(t!ZyQi6#5MFH3YxUfPEfmp-$4x=%2$cV3#-2V$nodmv!Yb8;?=w=o_gfdIc!QBBM!l1e$-yeFL<+l~?9Tt}S-R1^= zbar#z(qz(bwTeG&7*e}nUsrlp>U{}Ir9TH}E`Q*|fbNB6D#;~Lueml5JG>P6%~ zAwK-$lHbX{#-9}*{&C6g=3jIs8k|7ZqVh|f`FOvFZsS`^H=Z&55#6$^ZUF}EL+`*Z z+YX)&QD(YPMjx{Np5^wTlKGQ=4sMh=%gWzR;yU2}tohHw&GcF33vhqT+^iGsgKC>% zoSz1?e0=D$=1#%?=ir`21MYzP|Dqcun}+*0=*It1{C5rfqt8+NAUn+NhyD=pvwl&| zKf^C+cs}%X{INRqp;yd33jc4Idmh&VGD_CL1-Lm@U@hp*!F?~?km0A{-e7L_!=Vg} zn=&Ad=l%Vukm31V$l?p+g?l#`m|w{E`~mz*d7+(km^+2vAkO6TfIiNCdE}NH;QqB%l9P z^GAHoQ~yN>bQ2Fh$<8>RyHo2u?4z1aG!u1@gzF%%Y3sOpsNpm7k+*W zcKsg*MtmPeStsHC@N=weaQ_7Vf?vuHS65P3;Pd=R^l?nvhx^!HSUBq3^p})=haEQ_ zl*uPw&rBfhpT-}lgR1A`&(hcU&_+IakMW&XzEARnBRolu+he#ZhM%d$kvta|i=|UU zIy1)if5IE`eDY*%xKEy~#WnT5A7Q8)Wc!o9Y^nvgS!^!zLhQt3)e$~SL z-@u{|6bzo^uk)|f!F}NSZ^rj)@DBNYBjD=--=7=bmtuUE0={nt-(MNuZ^rn(9Pr%@ zzP~rV-;VM9PQZsM^2vWOzOTmk{%e(w-}?IGza!Jk;(x{u>$+AMcNTFtv=^SiJnQQS z+%lz0uIyu8n7?7C&1@rq3w}(yCA!31)qJ3EBUgg*5SJ!dkKN{b)8}PjW*BP%czJI&{-*)i% z{)h7|JfGT*$)VL%`Ia3$QJ%>!(t!tcBKcp`Dem72++PXYzZ9Z_lR&19z_K-nymg{!}S&KO4BetLjdQp+h76 z_J6DTZ{J;Yf9j)w`?G=jLg4;V)%~Xbqw46O8e{YcYm(~P4Z>+lS+Z?zb3*3Po?hD%2eeVo-j#k|-1@XLfDDV&3&P)5M z{%`xvs{5so{~dw+_{tw(KTyrEI2>=Evd7j(^j+ z3ICg}xNK!AT`UzUg-juDyt%@RE0;3q>9LHPC|Bf{I9C*iAEIKRR1qwWN@;ASOjk9s zTxPnM7IDOtDNJNtZZe&lnaIw$S-j?%PV?{VOgVRSCOeTTPcg>XcKWwhy)#o|B{#cu z3%&6-Vy6NmJJVTYG-G))fQ;dj_s6o8vKv2DffmY@F~~Mum?@+yx#?^ggzU^jt;b}3 
z>?piTrI`XUmMOa8u}XTPR477;GvzeYSSXq5N|lP6$yQDlO2^XK6Hs*-TAhn|x!E1t zC^h~@Qs)aZM^pS$jU2M(#~>rihyoaZ#FeZElp>uOn=NMpQ4kPCN%on{DR(S;%AFX? z&$9H37S-6%qowRoFicqat5SScm@8i+0ymWx)d*uLyQBHS_*g!jDa_7PvL&cHm&b1! z$qIjVGB=mbXJ=fln4TD`jCs0<1P)ZBL6q+>%fW-5nF;TF*gLkca#cAEkkgzGd&d^X z`lb=mMIQb>EPuB+*18e420FS}`^GXA*6m8BxK}jI3N5Vzz7Vmmf8S8g@UZLApM#x; z)7^bTJzXPx1N}Zb7KLSA5rTGhb@dF6xS^gW!R{DxZ{LBDo}qN7gdS#^Rwj`P!S?p` zr}rM{+~jRDUC#`ugTW&{Mj8Re7XgzR^~0gP zX0lp!)1|SK>0^_nY&Km!HjZXdarhmZm?*h?Au}c_D`m&Z&|F%J)&ir0Ifa^%%}y69 zS}$A?6%v7^)u5J1pyB;{()|M?>F%L{!NH#H^pTzsN58It{^3Ihd%6XXm>%ls93JQw z=HbDfuD-o}b*bs$fv)`)Nf>T$|44ddU?4qwu=BuyC9#fl4W_%0;1YtyW(Br{dZ>Gu zVEsKKR>qWJNw|TL-kzb}fnm*SC}!U>5yHQ(zqfm6NmL*V4Qg6TXovdyx;jzDDDwkb z(gzP67!kaL@9^HCgXzJ61ASdbme3G?|G?nTz{tQ7lIdos>*+m*_U>ImmO$x)Jxi5J zZ6EC3j&k2SaHxN285{jjf6rl*^hlj@HjcfWeFqK=^(;x+B-?|^eP93$2o)++GDEXv z4cDc-y=Q3PnZEw+foJNH2M%l2^u8M;x}y#oh(hW0HLm=X=`?b^9@%hvQ@ z=QH&M>+65I^FUvBdbsDQLp}Xn%f!+%T&MOk{wVm7{<^Yv9T*s{ALm|FqJhDl`oX9V z=(%o`Y;ZTCv-3IBKit!g4q#c8?j1-YzlVC#gF~nhb<-IDPu+%w+B%te|wf{ zb(Z`+NHK^`^VfGmZ4HDlU;t8Aw_-v}A4X zp*nq%2JY_Ztk*!6llLhI+f`o|U4w_x%e9EZ-Ag;EkuFpj9qr;;In>|V+24&!p-k%b z4D7+{cBdfh8%8hOv$Rz*kwaYrgGbVPJJD4wQD%3~)9HFWApKDU-Anr}sx>`?Oe~43 zAMJ2yzl7M+BdBCuOZZ1R(PPMiXUx&MIP^B0Xi0J(wQpJJ^E;Q=f%xdT-~_fk?XKzW$MsrOkki z2vx5;-E(-PUI_}lUute$(E@reW&#KL4)!e-E?{C(T?5@q`-&LPz#wOD^|T9lRi6Ig z^zwr>rgBSkG<;~!@R8y4!Or3RPF5#lB~06^GbouJ6$@CPY{pz@bCs}Z(`Nh|FHUZb zRu-G49QaZZPiwhSn$1*F%g)bI>zUfR)O%eDPi`{RI#EcM$EJ(<999;OKb~syv~+rW z7IXC6OnN#yUCvfoAA~d;Qf(Vj<=lI+g~?XHb?Y8;ZI3=IaX~IF-o~?>xJ?+hXH}`n z#}Mb_5g#$9@5GJ_=zmI7P{$eI8vN%@_cD9P{f+3ai<>&jvg zLSt5$!HNiLl)~gBC*;U4Jc`o%Ns7Qpl?`)R&a#&7bArml{#%H)r$mhz)Kt3&C26WDH zPq+#FpDt%gW0lNQ?V`?=#+5&vhO0PM!aN$2;(Vb*N-1}AszOITJBg?ff0|iAB6LbR zg}8L7o$*l#oXF;LC$c3h7_kJ$k|;e^fjIz6)H2E`1Mv`3CO=j# z=O$0NQg%9nXcw5&43?sP(W*v={I>MeSb55oPwFpL z{*$m0V68k>EYD)q%%z~9G^-_}*C?@K#VQ6_EN7%-)PzwyQDy?gY^id}%}l1HfjSc| zJ$Vwggt5}G$RoeC>L=QV4F(FDEET4sEF%`la#l4kk%#t+j6N;K#i*@Z-cp_Mr&P>ELE|_MuiufMd;MJPg2>9RSMI&45((=V1I}^4A{F` zUtBI^j-}R@kEI?@LCu@W$Jl3X6OQc6>@+>Y1#e2%z2MePQsHC5&;HuTyHoMunTzCT z-RpaC$}!sxQDDVElx-RheHmBz82Kn5l!%O@eW0bGHl#~)X}0somCj9S<*E8)3Wa02 zELqTt9^4Y5*&}fGIq(!hKkOL*_h@|avJC(yw#uQ8r-~HCe zl*P;lJe-`M=VZ4JM9y>u?Vy6;2RH+fS&O22h$=^a{6IiWh2)^7F~c#hS+PAK1Es9b zGKTF*^d#f6plM{J^-3RuYA}Yf zVp1}FLn%#2R&hMe%SB%K%KQIL7H*)k*)VSXG%guwu`IWNQ)` z?zzy-JrQ`b1T415DX|d=2Szygm1bcxRSic;v_Ph+(^w3`m?jqsn8cx@NmIeHRf0i! 
zhX^c=S?rAn!bXoexD%u<2&`lBj%^4^VSF3HA_CLpEarORT{(#XOJ&qXgZbKQK3}ep zsp`R)ax)kqW$uc}1wE*%lcrh3M>vD|zL0#+nMt2S-NTd&b5og(qbN_Zk5^V{Zqj>D zT)9&x)CQH+PY@I8zxUgZyiNp9QZfIHCKyf;DMWWe&=2 zY?~X~!A5`v%po|7DN$}>>lTniynJplD_wtP61_4;zchC+ku7wjnBHS{6=0#GOGbwu za@6<;57Nh~Sk%FhpfEF0*2wA+Bm~Emx*_TjVv^669OGp+xX4zA@aQxS^SnHUcPS(; z3OM>{?v1Xq&8b3u!seOc)GElom<+k`0(Pv^d-zwPu3%|s3p%S_rR;H3P#saBxvVP` zx#}CsJ0v=m$JAsDe^Ixy`h$50k;KNDUCg;?YQmN$6-mh)BmBygZa+|AxXg0^i57ww zO($TO;QAYl3zQu8-8?tiAs0K^Zn<_Z`7uI_1_ca0nAW31)E#4tjb{AdD2e2`vXW_^ zMxCC-lxi0B40Y+~BrE~$7^)V2Fd92`adN=nzc6bP0CmIW!euw>a;{)I*~|un#j*^g z$UoYz<|eT|9xFTcDIAmdi%}PidwdK<&5F+5c1&og7wLawsxPkD8SF=^C+0w47vG{7 z9as_6RW4>TXhIoOqIWCD+yu5yEpwFdgpCQ7Z*I|(1-t)aGHap7Q5`m<$>^F^ZBZ4> zEV9`NtS9L!>q*1W`Z7vt>SQ(9lVjNUhV{vdAxqi_ffL4rDixIxo9mvJ^L5ySDmm1S z$P)umdK%JjE-SylfuN#tmda6VPIHber6#&9W3dW*4Al(hSP2I`N@Bqy1WF;(e>zjq*t{qX3vh?26W8G?6vk2%eytbl zEW|m=qD76KEl){T$=*%c%!Djoqy{6msM27ImU;x3%xIk<0Tl}GGm-M%+U;!F7e2K3~6bZ>NzeGnHAq%m8-d{dL5?; z&3P;(3t;C+qzIY+@!9;b<8H24Mm|u@R8TXCMUoko7Lgja5j5}CjJ1r`dH`m)z*fKSeGI1x-6=i4C5>c7r5);iE%E1UzLC4R2f%8_F z7%^875oS?SDzh;Ap$WlSCkn0glL5dA%lZSXxyMikU~iu}rlrc}=?CAa%7mVDqtv;~ zz)C;j^>*GlCQwf2x3bTXN+5+M{hIb@B8KncG4wK5;N_?^&j3B_E!uEmrNl35p zAxCbqP|FZ_fQ}&n29+9)S}$m0Y+QF%qI8z}!N-rsAy>=~T($DVJODBBmMY_zKU77k z=LN001k5WJ&luIIAG&JS7*P=Yq!L$MocEL^yg2J zW2CvY_j1Py4yMKDjS?g6k(4u*@x0K-Nb(1Ls^Pk4Xil zzu0Y=wGmj{t~hn7jP4KB0$)bh09(g*URnb5MZ?2qFNQ2G+`0cA^9@Se%;4 z0A#RjHZ8l&)ISU@PUg19)aX9pj-ggs2o?#}7ueYLabP_mvjt;73r^3;yf&lSxK_7CaOSuYhJBC>wTn%X8@-3QV<57f`|V0}1@=gb4gZ1%AB8&qO? zj~qWVzeD}t=q3}zKot}jq(WNc)c47;W7*lF8>0l+*~V%V%UtMk0s&-RlH*ozmTdw4 zWzV8urhu7xtrt&d2R_(aDNe)WP|BY|`Y`I~8D!~&xwmbt$y+_A3bU|+m`vr_ag?aG zDXekQcK93fvV0cJ0ju@nFknE=X_Ony1!$`ga8mY;_5@}h?N0M=?T~&^Dx@j_?#lZSkal%7;74Wk?O`==J9}Fj)OTrcHTTQ03jNJ4Af+_~d zNuxx-%&ZRA>0BW*Zpn&#(~x6A&5e^*Me^uOyG1v|n z?CX}dZSWhpNA?9^Fa*N^fyc=to3m`4?kMAwi0Q(z1Nj#`Sr!t_5UD0E&UzdK1cp zl@TPMZJ!khg>D@%1|^sht?%U_El{+1NQ>c|9BqNUH#)i#usEUd`T_%-o~9=Q(6EB) zOS>;jM@;MlMtbaSV#?tmdAY!4kfW#&bV$9Xhc3RYuRRHo*-ebGhG|yGqlXeuC&tE2 ze)v*7lVh0WPEMpVFqFVca|H*7lHyT{1`Vh~5|#H+(h$+At#^Q{G|rZZo}V*r^!R9< zjMXN2T$E!)&h1lLQ&7~jcOikJGTkCuu#ejIs6V;F6C`|=ociOWAUYyA+m5q-tvG+O zejQE^ZrmLlGQl|*+iu12KZy!|N?F*SXYBN%KjhfTb1;ZfW7boM&?T%ga{6!!h1$AZ z;>b;G@~?>LX&X6X$dgT)#?mv5xry*dV{U>c8YvC7Ia9g~z*93DQql=k576-NOif~7 zNKbJ+a496x3x=BaTUVY8YvKNYS2y+0pY9}l*zM~ z8+WrVr5;L&yLy~T&tz`dDseB(u*6xrhC z=rYAq(80#tJWzw=H>42BCXHc3s%NA(-Fcv=-_%(*AYTIv0&1EyL7^zg29rVT(kA() zAqs+8o2eA0%B`9#+eE=q)6$6td0d3TTMmOP6<-EBC4m&4csb;XQ8DC-K`G>l%M}6| zi@egug2|SARbt#V`A6T{`XFPk76TG6*_Uv8HB(y*lq+t-?q47p;j4ybA>A;vQX)}&3M#T!S<^5PTItz}D9TRxeCF+SDm74FY2E^Rc};|yC3 zpq8O_r#l=|iWicEu?oik)$SB%xl)3Wx8~3*lTmi;sY}-=9VO zKkPCmOAe9*XB4wW^mrI*MLv@$UQlIhf<C)<>DYVrl0%Sb~T;*B#!h8yOIFH7&J&R-M;Ia7~iCNb68Ry%?&ElHnDHG7s4a_Ey#hc3ay4=YM; zI#a?l4x^%o)y+tlB9{Z7kWt(QYl6N#q$>!81`4PWr0~a>xCC)}aYv8ETX!6mEJq%J? 
z!r>fMsQ)45i!^GBS7xdfKkR+b8A?qQ&pQfE!fqjFSaD^7o^EB!t*45qO*DTluY;*g z)y(TDc}&mjjVXc~BlMW>CP8*#l?Did?O$*;xAn`~PV z-X~jJDa}BNyOAk5M2^8Jg@iCRA=eK~ymRKh4UEc?VkEVtfOQ30b9fhsWviav>jht` z%!i;&EsGoTt75_JHG4CGXynE0pBaX!4%P*TtLx|25ZbSQ<@Ys&lT|yb0$1ek50GqfO}v39-zIs$$E{QhkdO+pvHrFfxR*bGb~>os(R>IUHXmten2}ZzP-ub zfp!QGL%7^Ko+ts@Td?Dl;VW%89>_VHNSP_Z$beLl%40ZJ2kRA1dEjNSqiL9*w!jMc zFs_b4wHilNI;JK)VG-k==P%+tJFvs#hS%tLAOmM%xn6V?zN*Cy9JEXumf_$YocPdz zev-Dv%oMDe;1U`PXn4b|l&lD^rvo_@vT~x&34=#8xMhOU!FriTy0JesfRhTw6dSzfLg@55c^%cN~(2T>VcGhb9!Ag@Zj}7 zwMCx5LD_Yg?edka&wS%mwfv3jGsbi7VUW>bS zvJp*?Uei*M*$bD86gb)?V|~-5rB))ApD#d^jScd{sMXXYeAc!4yrniuHUl+itrX^6 z)uGi#=at1OfPzaG@k)p+6f|WiOSGpbdA718R_Ge_2nXkD(%v9oIEPaW3JWyAmI{Wd zgi6-Y_*rIc$F_Ay^x$?<#8I!ilw@6JcyoUvM~$dwxbV zXdNjuq=`{NM_+rh8fI!aykLUv$#?kRDN4)5zB*evHhZLF3c9Xk;|RuW8yjwvVl-0t z^khpqwO)QAqKymME6HfFP5x>}!urgO{-?)i_kMUo3cWELI0N*|z|j75&(pXZXE=>( zK+^krj#SHD4@fSr47u!qbA{z52Rf^$nMX{cyQxkxG%x1in~O}cb?+7fHH|{7giF>v zvFqR@Q$drwHx`YLoz=p}SF?hs7vF{y`ioiYMpYLNHN~Lkad@WMFc2S!X;#EA~k8$uEC{?ty)KXnP^lsg8Ou9_9&N1S}ht2phG|{ zqs*K*>S)#A4lBnd*%6FaUEw9gGh=I$%+)Q!B`TL7Rk{U?W%WUsGT2A~cVOkyIfZRh zs#LO18KmjwA>3%CG`{7i<*&X(wLB)3-#yUX+OU1a<-M5=y-?U#a+V&uikpu)+z5L0 zxVx@yXPmjQY2 z8ks` zNtt%Mip|$shO1W3xO!hbXaB)EX2vvIY;Lj!#W2QudcSqUDwat>6Xxuu)}F*ATxeZo z%6!nd;*~~J!&>%@yM6y4=OX0>->S$A80UYDWL3th^XeF6i(SZP)9#YBYsT=N3lTHiV@BvDQMjZ+`fTJC=vz{Meb@Vv9&5M5og0V1{G53>dCo?k}a7X-*pmM9(Tx1 z*ybS1o-*Id01&O!n*@oY6H)7d)}?kwhPcS8#NfIR00%OR!zQWq-b_RL7UJ0Q7z?Dk ze>lZWlOyR1Q)RvTP8x;(Dr?Q)&yCZHk1kfJ9V22SY-g-qtg;fP=CMi&W~lXNYB8m#)$T!- zQ2HsodR@Nj)obEb>o`P?9m=rotL zqGjV?JS3-d1ABZxa6$fbczxANNP+tbs|A@6~xK4Z*}vR5mG=SLDl5KUPZ=W^NGG^Wik4ezM;eEbm(N2BY zDQ;^meVtd}tDGgMo$}UBW;X6tqh~pFQ>^ZYO#3O}G8yh{fyW_EmWIRZnc`g7V4 zjf7E|EFlDYHLQpUX(h(3VbmXzKTt7tAGmPen;; zdLzFKfC}F0NcGPq?K-MkYP$A_$T!h&VY(9Zd26dzy^R{HAx!n=+9^51+~d`;{KU-LDp*g6WH>>M1vU0?WM*q( zZ%TFZVhXRI`(lFK{92Ex`$uzP6GOlA9w&NZ6?cU6JYB82L*3%w3f{v*FUZb+srqce zSX*Q_n9+r^LO;9v-Nd~-6rUUu4+>jdk}>|#a1N}y3JV}xTuXwO*rR0lOA zhe`ZKOT1RWgEBqSP!8s+;7}A|i`GutUD=^1tr> zcuh3Xwz;DO+6C7)AaIM__Q_*01d=R`AQIsQ$M|FR2szPl19t6FprrLYzZWg!{8$o< zL=*>89l5ZX4HmO=8>4V6!1FWX%ghSe9t+F`VPs}6sA)0s1{Mb5aPO4ka;#R?gyDNw z;6cOJtM7PHS6`jLs}B#Q*6W&iLu!3>F<|q4%@j9D@zsJ#YJF)oFUL}rA=jVWTO_E@ z@x5|J7b=KKrQgQg`h78)#AtD~&rR7U#p;KsE@^RU9NDWgD?q_$>>)3gu#TnUA8%Bt zA4!~>p(wV=jc#iY$Hj)Zln{fSobHt@8L5Xyq-0qUiDQ(9HNdmaIxkB2tCAb*mL4|kw$BoCulUZ(e@vU!|tu8LJta1^a> zJ-U(1%fyBKmYO1#DRfXNK(8rURM&dhV(eDsG0HAi<&9LQ${Xcos=Tq@Y?ZGO$4Ur( zu__;>zm&tVsOmkcmq?sg$a-2@Qmi$?)fTIfsH>YR)|$AiSmCD`wPKBMRIJrx>Ioba zD-F^0x-t6KDNuUfOlgwuW{a{aLQK~jVzo4uumruEt5=Bhs|Jm_yhu?sF-PRr7HiZU z){iwJ`&jFh#FDC0&1r4Dq5sWP8f&f9Xl}a7SPJW)^<`DMTT>%&-mEuDS_3o z{xE!l6@FOi)lLw54|~NBf1}35pmlXXWaTna%Rg<&5#S3|V@B)PZ4=dsyK6k9os_xR+zeIT3f z`eokdr+uNoQSjo!CQYum1+Gw7^6(L{mQ99r|5cN*p(Z-aj>6%5FZ%dwJlSgD)aXL8 zQm;>dacb?9`d*@xhE6g>F+Xm7(PU)#rm1Bo(7s{v(-{0q z*3t;rW8Ca2fa3XSyN0QkYb}scmY2!#il3#+Zsi!uWU_b-NH}okPxNv+4sJf;m=yI; znc^bl7^S2WTkk%}py!S;;WZgD;rypwak4xo-nm~UI*Ws1#Dd>Ygk@q|J{`RIud?b* zFR`?60l+db;`V@LD7agIl=^RM#TMu6GV3$??P^}<&Xc2cPojGXSWx#??ZuJz4bHL3 z`BCot;^rM&Q=8!fcYdItakWTl_wH2pFg`jv+}pQzWL*l|qD1^EqE;y;8!a)>rPVP- z`#5D&Qnz~7xAG#u3b#D*O)yw7<#rA1x$RDElfO4kv;LB|CULn4LO*aX^FF(gFHgMQa<*Qe?t_yEm;oOLPNl;$DBJiIL!3SS*2*}$cmbKY=Azw)w9GQUtrAks_bW<2&p8dVpczi!syFoUK_@nG9UzA53Qj5^=1bca=PE)e;*8u5Bn4a5bXrDFgtQ zT`56sywnrw_~4^lr2<7w;uS{ROwW7n@#Y||$BaDq2`l};89G~0CtkD89<4HDaPK7Y zJjoA4;(D;LqqxtmGKJIId3?bG<&6v+Md6|-an&R4dz~zaKiytYW4JhzmxAEG-4h0f zmq2_y;HNZdJ@5fZAB;LdULa=%`i}BO?M(4veAXw-+<3b)Ordy9p5P=j1q%Q_aS0?} zl(M2B*3U({GG7J71)6fVzyD=aTv}(rjQiJde+}RA)9Px)BUP6HLz+k{ 
zH`z&W-iIRZDP%Lyf;66ES?G=*ebfsGc$Fw@5soz%ev}ichu-YM`&a{KqsJ$B7>eCCsxdDtmX>87;8H)tgTp~-DFmy<{Aj1V z35f(AW}^^8rTTDhdlt3qV{1U@hUWo7ZEGj|c>vnROeFCE0r?=>-FHGY@2SqedSDwaKxgZfBba(B(E#=;As>1Y#YO7H_tO*(uMqM^Mq!2`BKIsh!x|tx zjj-Xvzx=qU_MwaeionHm7=D!Oua@*EyeQfGVS39t!tTd=ae2d=Oi?o8Rn53X>^}@C zRNmW)u~k`Fl4Q^EYFJ8y3O0^wr6VQc-DmmgEaw`+z#1kF=mWQF2Vrci7cY5@sirib zE?!ir<<1;9?3QLL{J<2ZHu_~ceDg$ZedBnh-LeoRujG>V^T|yk1V6wb?5Ld##R8J= zc(Cnb`GvON-r9C9%MrQl`M z*o!@8uhlaVy)4`}j}pF?h_`^{i$!jd-w&2~Aiv*>3P4HaB|DT3PFwO$bbgOPRTp)s zECz%voWw_Ea5cbW9umvPSNUo`J}RR$?izpYq^;dP`;~8Abd5V0ufp_C-GZ%jQz=k6$0&)h9k4p<_ zo14w&)S0G>DrMD1PllH;vl8<(zHlwfn)UqHIKJ|O8H)5P`GT9up``UDS?P|o|3UY4 zT$&kfSc6N;UVA8Zh={Pj+pqDRv}^Vm~ z??!20w!s^igj3gIoPf}aQR!&NiJQ!x{d__vZ&}Q4INnKe!So_q;Pf0b<}5l)%%%kK zD^0Bbx`L3|73Si+8XOb(+zdNj`N$5ltM5)bep#BGD=$*u(NA0HU;;=t26U6oW65cy zj7g2TI2M+1`MY&h`hyRFs}SyF8H-qaRGi}!#t8nRmGTGDD^VgcCu5qOIW~ivnB^No z41~LeaY{tgD@zdh5aX15P!``+<4sZ+=_YZdgnsZ3eH8BKXIDjE{_-12`hh^H3A*;N z2*8URq9f`Z8tFUOGlW~raktV)=g>&x<1rN_nHZNlt|ge1jnoGn{;i;Kd$z_}#VAOa zJ$9(s0}}@r#%kXR~IOq(-X} zhoY4_Y+>sPTvfYe_|-3Y)39e4{0irDe-nNArE!viGDu_AEg1liKWI{0B9{LutAwgej zMl4V)E1`sSUMu-lq?30{UYn97$PFap+guG$$$1%4;ccn<2X9^F|_pQDyO$d2tCEyqt@V0`r?0 zoB??!jwE^~$8?!5&?ae|96y7zj56-C8F)IDi*irAy*M6RP_!(?-yegeA@6&~ZPPma zW~x}RVxEl)ne?(1T+*cu+*&Iap?dtV;VrVx;_MuDJYLI3Q$q%GWZ~XwzuR2w)dH^4 zz`gA27tVHRhkBmu!QE|mA;bIVCTHjq?Bw!eW_2zys^=ZQ7pERi{I%XV4`0uAvOPXD zFvu52Lm6nVg1Q)jVjv;KiTktGWK^$Li$UvUZ5@fi--x*BVJ&j2X;B!N=*2uYS@no}Z*`l-u{_;`6R7xGDu@Bi z*E|kEw8_X425UilO-h!7?!;tyTE64M?~h3z0c!~tTwIXFzH(9xhM8Mw7P~RXDnA`n z{dzQ!|AI7#m}$xZZ7#ji$>O^qcOeEwg+Q8 zzt_btZ^6)d;BmezR6@M*+=(KOxWRoF{t{jJtm^x!!y>UpT*-s+AQCQQY^dl@|GLYPR`-vSn8(yofygKB`AH1!rz6Q15aALt!naKXK`$hLgTRzBU!swQ7}W`7RBe87`M z7?GKfe9Ku<)XIxpDYYKKe3DB)CB{(wYqeXc(RY&h86E_e4;x!Z`JAGCXi?m_O%;Jn zwrPG|lWqjd$^Fd_B^;g<|2UUu)c<=|JGZaJxm#a`=St^pdyzzP;28o9|2Va5UyI+jJNHBpzwdIcvmL*q&JAA1?@v1S%vt$9Jm}o?u>g+%|6G8d2L48X55o;zhz>pr zH=f&m0sp`35+sR!fNyVbiHTc)Q%a9;tKn~ViAoc)!MMmj;1bUwzX{Lp>1_d>{Kyaa z?;LZ9`MZH3zu@n7i4U#=9yR>)E^!v=C#DR4-6dW`IVR=||1XzVxDEKM;vJ8>#5v;g zhCdAF1HhLQJ9`qBjsE-2^{xf|N`PMne9iDbIkye+CNC=9`4;CoUJq>X?O=HDCoR66 zyHL(=0>0|p&b&_z81WJBJnfP{in2>y5Abg18o(OSyMR|3ei_vY={K}0_AxfJ1^lFs zDsJD3szr?aFuwM@OP&XAY&ZNPj(E}VAGqWz#Mf%+O&zuLrZb8k zJpxsOzsbTs`U@`k4dCWBr9XBXRFC{OTYevzat)27L;f(o&GW{;2}OZ+-)#ALjJOln z(i4of+Pn}8kNi^qLTCI}4F9HUSPi_w^0Vt6*U$pI(&LFysq)>`ab98P=2?h zH2kg~b`7kLw{$3W_S|Cm+4V=Rp&j+>mO@M41dw^E3VTMtpY&eE)aPl)yJ}EzN5VBc z3wc(VJ}B=8A@8c9(eHB&bEMDJ*56eNN{`YreRTA=#>OG=pSSSD(C;eKhtSbKth!|M zpLC6m@?D9=w-|&6eKiuY`q0<{`B$%0-1*0@aV4?oL-3uzYn8qy;Tl(y-lo{gySm-z zt*+r~1E6;V82Vjp`r1P}+^Y{8{W)}>ia6wKWpK??iv;`o~>R8 zFy%d`xU0!EyiEG}T79p+7}S4^C#x3~cRk?TW%LiLuNvO&+*S0~tFPCFM-httx}I{4 zt@neLGQ8j#Hz53N&=1>FmxaHrBNiSyp#G!qmj5mv9{HE>zmDnDTDa$9;h_(q|E)`` zME$xA`XKJU#U)l_%)ITQ;kUWOo1u@}TWjgJn|$4#-fr~$E|Izy{3dTihrD6@xBK*b z_=Q?}i>2r6Y4PzDU1A#=Ukmap@%@TRv_oGl^8rSFTYUWg-6eLBeo^(+^Pp>N+W>sY zaMm?$d&s%hAq@4|^9c;S>wrfC{`Jn?v9gx`j>EP5clh`${yQ!Q^oJ4tN`Mjn9gBv4 z+ckEhjPGn!ymuvrdg%7f4#OR;aToDmZTP#KVN35WlW*@FrUoG1g)q#|-rsYL2TJ2dJ|6L~k-fz0bLDJ_`e~4C|yXF<|>vV}PBy6s|LTUS^fxpKko+m`Q5k@=X998gd zyTlm^xi&^m#puWv`Fk;MD{lNGRyxqB%?*Kk*m~`}~3CrJsl5^K~0M9Gk*^{vR37!0B zE&R{8#5T&apt$STQ4~z?ywR^XNBwaR+lwYIePw2=W*JBKi`j|L?`r3F{ag^R@Kqvp4V$2s3 zgG~Rl;&=TgUB5Rj#PB)j^2hQ2S6w1?JNVBBnDRhBr0=)o{;Zv#GTcn19sZEi9AyDl+`{57Wx|BFjZLH_2B05g2A;xY6c6Qo=I$96h51${SL{spfE z9#wvbAy4zX#plyMWARO)>tp;2hJQrc`wEZ$(z(;9FDnf@d$|5&e*6O#!Q<;)Vh;7~ z7RW$)gj)?4VE-X~%J2ouUr~Oy6b)m1_ygqcma~d8ThPCtd~R7VdCxiLu{JmIj^(vmY6ZWNV2fk8E=X#3#p1!Kx z@NSp*IOeZh&(zQ@{Dh}p4d`eOtF8qY>8&z(JU`brVSHQ<2%dF`(`}gFUkEVfi?>Y~ 
zW_r-iZP2IipT|%I{@Z-`e{hKfq<5R8mwk&%{OJ9dKSOck7mWIJyQQDq>k{WS0=F2Q z81i#|F8uGo{0Q-LejZ`Sb9=Ah&toV=8Qwl<_#a*3^Q4akbhNiM7XO6Fw8rw#v4485eAK9BBj ziC3VXHCL2=6zOq1S##Ae+RrzDxn5v?kN&eu-UWGDEPqowU6SjO7L#uZ_NG>>=UPm@ zh;H&t&AH?T=(DA$@kR6n!#|0k8~s(wxfuOokp5cGFW1svXZoDFB4 zlHor@f5P~#DxPjZ`-gttdfo6g^be@7_e1|&56(O)QxA84%J6r)#(CuD{`LTac7Jbx z-wHe$VDiry{(0BQ|Y_anbEe~ABo=NcB8fRP{K_CIzFDe_+qFyn*#q!-?V zsUzqQAU%&c|9;T;ojngOYI+g&;m5I_+2!2ZR$BNkVg7;m-`1ko^S=%Hkof<=xr=v# zUJNkOd)vI?VzYBskpH(Kf8-ZT{<8tz0er#mA?Ln^@U2Kl_`e(TUzA5{D!`k7!4T0g zXK3vWFx;(!0e%YD&ZMpnpaB>)MP?jP%#F8^(UX{hNV10?hEpBjqox zL4V8m4;y~iB|C^m15ExY!{{Hnk)L&@j}kG)`*o&|(hs@h0hHIeGsgcLSPyIiHvN={ z9|yh=&{00?E-5bGh4ljBU%xiM(ARp)pR;FuyU|a$LCL%-{9b0}Od#(h~g&9p(E_D!?e; zhguDfW4!`>KLnXr9>NcOKV@~=)}<9L#AJ$BmWOA82xWtA`f{Ux)xx_ zvtgxTh$YX4wTfpCqCZ6V4ebGj@EbY;jPy5*26zv!$s^(S1D_7)PXn8Lv+Q4?zYXUD zI`p^Ug5j^~d~Cy&06zlyVt|L8+t?Q1cLJNdCmw>`3i{h<@(A7zJg@vGFkj*PZsS6L zNxx_q<@<$)fiDI4QQ)fqhJH4hK2N4x;v)DrnLL8Qzo|p{Pre`XEz;)#O!{fX&Yn$Y zjQ%y3xC(u2T2MUqMxAeMvh)O_K5V*f^j^%r(B3w$RE+kPxQ_O=d9Beu;F6rLZ*DdG z6Ic@yo4j+x7!Nj^{^q{w9M@x;2aVt3!-oF~^9RVcc{ISU1D;ZR>Q0wDgZ^dne1Op( zZ?^m~y$aJmYvGBRzUBAS+hLz!`sXb?F>ENCF9n$N%Q3uY_&u0kF#YQRM*3~0AD@0( zi_!%n{kFBW@wJ)0L>{Ey))Axk#<1zf+0!;^^j~wyD|aHi&kr%=Z@XZ0%qPEwwAwBP z82W6xr1(7#V858@TYlg3glqT;WN*7_bmCipuPOH9`NQC0{~;K3oMsgKo#`E_107IVkvx=QP?dKHx@x1*~fFAd^NqjP*Gy=d6R^G7Yc6T}#QA3Yn;q2EW(DL!fA`J;;g zCco*&kLQoI8lB@i=#RB4Huby5Odlt0Jb!FZ>3%$aY$3p?506>;b2gqoc0uWWJb&zx z@!NR**cHPVKRKR1X7c*+{IRP>|EiAXs3M|&Kfdo;Y1qc|T`d8AopZElaQyJ&`7X_{tbwi#Ce&`Fmb+O{<}g$J-2l(KWTCfO{3cca3Yh zi}b^akN%izT1&iO^lh#wMSRY1!8LKN{rH6d-v@j}(}!5{Jici1*!*EP(wFkE`NQsZ z!#01|-4S4esP9i)4lvd;Ph1Hw!!H{CO|(zOe>K3QUkfne?{EP|{2eO;jQA-N_4h2}gS#VT z_!VqbBmR!o0F&Mp!$^SP-v|2%?TH<|0Ve-o438Q{I#cAI3NZPLF+6YhQP^b2e>%YA zKNG_XhDXppkpEnO$$vhEFB&d6+0X5`6kzgSj^Rba&trdv@m~!v`LEUDPG=Z4K%SH6 zTp3{ScjCY3k91;`8L@vl+Z3bzx))J@JKObq#QXjT{UMk-2Nm=D>pa>|=V8U~Z*}hD zw*nUfjQZI*r1asj?gXAUeveNF80}%tImJ9rOMB~{Ma9GL|H3NZ>$SM6 zMdjc5vTGVedR@ISI^<`5hrjHaSRcBOehoeo<3DTk>#iw}^t+I*@Oyk-@yNrjse<`N z*Tq=)O9377bUVfETd{RX4E-=ZoX>4~miUU{pTbt=T43Z?=)`ZpIii)d@%LDMN4^(Z zm-m3)s{9ddkMVaXo%#P9^4~LR@&B=F;{Jcn=@=dQVEQA!>6$(d`Fa)%|EX*GQWvo0 z|B)%zbdljNE8W@CWBC~&Kk3(%{>Trwrc1zkksrqY$RD^SuK)HT&w@RDDn`Gc_|YS- ziRa<=S$Lf9ZTbV^!%D~b-lnf1u6?75-#_CLl{;^EQ1G_(_j*uDKENJlU@F_v8HB2X6=N z4e)BQFY-}2|<+ustWcjQ@<%KX9jOe&?Hk&&T+YKbGeUzv>b^JiGsL3|~=<^Tdgk zEudeGg})Zi(cc_Eei>ia11@nF%8z3dv40MvjQ+PS(F*www8!WjMt=%x^ZP*`jnSu! zzJNB*_=}2P_-pj#7%vZ;QT+Xoh4$wICVxa{L9s>W22DPm|C-16#CK`f zUwjzv3v2+>;G*HbcF8u3PlJ$G=){QcDacEF=1sca`4rL=yxk=`P~V<9W4HkOKe2^B zgZCwR!TZ#8qZ6Y)9Tv`J<^htR? 
zI_VOZHv(T#d=K!}CV}S(j+U z{Q0ou@8iVexBPt^_LP-OZ^8J9ncfA(pMX7p=i?7s{yqU)#mmUwVauOj#)|C9suqQF!Fa~(C`uL zACW$4`1@Sqe$tBpW_)vozli=AWpZRbz~nz|_!XDfK)UHqgvFmD3rc5yGKKom-viI2 zQh6@3ALqLAs28pUVVKN#@!|1i$)knhV&<9jj2w?Mvs0pFi# zx&z;ae)VxD{gHz{1)oEIgZk~D&j_RcVTBX?C)l4P9r}szW?crfLXhtVIpbBf{8 z=e*IMcFDEibC(RCb;&m~zAJ`5?~*CtM4MvtqsjbTz`Zd%Xt>)Yi%2gq6<~%(c~PFH z-sh4PlwabE;z3J4Ve$=H`iYB5=X&Eb<1_i5g4}$6CvjQnTwnYsaMJi8xjaeZANmN| zJLX%-Ys&9&1Gp%Ue;QUA9qZH2qkS~2jq#@p|GRS+AWuVEEPQ*6-m7?cFH{Tp8qOF# z>D*>x+hhD4N@xDL{%Iby_-*~ud^$!yWAWMgr`hr^>lv(nnlCEduYZ~s z6+3&HO@6=rS+UaiZGE%C*6h_01~C!}`ea>zgR=Rfm<|>tofZ z#c%7ERVJTb&#W?iNBn1%->+9znf@bu(d4o9%c?6DzpY#XZblqI^K5h4`*E^0;b)-S8u6#Mnd>cQIh{rh8n{jz$lRv)V`#Q05q=Kpgk$g`;Q2%EfK z{?*r&&itQ8{k_fPjWFV1``o?gnmHf5&E$Q0t82d01Kd%Y{%yUAg--q{!yj?YU&Hu) zTTwCIcWl0l{`xlLhw)QCSFrxM?Tpb6y5_HtzM%Pa_S|+^=@Gu7nEA`6z;F5S`1X~x z{I|C#X8vf8yS+7r+Z8|YJFa;i^55Q}_~|dW=F@xhJSye&&+Uf;I{D`mKl5#_`9M|j_7_~D@&xdi0Jj61KHvVA zIFAp$*IW8tpRYH4Kn?P6O3V1XKHrcsZ2EkI&!6e@4Q)yn{lWbPq$&9|eZFDQu<7#+ zvHXAAg3&F%-xkaN+Rs-eUB}Dug^EF)cCwU-*myS>GMsO z0*t)9>55^~=iQdxJNCHbix{u(?ohg)Pv3pmu+69Mo>KgdF||kBT{MjM4Y*%+_vsiu zWB6k@|4aIL#d!ZM@ujuE7Y$>*cM1LD-6k*XL0`d~{mm^F{$-a~1pUnj!}9U;w|IHJ ziSuD-zi*jW`a7{6yOsi8u=p*#w_LFBuqS;3^WV2D8iqaTI`aRP>j6eu_pDU>&Iy;G zJ@y{N$@KkmPn*#{=aQ$fUb?3vz~mn^%=6~R|2-zJ*Y7>^O84uPd(JBUj@QE;h5X#J zpxCeH@<8Czu@2TTZ*~%BHsCa!ze4kf3COz@h4GU?OeCw;aaH%UsUYnX#_9n5pFmB&q{FD zIE7!N_lW7Iv8Z^brPsvxZ^fC2r_;}&x+m{zM^;++5w-HqcMC%4-$kOQJ3L@T-c7efU-9YQuBx9?H+T zcXDl2{?%|3`}kJ3*M?u+5et77{HY*3(j)faZ@2K>m^bjb<1+q0e;&&_;H(>z17mI9 z6uI!U7@kom_d5;14SEuB?ti=b)7B#R`xL``R6|cNUh;c-ui^vSjUV!m?)8;eti`O` zqsi_tF`{&k}-*3vuRE#Zyck*ekI07Dpm$7n44d@THeVz0lBi;CZ8 z<=U~R*thRar`XqzJ-zTieewO*9)u6;PgkpA-yXZB3|s%!brtcgtd?(gTMUmX_Vu;< zTnt|^ek+e|#K-h~f427m=t2Lr4`~Uu{)>B|#6Exfm>#e%-+kABgZ`>_t`_$(KG1!C z)yMJ;@RKRUzCJyP^eCV2FP;QHvDeT3R`3V?6ZZy*ef{5m7I+MMdB22kcm}TnLk3TO zYAS{)FX&!>vgUjW zEq=N~vDeo#i@-tt4?D#^KeX+$fA;yOEkEq94>LU5>t+PRa|Hjx{`g1-@Y;YrsMyC( z+j-c3(pDb!pJJ2V$e7_D_K(abv9%-LUk<#eyxa#Oec(qFCx|gm@a2$P06tLdCz4kT zTf1*4D)w?VTtYmkN4{M)wkh`YDjL_V-8EiAc*yAW($pTqQ;L23P3L3y|8MR7W8^r` z!;asQ_nRkaEzve5(b7nwB}%5LIr2!JB2pU3`oXA-JWhpJY8daF$dkzO#G4$C5{Pnz}jn6MT9~rCWui81)EL)1yKqX*``iS6hyozj6`YabkN8#lQ8inmTWm;?LP0! z`|kV9S2`5|`a*Z_`+44X-XAlwv%ACHQ)%1V^JYx?(~moI4^E#)<{n&Sdmfp4aQeKN zH0x96$y5TH`AWTeoK2r<+T3F^_3pgcAnVbOGxNNwA7|C`s(PH6dszBBn|Zal{Y<~P zy}9S3&ogt6N1un+6wU3gx!m0T+DzK3*9F(+wcYW!_Cj-eSz2TM=AMK;Z_Paj{kq_~ zH?jSoxxIPMug`1q9$%l=X1KM}dTIPYwGtnFe|+6<$g{q@&uJtHP5zZcZ2OIwubp_? 
z$6IC{HjREs3by@tR=%yD;V{6G2llxpAlwk+1J z*F2w_)RN;f`K}b~e9S#dm&QNk^YIO@`uOO3K0ZC~<72sxZ#d=SQ|21r z#x>`eP3!)dpXZiMb3tX z+iz6!8To>0|FCVp#pH56j9aU0;@bijm-)H0?m9pJhvw_3InS$pCS5C^PyC^jU0n53 zwV(FwTxs-L-@l8Q>umKNLaM5C#O9KXnb)8;-imyD-Fn}T`D}$r;}_(;%r~Xr+V8Kz z)vvMN^>K5}^-1=RdB6I;B=KwC7nFd++iS$_`z2jF`4N$g-;(>9@zTdfRNP$SU3>F< zw!MD*PMgQ0>qqUM3G>?D#>Z1B+cd7`KYqqV zrGL3@yY`c(e0=nxk6)Af__Uhm2DPqpvGM62`FM5R zxACce;p5fy+{VXNeY|>}vGM7>K7Q@%HYnM|7ks>};*)bKo=e%L$^Rw=yFWi#x~QDz zCsh1ZAD>qJ+#LA$Ru`4?{|z6Xdeg@@eAdU!>nfMVf8WQa)czbZ4eYp6YX3}}^YJltyvDBh zcrY*<*J<+qNN)SN%>^Z&FwZ+S-d6J;HP>MqZ=ARP*2k|?^BMVdwV!_?ZA{uE`|VN5 zZ9g~YdH&eP+kMlxPNU|$vh6<-*`Q=oFZ=inZ596$KWHQO9>;QX1KD&37MM z8vmh>e=>1VdEPPC8QXrJ+7IK$eEd$eKgZ3nu2R__%q!YRA1d_wfmH z?RRlCPjipf#X~b^n?}v+Rogz?}eSA!vC$nY_ zHjS$DYG>%8a((-}lx;sd-}3Q^zf|$dK0a=quWkD|HUH7GDz4T$tsYvbQW{6!z%-ShE@ z-%{}lK0dzF$M>lHFlPSE!H&D2t_EZ1C$IW+QT^jv8A2;7sb!qIMs<@i}#8WHRFZuYs(#NNc`uJY8f2QwI@uz+J>2IsJ+7A=kecXHp(xu58 zeSEKutNpq6f@>|;8TI;MpPr98?)zT%?I(ZL_rFi|Goh|a4<@R;Iv*Z*)5j-n_WeAd z`WgGCk3YEIx1WksT+gTD;}31}?I*tB;}4cTK7Po@Bi;Vjef%Nyc$`xE$9xySrEztA zcvPQAWkE!$Ovuz)r_}9Muld7MQ`{jvwvu)?b z6;G@4-+Z@Wn7*mnKWC%2%lm?pQnrt4^VxQn#(&<&&1d#q8Z)m6ZTp`~TvX2g@2dEI zA73{sja~n**70?+zDe``&c>fkeETW0b{l_2#mE1#kAGerm(f4-@n>==+cfrNDY$;@ zxTb1My&f~4Rd;FPt5S09x4Nk8&*MJ+bmrq5cKNvZjJiwX>hWkk3+~f;f4qE9N)rD> zBT!*5ahAKWM@oN#6`LFLt+^xq|OMKlv ziOYXF-BL*WpQ!n`ab&%%Z>afXcB?@8{|(jt8XK4P>%K3YZ?txqv|slhReN(yGs*1F z9V+hjlbkoLTh+KvSFNk#GIEz{KW5`*p4Y4PHf+|v{#Ka}-%kZHpGy+ooxXJ!re@5cB*mf?H^I^jliM#XJ zjVt-sCsf=WXNgb0uHtTe5}$fk#REHliO;L}Rv(}Il8T2uKJmLM?#@r?XZ$S{k9_;_ zM^t>jkB{kj#y&p!f@<%MyYycrv(7(}A9LPFT9?YN|0O@pg_Kpj`EJ0-ANl&~K8o__ z8L7MM>qp;`@}GN~mvke4p&Y>%#DDDTuM6NK;{T!Q&3J#P+>`Qmyv_0&-YPb)$L;#e z-{#cgU2n!${a=4stopwp5MPlWyZsHZlvVp1%>GmD?=V*Dy~Fgcyd@A1tM=9PU7bJGeKxZ%>~(9VFJr-T z^Z$0x?KhhJqvyL-tmZosi`D$h^W2BloAGq}f2bq$Z|3*5?NhRmLVncoug)#y>ho&m z^@y2+$-44O)z2g*C_i@pw=YOp?eCAi-t@6;_5>c6^gtd7UcW_-1O&1V~R zf1fDDs=fIvqi%0Li>Ujr&L=hhf2dEMm;TK0wXaW3n)Sb{Y~E}CK)ICi50%Ze;XP$j z{*m{Krr*C)ZcF*^mCfs;tIAuYY~IJ%`MfwM}6gO?uHBFLQjHzb<8SJ#p?!*<8<@FG=~b zvMCRg-;uI;zOe0Il45m!JI{#C`Rlw%%HLPsB4u+vx%v>^3-5=Yf)jWNJ_WxFza}suGVAsoRmoWLoZ!Cg3q3%CdO;SwG+T|F$-`q#HnAHX3T z!7-e`DV)JwIEM?k2lwF;9yDD&4AlBZ+o%uV5RTv&PT&;I;4YlQ1>A%Ca0w5Z9uw;G zeXI=!a0o|m3@303XK)wJ;R5c#eYk`NO;>Mc)$upq<<i&%C|C%=H12}{uIEE8Ag)_Jd=Wqe{;67Z!gQlz7&uabVyZd^7UmL(79KkW1 zz$u);T{wpexCi&)5*{>tolvj$x;7lZAsoRmoWLoZ!Cg3q3%CdO;SwG+-4^Qgw@pBu z5A6UB;Ruf51Ww@$?!q}-z&*GRm++uz`Im;qdOy;J12}{uIEE8Ag)_Jd=Wqe{;67Z! 
zgQl++>f?QV8xG(Qj^G$h;1tf_E}X*!+=KgY2@jgSL8#YXea=&z|2G7v58()o;RH_M z4DP}?T);iJ50~(u>5W3Y{*7%ofI~QfV>p3RID@-z4i|6_?!zTKX!@fsh!Z}>PJ-82-@Sy4H-$T^-{;@Xd12}{uIEE8Ag)_Jd=Wqe{;67Z!gQh<& z)W`qhZ8(5KID%t1fm1kxyKoK{a1ZXoB|K<)lTfdJQyUK85RTv&PT&;I;4YlQ1>A%C za0w5ZzEP;xe`6aC;1G`B7*606&fqSb!v)-f`)~;ln!ZV>*MCzR4&V@u;22Ke6wcr- zoWlj&gZpp^51KY_P4)V3Zo>f_!Vw(937oQ7NK7M zEp0e}LpXwCIDu0*gS&7J7jO^m!zDau`c|P{|E+B}fI~QfV>p3RID@-z4i|6_?!zTK zXgUz;^#^U(G*GV(gAk737*606&fqSb!v)-f`)~;ln!Zh_*LPbR4&V@u;22Ke6wcr- zoWlj&gZpp^51PJRsMmjc8xG(Qj^G$h;1tf_E}X*!+=KgY2@jgSL#Wq(M;i{{5RTv& zPT&;I;4YlQ1>A%Ca0w5Z-XhfN-_nKyID{iOh7&l2Gq?-qZ~^z=K3u|srp-r#^!h*9 zh66Z+BRGZ=IE6E~3+HeF_uxKU!h@#GN1^ok?`*>X9KsPC!wHiR z)8?aJdi{5`;Q$Wd2#(sh!Z}>PJ-82-@Sy3fLcRX2Z8(5KID%t1fm1kxyKoK{ za1ZXoB|K>QZlPZP-EBC4LpXwCIDu0*gS&7J7jO^m!zDau`W~TP|2=IufI~QfV>p3R zID@-z4i|6_?!zTKX!=t^z5Y+N;Q$Wd2#(sh!Z}>PJ-82-@Sy2!LcRWNZ8(5K zID%t1fm1kxyKoK{a1ZXoB|K>Q(?Y%ePq*O!4&exn;RH_M4DP}?T);iJ50~(u>Fq+j z{_SlzfI~QfV>p3RID@-z4i|6_?!zTKX!>5EUVruXLDc)*djr&ma0JJ20;g~Wci|i^ z;2zwEOL)+9DAel@+i(Dz4b=V#BRGZ=IE6E~3+HeF_uxKU!h@z~g?c@+Z8(5KID%t1 zfm1kxyKoK{a1ZXoB|K<)hfuG7M;i{{5RTv&PT&;I;4YlQ1>A%Ca0w5Z-YL}U-`R!( zID{iOh7&l2Gq?-qZ~^z=K3u|srssru{c~+NfI~QfV>p3RID@-z4i|6_?!zTKXxjYA z0lofRZ8(5KID%t1fm1kxyKoK{a1ZXoB|K<)w@|NtcN-4i5RTv&PT&;I;4YlQ1>A%C za0w5Z-XqlO-_wQzID{iOh7&l2Gq?-qZ~^z=K3u|srssuv{qt=&fXxQxrlI}(_xyf1 z5v%Xp&8M*We6RZcUG?{kvp3R zID@-z4i|6_?!zTKXnLiR z(+>*u`X6k=0UW{+9K#8m!WrC!bGU$ea33z=LDP{?uRm(T0UW{+9K#8m!WrC!bGU$e za33z=LDLTj_4*%b!vP$^5gfw_oWdF0g>$%odvG5v;X%_63-$URZo>f_!Vw(937owlyT2XF{S za11AK3TJQ^&fx;?!F{-d2TeaJ)a!q=4F_-tM{o=$a0+K|7tY}V?!kSyga=JOCe-VH ztPKZn2uE-XCvXa9@T+3;p3%O4eC&0&Csu!N<*`0|S)9m^+y1?#A8(5neEs7AY+ewn z?=wGs06qvW!KdM~@N4in_&j_Oz64)^uQt7Z2Hp&BgZIJ*;DhiId>TFrzXqR!&%+nt zOYjx=YSTYG18;`6!F%BY@IiP9J`JCRUxUxV=i!U+CHM+_wdp5j;4NbHed{O84PNa7 zV)gGgPaK4o;M4F~_%-+(d>+0CUxKf|SDXGvGw^138@v}j03U>x;M4F~_%-+(d>+0C zUxKf|SDQXC18;`6!F%BY@IiP9J`JCRUxUxV=i!U+CHM+_wdtRkfj7h3;Jxqx_#nIl zpN7xEufgZw^YBIZ5_|=|+Vqn%@Md@$yca$IAB30S)9_jNHTWES9=-@)g0H|=oBr$! zycymGn-{?9e$Qv0f)B&1@XPQk@Ymos;IG5qg5QSUX*wQ-H^N)sIrtIyDflqF3cn1$ z0)GvD1O7VvE%?hpqQoKWbfWz1@1Z_4lp6ZT(&A`>nrj4O%~L{h;-y ztv_$Q)B0ZPYU^X0zR~*q)?@oWfA9+EYR_oHk zPd&EXVDb3EvrCJemDSae!^cl_(&d%a5kY5ZdHLAMw6k#haOY%tcwu$1^W4(%OWQk= zo_k?+Bt5p;IlQu*{=eF-t}Gl{>>OWQeQ9~+SZDD?nahb`C#J{F3(Lokt}d?(gY_1! z|6%yh!qTCWOKu)!z8zWMu-dHdSh}+O?BcyEM>^?=g%?s;mCe$HL&p|neMeUo53N?~ zI8iOujDKQe;rZuR7N3`%58Eo!*Xp5DoukL4;>3w+f2|$@Pj%8m3y6I{=u~I1(Mdyf zAB|Aeg^N5ZgFnBt{OrO~=g{)Wqo=Sp&tli+yk;OyD zs#E9iao#TPmYhYzouIQqPt+QXw$_n$7T zoLH2wJ0Q)dI|$9FJu=Op@7EmO_jRj-!(MgJb?RfCrK5)yk5{v+ACL8EI#QpM9XTbJ zR}PP?xidS@uPmQTj~=eh!up|nuX|)Ko>-L!@6i)S7VB#J0QjKl#Z%^yEekS_1j)X* zw7gJnRGmQS^3mg~_ntWR?7hdHTUnF`&ar1lwx3vCk^UwB;tN&2vMkqu1$X#mg~Kb! z$jRd;Pb?n3Z{)zk56|xD)cIVUhn_Az?+SZdVRm;_n(yqYBVlLfu&OT2cXqglErc#I z-)V9)+4;^_^PTV1`JOtTvy{A!&vo{=!tAiL%Y}Bj!faiVc~%jb z-LS@%WS(_sw=Kx@=IvDWn5oQns%&<5oy)-01SNNcSzp@aVsmvN?AXFwXQzwUf~LRmsX}+`5Rn-MK^sb35$Xcq1?C!c|Sddeoj_h&;S*F{1R!a3FYOXo~ zc6F+JuFiJU*=(K3yzEJGpDWC}!mKatsbX^QhIMsGj){xZ1zD8~*n&J3?Z!J?jV;LB zUBnlBDV(#b*jdd`daiQm&(3&uzADMAhLO4?vvQHTFyEPTfni~`E=jjlm2_+K9d%RL z`EHw8Taw*x+eogTD&6ad?K|z1cT`i{)v5BiI-B+ReRaK@SPUWqgh`HcYN^Vci73#+WkB43bVt~o+>1J+*J(=vvo;! 
zy=}6y%4KraJL*kjYIQ;OhHYU3vUgn*TQCn3Jhp}bcA z|Byz_KU!9Qc=~;vtxJEtj%|1rb>^O)OBdZ!_|IIfT90h(*7@+a+g<{rTjbXo^?&({ z)R`1izx^+_y+m41$gefVzxlG=#I4_4>r8UnoALjpc&+hYuUdcj_+ODWYmNV&-QJB~ z{Q)(J>G92nuGSj=g57?r&0XfUH=CLdZLTr?X4lS*zsYu>;;olWCpM1r@5=)l|FH2h zX|vY&Zag=B^#`P+y&8Y?b7D8Xdw!6RNnf$sxA^>J?sM7H`aLPQ{_s55&1|E@<}P#X z#p{1re!212+Fp%Ub>Z5ZP5{M~|rOUA@h}8drtZ%>^pdd`1sT z-CFy9YrWmNL~`@^3*^TnH@-P8=8yWQ^_Xg#TL0b_e%3v1T#XvP`hVZ8G5#%Q>J2{E zmE4SfyX`=YzwQr~e{!JO@0qO}X6sUS{(NQ_s|#wrqdzSnoHw?0>nC>mDl^->Woz8{ GoBl7v=7Bc= literal 141768 zcmeFa378yJ)&F1BGnot_B$Jg%NJu3iD+DGRvWQf|7EnnDBmoua05OC$lL-kSEFB1o ztN}rB9|A%|(6|PX*J%Y2cW^~S9S{%&6gNar=KuMgd#bzA^nkwa@BKah|MUFoN!9tD zbI(2Z+;h*pb*s9kbKxOxJG4-UqskFSe+TCTm69kbBo7*w`gOM`?z$+HcP$OHb!DL0 zb?MFlZA*+6rj3o7gf1~Q>YJ4>4}7xL>BrrJMNzbiL`&Zvz0K&FXn||6e{;`6;sV8wA1NLPB)1z9qxWj zbiekK6u;WcuZJ$Ne(bQRwtvGs#y19a*_kZZv8CsF(af*4E#jgU_y+`5Ozr6wt?hqk zr{TpVD#Z4psOPT&9_Kz?C2>(SW?i6NJ}%3H{lwczmQD=i(*{R~>PP-?dr`;WAkD_P7ri*s%jIvkYLb_aIDN>eZsbL=0`qAb2==uGjTz>yV^|qCCeQo^P2L<`CmT0rkx(|i^v`>kqi(j7T8a20% zh-L`PAFC_<-8uBPH8r+!SurcX(c`0Lq4wjVHlzDSdx&gTY8QWWjrh{9XIzZ7GIMO2X{@C zG{l2tW7hK2*tTyv^>rXRx_-LT>qn#h%{3z#la^CJY=v#cLKhn!> zU4S3qd_UK>2%*1&^R}|WhdkLg8t(diy?mJSP0kN>zK`=moNsjARuJqpIB%H^Z*j*T z8xeTB)4*Fl;q4&L_7q@!3H_fEO>;iL>xoR^3GPl+s(DG&Xh z5+z<9`adNaqE9KoVK~g)`^<@YpY$)FOhM( z|3)^BwfZ&l@0r^AJ`c0`QXnW6=iItrajaPvnpqbtUOq3IR}2aKlF;v$7Pa)8FUPi( zbd84B_;pULA6Y+0)(2H#{T83Ew}tuJ-V);P>q2vT67n;PYkOPBM_(6PD7RyIpsn*g zKePC@r(s{XJT{~zQ&oMs29-*kLvn`Zcf4%+I`4(-)dqIfT@&#_EE#&R|quow1 zZ2z#fXnb==Q|Nb%ycM|}9fc5wwTZDU@Rpa^ePO=M zcKl2un}3!5CSiWgpBwDZ51WtP55>>sVi%Ne>VZB&h zvhvpXmj=CTeb3@J|B@h|zgwV7eV;YZq;s)`6(a9YCY5T`D3E!+YHN< z&z78V={7nwtTW7;1I2fZ)wQzEX-0m4_!_z9*_-RgTaoWyN1h*I6+4Nc)~r2mr>Vxzzt-AyJ)R$;f|&mDHaTinUZvIOw+oTj2{6J2@E+}v}p0Mzo}wA*^#Ccb^D=jrC2 z`Qo=_@lV7}Y8jT=`rPF0M4))eX8*fPd^RuW*J>+IbhZAj6ySZqS)w)b zp}Avo&lz>(Eyz!=BX321S{-?U{M0(~$;emKk+&f~MRNSN{He^7n|qds`z| zv30dpKg;i7Ew7qkFMlYsQt@MU)_$MpU$3YvoB#LOu0bys{`HdNJz6?4+=pxWDSuOy*U06s?H92(N9As>72fu1*lVj(PxjtiM=pE&OHO^;Z|3aT zz7c!%>dD@`UhEy__6`@#**jD`_G}%m>6h#stnwPU{5wc;{5wQ6XK$`}?A5C$dka)v z$DZwvuxC#?IeWIx!CqRYp6tD?j$Hn|RdVdTT{LI!2=UmfS5NkiRe2r%mPn4h<3w}z z7K_JTy|~Ms?c?g$J6iRy$1Qw2dke*5uUJWN)R)>-cB;SnRc{JZI1Lk=QFrUQd9V7<#p^`C^_~vi015F zARc@5;x2pVsl1N8^^#-ne9@e}bH#VH=vvvwX74LE_KE(vS@O$Dy4wEL`m_Gl@LqxU z{jHMop4#?#yr;JN6};{5;O+hizlZeT?Y;?b`#JdjUf=d{@OGc1JnY}bhx;S^WUp`c zMR?mMq2KKFZQllO_aDk_zXosjL-^gizTF4m$2;HX^~X8i;QUzU3(hCb$Ijcn2m5yL zp4s+cx%cv;O@FZWe@fVQQ6Bm~CG5B0L;t7v=MvTT@?n29*!w>vyq`ip^nXg&$H0gF zPw~$s27CXfg#8rdq5o6-{gZzVjK8*jqCE6}iob^%?4L_a@%>Wuy}V@z`l0_*{QZ>e zwiy2r{yAL8BfXcOq5fy@opSHx+a#xaFVW?Ru6)jB(dPO4KYTC0Ewsn`mJp|Pfsc5< zV0k&l?XKV4)2)VT-haI2ea9tIilT<>bF%FH8Gb#c0{BD}{!!8F{U6VDmiO6vzuul7 zd0$}pw@sBfdea-$(oj@6em^4W{nUG;XN=#ZEAKNt+4H2+e>#21>0h0`Alg3B$IW)% z#685RIU66Iuh}18`pa!4X-9=cEe-hD9JLP*&k>nEb}Y}cdhnxE!N;+seeaOhmG|$K z1?0hWY1#W1^T+#>YzzJ9zip)zH`1uBBiOb4&+-F5MoJ^g6WX!yt$ZFaJj@68v-CS#UmWH|<-PpgVO~`HpY3{0VLsSRSD!O44)bNcjVAG3`Lmp6^Jk*RIWc*2`=K6ZTeYx%sK+_A@|N}{kF&8+>wKHV z*tgY+ICu7heDLqh@ZKxr!zX%PP!*4V$>ZKaUOgvy&HQ+-Ha{L&zah|VsnK{?$?~`0e1UwJ zsS-Y)en^yd?I4dW0@>$l#}A3^zLD|E+HAdR;q!Rif8ws8UN3we)x>ZJl%-f3FZ3RieBC=j zIeBD!_W56~+~m`SMRs4{`TTCxn|4%W>$m$Ilc)B)kDqoQuZ*9^k@2HT_>t`kW`zAv zW&CV3EkA>uY=1E$>?>;Z$k)uCMVR=8bt#KW)m|vy+rI}heEZMYv%DUCl>0x!_caT> z-39WRI3H~^LNByCBdm+`H@_ZM^o~C&`hsepZ0mA1uEfFkH;O|=zb&*=8Q0;VAFK~$ z*(ZD9U1f($B(N%dSP6LL?0F3H`TSeju2m4iQ4D-{UVe% zXZM?7X(cXMz7COrN*;Lp=zkdB86Jm)ejlre^P((YW`z48ao)KXd$toIzis4MA3{88 z?2SGo+*i=E^&^WzE{+zVERL19SudE+9zXKGEI+dPqYv^t@cvZBY3}y-=PbTiyzs~7 zN!AW_!u~$%?_BS1s@BLqzpqm-yARknAAe9}w{G%mgcKPEqm0)ZjYBB+xZ!tM^@&f2 
zpG7ljX8j2HX0}|2p7k-@jMmo7oF-b$i)*Q^(GcYJb}7@E#^xH;Kmc}^JBK2GoE4JJxi?+r@1xz!EwGHOsu#y`<_Sky_{j;`#gm# zzQ>Ob>rmES_P)UP+sQYBon%=O-zkf%pRrNxhB#cW^4x}f{J-><+Arq6|GOxOZzcUy z-`)o-r<|ji{k`q;bI%cbsKD*&Il|r#RPxbQqt|(kNK`xO&-0SaH-E0s^Mqv}?M;zF z<~RP>`-a@}#AGWs?Wl$FNyZ0%^Uo6#OrGjWKlZMfSBq=r)!y~z75jHvk8M$soyERS z#$g>iPb8xLy=pyA*!u>bum8^TM2NRp!TzBhZ=3blKQz3LS)QsD`rG93Huit;Jdp$g zHRJFfKVR@%Vf((ybHxmgR~VmW_60Uiv*$|YgSA}g&tQ*NHhx2cU)es!*O8cgqw>1) zT#@nFb3J)5Q#JpM=L{Pk?t2eOe_4y|=UXx!ySiUgo;R}fnsrs{hMqf~SG}CRGGSwm zS3XvrL$dm;lTRZb#CmS)CUH@JD*K%I!^GMCSoZXM(P(s)?~hlCvQKb( zx$?5?Pun^At|)o5)%f=4+oJCGfBrnO%AZHFarSsu@+Ny8S>?|KHF}l28SMMPRsLL1 zqldi~`AIz5jMnVm=kF{j&!^e*$U=V}scqNudUSgwUxVG+c3toI_UJa*C2wB;d8C&( zWcic%zy5X~*Xr|#JwLFng#Dv=!~W5qQy3>JXMD@@Bm4Uq>=;kEQJzPxQ#s?mRg~w> zYelnh&HP}#*MA;4MB3zE_B^uC_ra_D{ntt>uFUt!_%8J4kyXBLuhFZF??QhbS>^ll z8a?byPz#mut$iNxd}bdX)>Yr%a=*=uNSG+`s;BJ+Ih)GHZWZ81Hb?s`tFI zuYX@PjN8BSywaip&Yl|>U)zTc^zq*2cKmthwa+bwhjDt%^MmK{z}2$Od|B&sQJ|xj zRGwS5dtQ2j_Fwz_a(I-;;(xdOZPxp!f%nKynjPC+vmRlU=Yc1Cdyh;v?D@Xtz4Mbb z-y{5Y{;tCIKki4Iecx_*N!~=sTBpNGA+HyeY+qN{X2ns=JhOEr%a8y4=dYfhtLd!0 z~p9(ixa-H_T(c1Tn|UQ!6hlO2hNc z>(?vP){CiU&wa07ud%jXBlQxs?CVNgk9Ww|68l{n&lR@YVBa!C*7%%s zY@pdVcC8A0_BpAKzy6N&MU}KBp**{`+wPtGPU3FK(0jq@ADlkt^!HAmar(4V%YW)U z;e5B#UpaluslB&H?-A#J>h#A>f9Uifr{8yK`)lfb+j(2};U94Rey3k|T6X$XQSbi^ z+21`6oGvNz-kv(b`oQ|Ra$92J#!HMu?Y(^eqlM3<|11^S|EuV*miWs~b^njOy_Nsn z_SmN`kUjTDpM#$#p6|1tWAcy>dR{(OJnsQk=Hw4{T_%qAe;Wd=>E}ysx4CPdP@eVI z{l2HxZ}!DSX?Cp%`s+>&zZ1}l-9l!M?_a;98UvSxdR-q1wC^UnU#;vK5qS4!Kz3jE z`i)uMcG+7c?0BB@{B84q@5@^rz}vbD|4muUp3mVeKlt8+9q`}q@;;*QmQU#0yo3Li z$GcmmTUKGs+ViGZ2XAuc_VhibMRohZ>U|%*Ic{zNq@zNFyG0md&RpSEyVv6 z@!7cW+)@<(26VPJiLl)=SENBc9(&u=_^#et2ynX{5hL`ME%iTz`-9h~)Io*3-2u;-Xggp9x^k zBsKM9?{_M%k;|T~=hV0LJ!j9}*R;2W{hZ77_qB^;$MZ_-&?fQF%bk{-p5t_-Xg2;` zuRp$%YsR;^YqDxr=fB^7TeG|v==VYPlYOPf-xb+-!#6o^;|`DOR{oGEe3kLmtD2{`>c|+9!wo6!~L$O?l}5=8#A5q5qzD@S*>nZ}6f2o5Sy+ zz=!^C4)Yg2^nY`h$MB*5o5OyazZVMqzd6h!?l+cao~KPqS`?k=!0bNh`TC{Wb+DIp zt+M`ky$1)jn?3#RhPBg6z2?lm=W% zf!~%Gt*n15hF9);e^80{TYm@lym-p(fPYRPSN^Q{n&*rcv)_x+?}qgByn2E6yNUXL zlz!lwPN6@ih|lgLy~LrqkGdlCZ^gM`|5>AdZ`UMQA+WpVUZm$I0Qnca6LSx$RhL740Xb)<3p;jYX z|0{XDG_vukk+-04<5VMWMQ-EOaY`6h&$9&HJba`4%J!?cgvMA>KnT5am6B-ACb%70-OUR`j&nd#)1od8+TNt`yJr5-)dJ z67}&-kZ*MPc}BPNe8I{~y4vqfZ542PYWD{5&`nNPJN<~$o1EV2wA1OGPPd5Sug&Ay zc-FjsT3H*n{~)e*U&y{+|JwIB;lAhgPnJ(QJl^AK|K9t?P#k6Ni?;M8%xF2pSN;_)Gepg}O8snq(ibr2Z>?n?>%|h)Hali-SW$oPf9Wq574ZG9u^_CAP+JF~Q_bFj@cjbUi;&xT2;JGshdX78(Z0W@hYoRmu+u?KTbzb{%YyJd z1N0}jd^e}#osM%l)@kC@K0l$}Wan+gU)tdDDN4#bEE;Jho}-1b`;yCBkdKlaJG+Xm z)kXXI1!0}Wp7~o-PxfrRsFBOw9x_1vJwj&}yst0fVh7bEd?=QUNJ>|Pd58m<~ z-kwb1!#;3v$Orha4_q9^AHLb`TOPn$zEa=v0lvx0Eid4+JRa)pWqEFSLU}e%277s! zkBh_i4cP{Ud9omkN4x(%3vsqQ^7+_WiI25z{mT5X{Gy%A&xtjDSia@_v^<2*{IL9k z&-}E!Y(L58gMW{O^5ExUAJ^(QSiVpm@_up15BO{xLVi@o$?}NuY#gSzzu7q1{G>b^ z2g`rr6voNt#fqa|M7fsm~)HPTZpNuEYGgJh%ofx#u`MsQ4 zz9YAOLbG{b{es_aJ%QhD-ojgZP#Y)cf4UA>-c!%=9h%K+%WrtgYiPE<{hwaPW-1QZ zeXKGcj}H3|zMrpoq4)c(nOs1hgQB{>i4;!+~@X@e1&BlabFnXy*81Q^?qUa zyp!^fp9{nIP? 
z=@zG*PH%F$Np#(GANO;`k)JE=`FB(H{j|#OJtkKC%q2#m#&vHBab0>-nBTr{ZkRTr zFjp!ax4n9;XttjwUv0g2esKNg=U1=YmZ(+~XYI6)40hJv{Ax)B+dn_Qx@qa4=mwK} z{&?Q$bBcdT&hPrX?9}24|DyAMb^2$gFF5^!)90N2-l>fP^`CbBNvBUZ?RNSrr;j;( z)Txad^?&O8kDdO|=|fJx@ASJ)zwOk2=z*xaHdYj_T#wwRqC5tt;65mY3Ul0&nrpwfjAl zuh=>453hE zz5jn)0DC5>WA862uaV21&429KdXTgCC+U+XwvQt@mWcPb>wW)C;@{}~f1UYQH!t2` z+^e4pBx;rKdHK8{L0oj zzArwW>Mg8WCquLErL*si-zK|RUh}@|a`EJK$>}+wb@KR2lGmdo+f7g{$m*kK;ODBkC>J`ebtc<-Oy=l?@I z@8j(%D@m{_&yywr}-*@dB3f8t6ZRYdfPYTt@Taa&&oX^Yd5M685 z*(ZFm=XUY_IeN_R7ThF$-F~l@M5%Y1D4#Ff;Ph6}rQ^cy5b-^vPVuahC8y_!(vQzM z{jAfiPCw)H(@yVldZ*J*iDK^~&fnwo%TDihy3MJryR`R3=fB|e^G?6&^uL_m>+~z4 z>|ft6N_#f%vC}1f`|%;a_LG$70OHK|P+s$U-`i3v&c?UURSbM>e&s&@xlkI5is63A zJUd^!>-%hEToOs6$=T;i=vgXcdBi*&Ck6kUqnXd)bziIe4h-K*7;EJrKUyeHjJGSF zznM1rrp3J9C%g~y&+D48zq|A+pL5Rhzw1jS<#W!~@V>0}bIz_%Uj2M?V=td?ZVd0e zYV=z>HimsejXXiVvl_0EPewkxj=T-|Fv&YgHJ@*m!snZP6P5CuIM-<2UyEyvTz)0e z?6@ezd986#3%o_J^10|f;k{ucj+4WCtNAljn>f!4?-Q4&#%f$_-Cv#<|C+xib$ODF zQ|)@pbN@=!%$_^5@6%Q8j~l}K(HeP6#|D32`d)uewJEgU@!s$ruj8`t-gK>5=l$Mg z;r%K5i;^^{ul+&PHCWf&K4U}3qZ)ZD`je$sBbOgj>c}S}pIAqpAfHr6z7KMXBjaIt z>*Lb~Kf&~;>KfiVGLK)g{%7BV^!c29@8k78$8C~5l8mWYnZK#94xhif)qbwnWwomB zWwYn7bsk6dTg|G!&cD|)ED8Jc)-aD%WJmkVK$ir0_Wsu1pVG_k*dmF_jX|yQyhl4W z4%oMvv}eyt|EJpVe*1geo}OAiygb(ZnsMl@yi#8aqV&8QmHyP)c>_I`9mAM zl~?M&K|9tj+38I$>&N!Ar)Sx!RcEbARxUm1v}LQ4lUJR!GC670vZbq6oVh$XbJ^PD zJ*S_wHkoj8zv#7kXRkat=+)?~TD>}1y$XX>OLa7QtKFM?>B%R{r;4SM&N}<_lh2&A zI$5=>W7(=T%aRWIuq;_|X0rN}WyvaZPdxiQdnO0dqm@}j_5H8~_PDiu82&%ZJsbXG zD-Aoxxu=Ko`f1|qK{@;#Moil4XoMyL4Z4IwYK@$`aQ4aczzJt&!&Z;lIa*EY7Z&-7O1B7 zaPU@WxtR-x-Z|XwSpIz5gObJR=U%4O_c6zMetg{ZL`svV0=u6F+Dwsr>p z#c?b6A8uFm6V7a0N8)#r>oNZE5EL{f^T6Yv^h-yqyVxks1l;Dh2tLShC-@k!#-bbi z4ygQk0bF!Uoa1HSF;GZ+7MBD{Y~ypm*jISsi`}2m;L9A7OYzmJ#UW7&--)lv6XMpZ zuXj=|pNi;cKQEtFf**jYZ|8s?!H(jx6Zk2p=ToIW#5H~fdDj1|Z$)J0F+{PdH>b>H z_j58Tnjbqz7n{V!-npgVzk*d(;>_kv8TrfbW1)~jqdPVp#_TFu920fP<%z`VkIu|L z8%Ni-ea~z4&7>-+UcxDVCUA0?uaf{NSiTCEIOVJQRi>q3?w@ic@1LzjuW65{>TsX- z0*%+ea!U^TM+VQ!mqHJ#39A62qxtwrjbGehIeGFsdg###K%q?fLwlt3Fq;e-4 zld+O85<5`k|>)4sy5iE+rUOlN2QX-vs`f z<1OG<9N$gP6${Pn2fuoXNWOdiGiKo>W2;W!Za zLl{OP3Z54T=s8u zy}MlROD^w%Ql;TRXbV*FN}!5W3RRp+(C4g~sNo;hOw@>J%yMwe=ehdGVY#ioq+sRt zn>kCI%Kx#PW#lghjQdgJ5YM}VVI)vzbR0(6uCzgW@~cty=W%j0%BJqmDOmM{OVm^z z(HFJR_;%<=)*_ez3OneE>Qd+6`qqRbBVoX8Dmo)t|A@ zqoL!V9nkU64?uT=eg(>U)BFf@BJ}T2+TKC&m<-(;It97}Iu*Ja%CqkdH$YiCclZo+ z8uVMxJ)uuR_X^tzt?m8jqt?=X$n*}N-+bsy=qb=y(2Y=H)$dj)ZS}hsiqHKNSMsah z3($QDIb)+e;EoA&e<-K&c*kR*%n4~1R;Jq-FW=sf6s(8Hm>g&x6B4}iWEs<@E9I~@_88h3h6jDRDC z+90SQKhU@QHkM?OYJs|^L1}ZRl_Zj6%#Q(Q5)j5UdWKWSfrR=jtazn)nTz?<<+r{ zQ|LPawu%~3>JB6?!h3r#V`eyo>d-tZ2{6Dql<#U+EO{|-A@mr@z-n`mK`yROOa`79 z2chyPg)KO?#k-aXN_VyC>(@t!~iD(~p8jutgh8HZ}T zTFA>x-m)urIcLh5b@CKppU;9iG``lLD2Hsw7dt!-tXp9l;MdW)8MIE{uJ;-13~uSu+L!%?DyUNU@YZ2nQ+V!o*v#)nRI*@lFPhGwhe3audxajyH zXjucpp}uAFk(?Q<96VDpKh6qt@JiQLf0SJ6qx9E8%?9`)$1TvjP32g-WPYr2ntxxB zxA_6e>5GoRo4Lq7HV1#I7oR^zIeDRD@I71{Z|B1|kel!5e7lM(I`n7oW5~01p9Pbr z;n?{9&%yt<&cSmSq2hlq2hT;nn^VVm%ooUO<{;~1We#2dE|LoW8*?x}M{4Jt_G@*MJO9?#LbvBC2 z=}d=pQyFOB-Zf z?Bhcp;r+nq_vr>73jSi)j}BerxQzTg;4ib+EP=KDq2HW>F9O3i7s1zqX|K5i{;1l9A0+%C*Y&Ntg-!(4Lb!) 
[GIT binary patch data for the preceding vendored eBPF object file omitted]
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go
index 4e982fe04..b94c9752f 100644
--- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go
@@ -91,16 +91,18 @@ type BpfFlowIdT struct {
 type BpfFlowMetrics BpfFlowMetricsT
 
 type BpfFlowMetricsT struct {
-	Packets         uint32
-	Bytes           uint64
-	StartMonoTimeTs uint64
-	EndMonoTimeTs   uint64
-	Flags           uint16
-	Errno           uint8
-	Dscp            uint8
-	PktDrops        BpfPktDropsT
-	DnsRecord       BpfDnsRecordT
-	FlowRtt         uint64
+	Packets          uint32
+	Bytes            uint64
+	StartMonoTimeTs  uint64
+	EndMonoTimeTs    uint64
+	Flags            uint16
+	Errno            uint8
+	Dscp             uint8
+	PktDrops         BpfPktDropsT
+	DnsRecord        BpfDnsRecordT
+	FlowRtt          uint64
+	NetworkEventsIdx uint8
+	NetworkEvents    [4][8]uint8
 }
 
 type BpfFlowRecordT struct {
@@ -111,11 +113,15 @@ type BpfFlowRecordT struct {
 type BpfGlobalCountersKeyT uint32
 
 const (
-	BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED_KEY BpfGlobalCountersKeyT = 0
-	BpfGlobalCountersKeyTFILTER_REJECT_KEY         BpfGlobalCountersKeyT = 1
-	BpfGlobalCountersKeyTFILTER_ACCEPT_KEY         BpfGlobalCountersKeyT = 2
-	BpfGlobalCountersKeyTFILTER_NOMATCH_KEY        BpfGlobalCountersKeyT = 3
-	BpfGlobalCountersKeyTMAX_DROPPED_FLOWS_KEY     BpfGlobalCountersKeyT = 4
+	BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED_KEY            BpfGlobalCountersKeyT = 0
+	BpfGlobalCountersKeyTFILTER_REJECT_KEY                    BpfGlobalCountersKeyT = 1
+	BpfGlobalCountersKeyTFILTER_ACCEPT_KEY                    BpfGlobalCountersKeyT = 2
+	BpfGlobalCountersKeyTFILTER_NOMATCH_KEY                   BpfGlobalCountersKeyT = 3
+	BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_KEY               BpfGlobalCountersKeyT = 4
+	BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH  BpfGlobalCountersKeyT = 5
+	BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS  BpfGlobalCountersKeyT = 6
+	BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD                  BpfGlobalCountersKeyT = 7
+	BpfGlobalCountersKeyTMAX_DROPPED_FLOWS_KEY                BpfGlobalCountersKeyT = 8
 )
 
 type BpfPktDropsT struct {
@@ -183,17 +189,18 @@ type BpfSpecs struct {
 //
 // It can be passed ebpf.CollectionSpec.Assign.
 type BpfProgramSpecs struct {
-	KfreeSkb            *ebpf.ProgramSpec `ebpf:"kfree_skb"`
-	TcEgressFlowParse   *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"`
-	TcEgressPcaParse    *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"`
-	TcIngressFlowParse  *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"`
-	TcIngressPcaParse   *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"`
-	TcpRcvFentry        *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"`
-	TcpRcvKprobe        *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"`
-	TcxEgressFlowParse  *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"`
-	TcxEgressPcaParse   *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"`
-	TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"`
-	TcxIngressPcaParse  *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"`
+	KfreeSkb                  *ebpf.ProgramSpec `ebpf:"kfree_skb"`
+	RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"`
+	TcEgressFlowParse         *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"`
+	TcEgressPcaParse          *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"`
+	TcIngressFlowParse        *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"`
+	TcIngressPcaParse         *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"`
+	TcpRcvFentry              *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"`
+	TcpRcvKprobe              *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"`
+	TcxEgressFlowParse        *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"`
+	TcxEgressPcaParse         *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"`
+	TcxIngressFlowParse       *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"`
+	TcxIngressPcaParse        *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"`
 }
 
 // BpfMapSpecs contains maps before they are loaded into the kernel.
@@ -250,22 +257,24 @@ func (m *BpfMaps) Close() error {
 //
 // It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
 type BpfPrograms struct {
-	KfreeSkb            *ebpf.Program `ebpf:"kfree_skb"`
-	TcEgressFlowParse   *ebpf.Program `ebpf:"tc_egress_flow_parse"`
-	TcEgressPcaParse    *ebpf.Program `ebpf:"tc_egress_pca_parse"`
-	TcIngressFlowParse  *ebpf.Program `ebpf:"tc_ingress_flow_parse"`
-	TcIngressPcaParse   *ebpf.Program `ebpf:"tc_ingress_pca_parse"`
-	TcpRcvFentry        *ebpf.Program `ebpf:"tcp_rcv_fentry"`
-	TcpRcvKprobe        *ebpf.Program `ebpf:"tcp_rcv_kprobe"`
-	TcxEgressFlowParse  *ebpf.Program `ebpf:"tcx_egress_flow_parse"`
-	TcxEgressPcaParse   *ebpf.Program `ebpf:"tcx_egress_pca_parse"`
-	TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"`
-	TcxIngressPcaParse  *ebpf.Program `ebpf:"tcx_ingress_pca_parse"`
+	KfreeSkb                  *ebpf.Program `ebpf:"kfree_skb"`
+	RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"`
+	TcEgressFlowParse         *ebpf.Program `ebpf:"tc_egress_flow_parse"`
+	TcEgressPcaParse          *ebpf.Program `ebpf:"tc_egress_pca_parse"`
+	TcIngressFlowParse        *ebpf.Program `ebpf:"tc_ingress_flow_parse"`
+	TcIngressPcaParse         *ebpf.Program `ebpf:"tc_ingress_pca_parse"`
+	TcpRcvFentry              *ebpf.Program `ebpf:"tcp_rcv_fentry"`
+	TcpRcvKprobe              *ebpf.Program `ebpf:"tcp_rcv_kprobe"`
+	TcxEgressFlowParse        *ebpf.Program `ebpf:"tcx_egress_flow_parse"`
+	TcxEgressPcaParse         *ebpf.Program `ebpf:"tcx_egress_pca_parse"`
+	TcxIngressFlowParse       *ebpf.Program `ebpf:"tcx_ingress_flow_parse"`
+	TcxIngressPcaParse        *ebpf.Program `ebpf:"tcx_ingress_pca_parse"`
 }
 
 func (p *BpfPrograms) Close() error {
 	return _BpfClose(
 		p.KfreeSkb,
+		p.RhNetworkEventsMonitoring,
 		p.TcEgressFlowParse,
 		p.TcEgressPcaParse,
 		p.TcIngressFlowParse,
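Note (editorial, not part of the generated diff above): the regenerated x86 bindings now carry per-flow network-events data in BpfFlowMetricsT, and the new BpfGlobalCountersKeyT values (NETWORK_EVENTS_ERR_KEY, NETWORK_EVENTS_ERR_GROUPID_MISMATCH, NETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS, NETWORK_EVENTS_GOOD) shift MAX_DROPPED_FLOWS_KEY from 4 to 8. Below is a minimal, hypothetical Go sketch of how a consumer of these bindings might read the new fields; only the ebpf.BpfFlowMetricsT type and its NetworkEventsIdx/NetworkEvents fields come from this patch, while the networkEvents helper and the sample values in main are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
)

// networkEvents returns only the event slots the eBPF program actually filled
// in, using NetworkEventsIdx as the count of valid entries in NetworkEvents
// (capped at the array size to stay safe against unexpected values).
func networkEvents(m ebpf.BpfFlowMetricsT) [][]uint8 {
	n := int(m.NetworkEventsIdx)
	if n > len(m.NetworkEvents) {
		n = len(m.NetworkEvents)
	}
	events := make([][]uint8, 0, n)
	for i := 0; i < n; i++ {
		ev := m.NetworkEvents[i] // copy of one 8-byte event entry
		events = append(events, ev[:])
	}
	return events
}

func main() {
	// Hypothetical metrics value; in the agent this would come from the flow maps.
	var m ebpf.BpfFlowMetricsT
	m.NetworkEventsIdx = 1
	m.NetworkEvents[0] = [8]uint8{0xde, 0xad, 0xbe, 0xef}
	fmt.Println(networkEvents(m))
}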
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.o
index c0fc165a963e367302e8a915151d263eaea6d405..047d706b82a18aeeec4bafd01040a11c2b689967 100644
GIT binary patch
literal 150088
[base85-encoded binary delta for the rebuilt bpf_x86_bpfel.o omitted]
zqB70%+@Uhvf4Avr&A;|UaF5Lv^S2)Hx4~~l{FmU%5&uu{x+n1ufAM!2f2oMS1Kt+# zUxE7~{%i11#BYN$5q}qaDB`~Xk4F5r;6lV~pH4;0{Nq^+bXAJYbWGa%=nu_WD7XJDPP-J$k`w38FjxD75 z^~|dCH2$b4zZris1Sm4=#3BJo`BcCrKX3R2!(TD{qTzE9-;U>3BEAF9uSUEb&#ydEXVnv)7*8Mn5 zpNx@b4d)FP3>OWT43`a83|9@;4A%`e4Ev0a;+tZ`B2Ema4QC8z4d)FP3>OWT43`a8 z3|9@;4A%`e47=|3_@)dehIwXx5S0 zlfpcXN;xr{Hk>h>HJmqGFkCcTGF&!XF^FzhhTjpLg#oEYY-d9nG7VZKWc z%kzc{hKq(vhRcR4hO35ahURbO=dK<(#^`GVo1;gaF9;fmp^;hN#P;f7&d zkkjyn{nS=`5>C9JRGv1RF`PA=H(W4WG+Z)VHe4}WHC!`XH{3Anu+ODo-1E#&Fhf-f+Qi z(QwId*>J^h)o{&l-EhOOEF`PA=H(W4WG+Z)VHe4}WHC!`XH{39+8wbt!%X*}8Vp!LsRh}{O ztl_-jg5jd!lHs!9is7o^n&GJ^h)o{&l-EhOO9!1vpq_(z)KQWv(oH3j= zoHtxBTr^xVTsB-WTs2%XTsPb>tnZC9<1gm{loP{g!x_U_!+FC6!$rd-!)3!2!&SpI z!*#>H7_J(w8Lk^{7oM=_mog+`V|Zp4VMg;4Oa|T4c83U4L1zy0RatPii7$nyouqo;f&#|;k@C3 z;iBP^;j-b1;i}=9;kx05VRu7&d{c%K!)e1A!&$?5!v(`d!zIII!xh6-!!^To!wtjw z%0M&!Z){JG#Bkbh#&Fhf-f+Qi(QwId*>J^h)o{&l-EhOOpHxQWJ;h08#EIdw;f&#| z;k@C3;iBP^;j-b1;i}=9;kx05VLf`*%>QjB{)Y8^DK(!l@~q*!;ez3!;gaF9;fmp^ z;hN#P;f7(qC=lg$ii-phCx+98GlsK<^M(tCi-t>v%Z4k4tA=Za>xLVKot#I|{7e~6 z45tn2`3JS1HS)aSg5jd!lHs!9is7o^n&G=faNcmiaM5td zaM^IhaMf_laNTgjuuDw*4JU@vhBJn^Fzi0m9-oxq#Bkbh#&Fhf-f+Qi(QwId*>J^h)o{&l-EhOO zyWPa!aAG)ZSkHs5gEL+D%Nlv!aKUiVaLI7laK&)daLsVtaKo^>!^GckVmNI$V>oL# zZ@6H%Xt-p!Y`9{$YPe>&Zn$CCZ8z~ZoET0U&KS-b&KoWmE*dTwE*q{Gt{Scxt{ZL` zc6XZi8%_+T4QC8z4d)FP3>OWT43`a83|9@;4A%`e4C?{mbw~!*-&9|FdL)L^hBJn< zhVzCChKq(vhRcR4hO35ahU{45tn2`{$Z|StHLIE*LHvE*UNxt{AQw zt{JWyZWwkuO#BTehSP>KhV?ynO^>{h7Yr8-mkgH;R}5DT*9_MUHw?SGP5ccfhSP>K zhO>tAhV^}XO`oEXmkgH;R}5DT*9_MUHw^0mUky*Hzdbz?!)e1A!&$?5!v(`d!zIII z!xh6-!!^To!wti3XM228h7-eS!x_U_!+FC6!$rd-!)3!2!&SpI!*#F`PB5_cW;c1tTvSE*UNxt{AQwt{JWyZWwm=w8uAPI5DjEhiH5U}ku9*L2s4QC8z4d)FP3>OWT43`a83|9@;4A%`e47*(>{)Q97X~P-A zS;Kk51;a(dCBtRI6~k4-HN$no4a07?iNE2*aN2OjaMp0%aKUiVaLI7laK&)daLsVt zaKo@0GVwQ@7)~3`7|t5b8!i|w8ZH?w8?G3x8m<|x8*UiZ`((7fr}ng`M`Ac_IAd7v zp;7npMqV&nG+Z)VHe4}WHC!`XH{3An_O{12WjHaMHk>h>HJmqGFkCcTGF&!XF^Fzoi3_!~|PrwwNeXAS2K7Yr8-mkgH;R}5DT*9_MUHw^0w{90a9!|myj7)~3` z7|t5b8!i|w8ZH?w8?G3x8m<|x8*UhO_qNA3WjHaMHk>g$8u9ILTJK$A`jU&0oJ(;f z!)GJrQq_6GwTQWtQ#af&?CyghDEd$HDZ`23wBd~5tl_-jg5jd!lHs!9is7o^n&G

J^h)o{&l-EhOOzObv|OYLt@kHm1= zaK>=faNcmiaM5tdaM^IhaMf_laNTgju)DuKzA3|rVZGl{(<@`-dVht=^G04UTr^xV zTsB-WTs2%XTsPb>toNpAcvA=3(<3pQHk>h>HJmqGFkCcTGF&!XF^Fzi0k z9^aJV#Bkbh#&Fhf-f+Qi(QwId*>J^h)o{&l-EhOO%b55ZP7J3FXAEZz=M5JO7Y&yT zmkn18R}I$;*9|uey9bKhO>tAhWV`>DowAV;gaF9;fmp^;hN#P;f7)NPUt&0IIAb_# zIB&RMxM;X!xNNv$xN5j&xNf*%*d1(-Z_03DIBhs%IBPg>xL~+wxMa9&xMH|!xMsL+ zxMA2mY~pV?F`PD>F`PA=H(W4WG+Z)VHe4}WHC!`XH{3An9x?GZoET0U&KS-b&KoWm zE*dTwE*q{Gt{Scxt{ZL`c8{9)8%_+T4QC8z4d)FP3>OWT43`a83|9@;4A%``j(7x7 z@8LgC>+r`%ek+WMoWoz*@JPgY$VUwqA|8XhX!ym5*Fb*G@GFL2HT=5aHx0jSc;iRm z2^8Z)|F#+KH@x5QA;V7_o-usN@C$}tGZ4WBdois4rczi#+V!*3hj_?U^m;eNyW4IeW6wBZ@U&qsU{y#9*e za}jqz{)*vO4Zm*qO~Y>+-gu}LU%J1|aKGXGh7TEj+VG6wQ-)tK{G#D=hF>xKs^QlS zziIew!y7+t;%~U$@P5OG3_opn#_%b_FBpE&@HxY;7=G38>xSPn{I=nZpD^(^+;4cl z;X{U>Hauhal;IZ)zi9ZJ;a3d5YWQ`-ZyJ8v@W#hY{0;XT-f#Gj;inDH7(QkA1;Z~I zK4<~C-SC@+-!{B4YvOOX-|&9JhYUY$c*gK4!!H@%^Me|DSA6kJ}B$_mR^4K_fqG`1iX0 zpz9C2{;2DlU4Pv5X4ju|{aM%l?7H0bovuq=Z*+aT>#w{1yz8&J{-WzoyBb}8)Ag5K zzu)ygy58#gR@d8I*Z2NP*MI1GWaKkXf59z|7jpCC&*qL!&MwYB=LY5{X9gB0mlwm+ zgNGj(AWY7V9hsTT%`YyxiP?o*VQzlW0dg~QbH|qoxv|-a+)`m;Y;iJobY|}4Ko08Y ziA7g9zL=YspDVnRxyAXh@yXom`Qy3CXAzc#rXzGQcVcdKdU0;PsTpjj`%Uff zv6=Cu84U-+n?nSfV#e-xVSet&2z(>W)s&%;Y8)r=}+6 zUFY$8X3a)~SQGH`+|1lqu;dF+VQzYM@t%d_NA5X(bbb;Q=lBsf zu&_7}_woDO3I97khjw60vmY^Rwj^$8c4=X9;$C;?!3PKT<-+e!_)WJ^^@sIgpFRxk z^-sgOJwYd(+uam}r{UZ#)scs^>I~-wRTLi5s?mHH$_=*6r0;s5j zeHd&#?NPm<@Q}{Q!%%Lw>c|6PzsqYO1VPk%fXM40C_%!M+UKSVXb1L6*!G67`Jchz zT=?B5zkB3&sQG(uFo00&!{A>3gxqcF$P)tH)RHG8oYz8#f~fg`prtiTdqSA}4@zA4 z-7CLC&EMf5fX(L9zNS|5X>U`j^|VLzhQb3vsyc(=X*joAMd1NiuSVnnaq&7x?jUJC zpa=w=J^FxXs_j8Mg&H;F3&5V7{~ZcHyTZ?4_(8a&kld>e_y`*7AtT*<+UK8;c}<=0 zggjB5@PJsUhCHAyisfA@kq3lbby^Rtr}U7-Vz&4WPhr#d!XT1o{%I~gV6p62P2IUFKFJ|x?ca=Jh7}zaA-sOY1C+B~M!Vgk3XrpD* zhkg1mxYs`+psFJe$RLsIR!Mk3fP+T!0XZgGdvm+I3~^9PgRQ6ZuFiqqC4nE5^c~iR z!RFIGuZ0v>QS)IiJR#}D$Zr3OzzOdPRuHuCfYcBZqJh*^Bl3U<4ax|my@P9}_n-th ztwGj@L41lZl;VB<0YO%c=EGokLXcH77@kljRHykc7@kl`y$C6r*5vXU2z{{Ad_Wx) zqar~S4MrHRbQtmf{r-Ib3&d+XI5ED5tH#b9d2hRrYiqIE3ZUNlSQF0~;qqtO1wHNP zQs52v%NYIIe+xbpLonPOkn>UP(;ihH!r#gNy7;T#u?nVJ_LXmk{Z9U8`62oLzp?XI zX&?x~_=25?g@qgzvRK_SSV+K15jjf>;gA$*B*`0GVG%3;q!zTZOyL6rt5lLg&_Xo$ z04mzp=}dOMOR~tW}0%-h9;V&rs1B36{A;UCOup$!A~pl zFAaW6;WIPlf00I)`1kqGgpc>9QIm1NkK=dR(Z;_JzUPZm@cmz+0zdNKHS+JAvCBC> z-cB~~L)j+3#=m+EU~rhvYaRU1y9WQJBLCFj_Z0rJ!rw6X1L3DgRBH4e^18wAEBr*^ z9~=D7iu}Uh4;B83!rzniUmg=k%O&Sw_mHkX9lVB5#~q#LF`;E}CW?!2n9rz_`w#g< z)?dpX^P=v!M-o_z{T<;C0FpYse+|h$YUZ+TP?G1_GjV_FBxv)0B;%h?J%2r23Vh7} zRnp>|nAm6rwJyIavc?hf`JQV0+FS7By-oc2TcZ;5@2P)tJHPz2EvS^?<@?KKsf3-l auUpiic~RMf8|a6wsp{6fZqm|JFOl;{_EJyuknWTWjr^H#14W zW>pPjgzG>a6SVe{BLWTE7uNZS&&wJ{}MQ&W!aqm<} zjxAJ~u@jZfo~G1&CIdwWW1fe9-lADkg>eHuDeBTYfqgtfM&C0N0HMwM2cgN#I;EX0 zLdOmW@utD7Tc3aa`H|YkpC{*_nK@GXjKoK3pECNS(IZ9=3GJ?Z%;I^Y4;sDS=)Fcq zjoxAOR-wtH*|pW|y36dE;(3q5t{YH~cOBz-rS+?^R=!=uy<}1wGTggQ^mf-iB6JA# zVN*CeGk2=}rS)eye?pm?Aj>9b^PZOcZ09OGW7YUTIOT0h$E;sEA?$<2a!8X~@qw^t zE^{GH!x6@kMzcz(cLAO@?`PHyYX8`Enm&Zi_5oA%|19aLz1nW6T5Y$~?zafPbDGM{ zM)_zi;ZZ~LN&G~)cPpJ%sr~Him`!Jjgt1T~2Vpkbi}sz2r^0_pMwzY?H zQO|1?75(&BVNefDETpqJd^|%&ouAf7D|;U65e?MG5YAQV`goS%-=-Yo4fQkyfnODOAyCi6z7=e3;DuH6#vuKlpkqJL{WGVjyz81--3F}79hlJPXo&%Y^p4qAVfwrg;_ z=Buf=Q^4^1wLZbVB@*vM|1fKC+c^?%o+SkHlGb-N{4N?YjT)P?L<4H& zVH;DLH>zA_rP7)yG4DLUnplO*trCv2t_DcGg zn(Ix2y)Of2(X5+whJ^ETn*LeSL~pJDl2znx{83Q$2$qb>v{y7$Zsl{Jz@t0U!*PqbZIoQuuVSb$-g8iIj7@saQ*gxCi7z_x( zew}ba=P&zpp$wOBR$+RL&~S;ai@|>OF7>J5!Tu90jtU_J`?UkZCDPo8tAmG2q|9z)g-0Hut&f-@8ee*1C_1`zw;#U8CzQwKn`%baA z5R7wrh59iq3ij#vZB{i{XBV5_>3mQ+Pq_J^yMBrK@FqemWl%+HZrC)A3rU?^GSH&1p-Ys^hy*-)bHIoiI-7yItkn{N8Ns 
z*t}W!&6g-mhDv8xI_>bmYQ9eSV``o^gl1R_R=8OEE6Tq>_}Xt#zAt>X7TPfJeFim$Gu1DK57>Sul27f@`F_m&Z3x+=9&w%b$8@^( zhM-35YSH=M?JI{+tj}`#X$9(`$=(pm7rSoN^GqJbb)`yvD zP+N>#)>{n^`GW(8WF8D>>O7k{@EwbvD|Qs+mr6Wy;IO6ZKrE&&ll0*OMHe78w>HJ*W->P3T-AoWe zIRCRguDlz7X;Rcq?*B^lQjX=fdHwh-myfPn6Rmrt^{VpPrCz1wIQxUUKPdf9d!^p> zx(+9^QasL)OU*BA+}0_d^NbIeYFH!bsfM`H`jtj66x!XjPH1qa`D=Z%#1FcDK)Ev) zV1Hg&A?-su7*Y)vs2R8%#q;y zy{6*1zmm=L$0O;~$C=TUU*$b3a&A6&n2m{0<^4hUnFIH0yUx`4r_io5b)FhNaIdy+ zsUP?{e?{j*@dIDy;V56{Po1x$d>?j%zRv4WzW9N!^M8~te&8>m`B;Zpn%+B!f6vqn z#qfdAN$k~qdQ_jNzLMS9|TeGm~1Q_MR^B;RAO} zVlO5Wgs48X_p~y;+B?6D?}HQix)F@(QG4skm>G`BI(g` z)ZSN=@zvfrWqco;(ARxJRFB$g`>Kc^s=aoeo=K^wy}EH4K5+9S_S!xwqEGF$eN=?6 z_Ub+=YL}1lLSOe)QNG%%`>QBl?N!C$Ova-2>i%o^KyDIybw3u>r}papEXr4Vb)OdH z`zSB;bsrbytG%|Lj?`Q2)%|xkvzq40Jugu>Q}@%u2X;+jukN3t<*2;5~+S9>wBBSiUXFD5{QkP(F9e5Mm=@__CGqWM(c3XzZURbO2A;mnJK7WG{$ z@#KNUW%XBm%O$;BpY9{WnYuqL>eGE>@__C)qWM(c+DY`S5q>yxmC&NT)e=u0(EUfb zzNSg^tu%d?l;~?TeY!6y*LTGv`dUoi`VxK3rcd`<<@zq4MBh5ocWH^f%S@l{)5`T- zHHp5>rtiuUeVa_5?hDKHwN0Y0)%0yB(YMj`wNBzM$w~CJ3qPFsYN5scO-MX>pk)$$ zTO>VdxBBxnrtj(!egA0sbYB|Hr~a3jMBjGPx2;59r|HxEZMnYmB>Fl`-_{a+DbuI> z-g14xB>MVIUvG)NKGUcB?Q(rPCehbp`npQ=b(_BO{jRp(@FeHl~4`;qkXwm;(FY)Ao{z?4rwUQn!NB!?Lrf+wN zz7dtzX}a)U;0AE_;HmOP|3ZPAxcDGcdHsJS9l6w(@<$pLDxE%CXs}=Rb;Wyn-Oq(f zbiEAr>pm`AqU&U^U-xgtdwScq74GSEzgE1b=c-rY#_bsjs7VJO8;zDpQt^2OxJ^dU_pKJBs z_i~F{{rBnqDx9nPgJ7TStLEyyAlRq-sc^3D2ZDX{Lezh*)qkJvr^30~LBT%VM-}hs zb^jF3wfgVVeN$-fiGzK*Ukc}1{r9QEhjVqE5BBN)D4c8c-$!p^`BwjZx<7L7KZDzr zNP6LZYTr2$clS}nd-}75AM9JK@=(tjxclV4^`8D9%%O!fp0?Aln&%DQseXBiR?8cy zKO%k_y+@hooJ#*^3%K`Z?p~|A{;=eC_iWwudQaf)1*^Q{WPG^$z{$>$ZFZhLh%y)^ zldGxFi|LX2LsBm5$*QGlpVYXzftPy6k0qXZ$6t&-WAtgGPYDgSN^xG0)CO|>bFil*s6Z(@&#Mf&v~y*4+&B_4hr}3I?TLgbvNA4s6DKY z5BYm`Y+vb#Ri4fx!B+Km>ZN}KN%eQMr|STp4`uK^q<*EZ#c^4>qF6Sb(XmgmNYtEaoS z3hFqUz~1@vZnO*fpY7t_Yt>ZQ{9dPy5zN>5x>T;s-j4-!=rDv}KDA7wPyHfD>--y} zY+i;LQm*;)oO7&Pw;q(1tHZ*ZbDr7d-v5@CYjRu8`?J_nyVXqjl%xkq z9e7(#_`Sqk`ZP2rTQsQCaa=Ry0h2#Z=#~?HEp(dozspzU-7ox_3LWRcd~5GIYwwbJ z*G$oIJJRr=&WqMgBUk4&UeaD84UY+b_`RAwe3!=E_eF~7%3pNFcf_9I1FC;f&7p$b zQa`tT&)4=Z^s}{Njq7K(52(}q0p}y>Kb^kdude@WKT)Uqj8eHa_%%b|zCTb$@7MRc z$JUt`BrX;+?JY;OF30?n_C=b zHB~wf2QSbL&JSv)sQyB~Cbb?TwbrjC^>N?Fa_}3E?Q79U;Vb=2kP!#ab8fz2=PcF!3F@>y zg?>8K>SgWZ+Sjemb6RzyqsZxEN`0^MtjZjmwd%wH!B3xcOoDz2-kl=_Q`G{5;#g7uwh98$VA^u4ldI zS+wzTp>E!E>s#BDm&!U8oO-IH7xq1MbrN^&8qA-s>F9s7 z4Bvf7=EE(G%O&p0ckff!I}7ks$tsQaL&(OOFAuf6G4vC|3>y!@N{!c4{>kQF?YB0Y zr^iy7K4ZU9_nwk^PXVh?4)jVn1L_xJs29VGCZ%JqR61j=(%|hz+mz0^{f|<KCo zqb+uK1uGCF_59z7bg9o)J^vT?zl)>*>J@)X={bPwnbfCF%b)ZdfqYykN9uLF@mxVy ztCDYnp!}^13;Lz}B)znF?hI0=YdXr4@{{!%hrLq1&2K@+smh0P(UVfc3garRe$M$< z>oM`T3fg_ugHO?urZ>Z$79n2HIafgOJTXt>&@1)~s@&w~32Se(i|FsL_P)gQcUXI) z9%5&^wKtMQf86x@N?-JO0_8D8`+@rZ*7HS;cOF_E!MTx%~KHW&}q zUamfK=czojNPSjFv~VsgoGb7?pGCNHMUBPXx&FnUGfokEhYvh1zMrm`D(&jxH5F%% z+b6jBx@JnXojZOh1{US3LFBo?YarxV({8Y~)^QHLm^T-ui@4|V+?T;6rp%Ca_ zY|rLR67_1P+5T?6PGrFXJCBsewN111`A9$r7EsGX`s_T?sRJlT+d7BrQm*;&oGaXZ zc9L>+TKDE`vUGPIDJ|FJwrtY&bn|)nd1OrNEuKeyCULiqbMs>HJn~7CKTqhE6Mi7H z{5K`GXz@HUf0A-HncPXvU$wo1(sE61%SrE(3@>;dnWP<@|I|!V z`$u+LzZT9T)~_Y?arNnT*Iw^czMKEue6naj?N~l&=as@eiPKZk z?vv`dQ0lR0)2oEKdDZpj|K52;_dl-u7d)@*zgX;j@#hsC@9x|%e7EjHXITHf;JM{3 z+An422lL|@tHm5I7&5Bv;msLz=N6sUUQGFvEIYrv#TUi@-ST;#KLr|6wfa6HdXN0$ zque96+22Q$%16Ji`r`i1LN|);J&F6?9ltNnn(=$vq`XIFXsbR~_kF) z5A*%){iO5T-`h`ZQ@?y8ddL`V~n(2p+@jO1#Voy6@OTC&N&yvA++I zaSR2SmsvWS0Tpbk)Oqiy`5-qgAM~_UE|e0Fnh#}+%ZGSd7rN9>r7|+ zk4qPO^*)N}oJq!|gV9!5Q_#;Uj!T#Fb)9E^u4CiU!Dy@0{Snjs(sVG|?C*G(9xqJ? 
zqs{gSOwTd>c-se}Zk)LuHJYxNBIp0iTrd&b`;hKg8h8CUwm{?VeJTBbp}SW99U-Vp z+C#?wmeFq*{kl;-=XKY9)#6_^`X!?}|GU2n@2>s4@jqu&*Z=O?&q$p0zZF$pyGhXb6#4{|ol%0ut7bS-R=3 zSAPwTH~qB_OM2!u9ruT!M*OVy)52#vFg~(-yTo1p4Bw;u;`%At&(5Et{XsnsLXXSu z@@3xNuX+#AdfY2RZle5|_f0N0kv|RJrSdh$yh^@7(ss;kGC$8qX*urifl^&LDSxC( z{d=U#%DGqNra`&*%e9sdmEul3>L*kX*6Mr`*6R8m*6REe*8W8Fhg~|qgk8E$hFy;v z|KE-3d=qx*IvIB9yc2f)o2BbK6?W-+!uIwfZ|2MSVKo2iKwi1oG)uRo;~nr(ZFQ*zZ|P_WKGWwc4+)|HqEf zzYC!++P|!q`#p{5Jiz+6eOz%KzyS;)Y|#D=>TDb~Xuk*ceSok*`#-Sv*KDa%MExVUp4PEE^^?Hf%Z3fQ-UaslIBd}V3hH#f5H=i=_73blbl9N%7}V*x zH*8RU4(fD&7&fS12X%UG3>(xRgLzhdz4~P^%j#dR{u#`2^N{*!Fw4zD>aW2pH_xfx z28DS}{kJ&JsUHWkw8KOD-d1oOdx8F_ejUux^HNx^eiqEq`>L>B{VSMd^{+R-nx%dp z)~nwIv#kF0I(~y$R{we($H6SCf4z>M;CQQlt&XFNeUFp#XdHIYUzdSy0=*d2t()}Q zPas}8AHJA*^fBa{q1O>!=3bkx4uFXeoj+-r(@%fqzQWCmFEsznu=RQD9%Mq8hHjO4 zkV+7^Uy06x8T*5CH?9vC=80d5pJlR|&;8xpVXUJv?i>C~(xdHl0YpRF|4~mIan3u! zuP^^T(;v8=n_c`}lELS&pF;c6^TOI+i+;xWrxwF^={n}tpNZO=9_iXEY3-Z2NB6H$ z`A@T4s~5}t^Pir7eiQPdDE!^ooAC*|(Fv%k$Ls7T0m3fb7e>!p)KmI>L6%QF?(gXs zufkI^wH2S}%gFch*w60!eD1wnkXj*fe9xxoafvtE{xj&fMAK1O_Ct_t)Hn*|h;n}y z-|YHLhp9IND*doZ-&?wS6!I4%U${!Y=M&{G6uzE=qI_TYEDiOH=qD#!r4AnD)6Q^} zZs@|PdLK3f9_trQ)pJyMn!aD9zX<;{J)gPrO0cyF(zv&@^dPlT;_iKFkXohbXg6t> zj@256ebO#VCF;4~1?U9d2h05~!ty=LRop*@Cu;uiMAaLfsO7tLV+if-Lq0rF?TGTF z9ZuAKiSnf#PSpMjZ;T_i`kB=C#uXBOA^&pwU3Y&)|3Z@-<$J4I@fz=3(NBN9QILLc zK48mNd5eTGe4s_fU8;6K=DW zJ)`$noEJ;S?~89&T~Ef?2nd~^KB)5x_kNH1J1Ag*?L7~;^zX?F=Wz4WgOH(W_x&Ay zzk(~`qPlP66Mo7`1;L%yXXv)d8&)^P`)U;k2X!3{=Ppsu3_ zU>i$^eA@2Ob@(E#_2xQ+f1dl@h8dkA?gcwdPg=_lc3L@UN@Kn{QE8Y~ez4P(qxJR?Wdzj~^ z-YoMB*Io9Ki@9=t83lgeDhTyJ_=# zL!dHGrS*NVTd&BM_Dt*i8&>IhF$66#PgUtWom{N;M)Rq@v!uKzU-jwy5?1NGe^K9= zA|L423*>h}=s{ttul#e>->ST3iL<|!0WF%lQ{o&4v}`hoUGu-M8sKjJv0 zWrg->XKsuu`~99Ut^QTC^E69$cBsD#s1a{!{D+^_OBlnZFeJLC0aSpLBc{`$7Fbu=g5a)j}yhbDj|IFq%IOI0Q<+ z+5~zrXwm=BfC$_tvZR9&<9ey>P&%(T zJHslq|L>mRgB&S~>Q)Osqr<{0JD;4V?Ot{cnXmnlv3t1keA<8I`Lut>F3}21!!YEC zbL;Pznj?O4_%AqLBvo!~K&kuQ8^7<)ky|sxm+ud;o_i2?`>$v{Ib$!XQ-A(@>sjUL zJQmf@nPO3$`o-U?|8H%_D%s7T{dAg>@1?3etTpB*>(_9;Ziw9df&Nv>C%1kNY@(U$BkZK zbg|KeMt!5Rjm|V$ZB(`&(ErSHTFz5OpEP>J=pmzz8-2`Z-spoy?>Bm{(NUv!7`;{K z@CmwpMDGu7#kt&l|IVG)xsQDkhTVXNE5&fA?V5ap&MQ10aOGNb+eS!YKJB_i^pnp} zW4{;2IJm(eLvIP^^~Rm%BbF7ba(0fRCm{t#@G9%ZhNoWZTGv~cCXvrrT1ao zU3!n(-DU5yx(-=>y|3%;(tEjXyKnBcd*<#gy-)16d*$vfeb3O{^_b=Vywcjg2(77@ zFYR4ZajN)jY5Qbm@of$I>x9RI&d}xE>+Z7iTgkn8)DPr$g#yV|c>m)1#r3PZS8?~P zf&KkzSZnvH_V-y~t=+56Rfh=e_ooBXAJ*Evs{K7wSgY+5*x$#7wR&&Kayf)2FZavR zF7EHb!dkm;EtIS683Z~JgthkGrckck=N8J<``k>R^HlM@%OlqQ7s>ao+bW)x?^&0( z|AW$irR~qZ-(yJay;<~(?9Cb7CG=?h!1XZN56lqO9+7-ut-V*U_p|O^E!?aA81A+A z3VYQr!@c%?V6XaTxL5C&!@cUK;a+=BvRC~&+-vVC_UiZu_v*c1xL3zXxYynf?A7rS z?$!I+aIcP=aId{**{kC$+^hGFNo!wsFF6FS<_r_s{cV)b@g3IdJ#JX5_tiu23pQzJ z@0XJSrN++-s_$uSUt4cjj_UiJwcl@q7WL`;5CraAvPtwh{~xKnL*!rRy~d06i=*}X zLX=$Sci7GPy7_s=Trmsl=rjR^d-uYb&I3PLEpkFa^oy^qM!3Z}dF5qdri z?Rx>mc2U18wu|{&;eChtWAS~5u2aQ!(R=ga`wksf#dgv0<>tF|#p$~5uISf&cZJ(` zUn3Q8ew~`+_s`4y_!Cmk()sOZejJ^@GB*E@)b5h?J+n*OHF}Tt`I7erZ-tAyd)D&z z5zmO<9fn)VxZe9mi4WhY`p0aXl;_iZLGgXa?ez>_vrdtNey1z$z>i&L-wC9U$Fa8}kLu$9J1G{%gdC~WuvL0Wna`bbKoWgv7(pYZ6&TFk5 zZJsOEOWPxJo$9IbbVL5P+Bxzf?BmQ3wU6hZztz67b4G*qOL_lUzoXi5l*4)b0+v^= z{aWRniWvOnfDrwEFtC5j%$nGLU4Q&{>e+xITs;Hpudr74q3->7m3Ynnjy-UV7qlm= z{j>U8q-)^y@S(zqjl|KGkrm)H~In`>@pXH)-77Z;b7S5W*1h$^N0? 
z1KLgzzRWWX?=R!i|HFoRg&)4-9-;hR65AowaEHW)@6vNk*zi7yciaE-(Y^a_i6`gv zv-slw3u6BtMXL5Sk{;H6)aXZy-f#3@jDFbYeMUcI^n*fEwWAh)$ml1HK4|oy(N7qC z!05+~e$41+jQ*?9PaFM|(C*r|3QgJjov?OH;?e#Rf4fVnEB6DO?@Ils^nH!HcP^DH z{%+{CqOi^GBdfgEOPun$LTh=PPZt8Z_ia_)up~IY3a*_ialZFqzXsR&5-y>IlrtIT(6 zCVAhxM(?wu@~m&TW|H^4YqWKu^5leTP7*t!eDVJ^Ckj8T=IV~|f`|1At7{mQ-*<-T z-k4TX-cM^jZMP_2%hC1=7y8UD^SS6-sQplQPkcRZY4JV|?Jn(j9czX5*6d!eNAJ`4 zI!yaT@0a=di{-fAX?OiYzE8fbo2BbT^7wwF;{L*=-zHIc4=8reULp+ly~80?TIxCb zoHD-DXZB*@hj-}xrQ5eMpQhXW@vbvP4!@Tv`=oHTy?4>(kn*W1oUQeB`yA@IK=@kk zDBl;pwpWx-PB>fJHOkj|s9#3;T5t8waG`cE*I%)Jq5660I4>Qq?t7*j-^eQa2GUZw z;<(oHd-VSiibLK7j%Qj@`rdV{T_xOopgUI%TRYa+|DP~y-M9S-ceS06x#`DpW}R_Y@tOUInHW&(~o<0U6O8Hso{F8Tc*Z+lg{t}H`eHobRq~9n z7SBuZFkFd;=PTOLj^|}~I2JhT5U;`0kB6%@6hZ!{l+PQcJzr8>!M0}nKQvy=CG@aiog4duiO#Ywy>K0e*y|dpH#A~<^k)D2K@)jw;Vp6 z#fIB~8x4;F^Up@vhDY#R4NSY9!qaY;?VJWa1(Z(EhsOtG+wR6AZ&A=d*8?L|^R4Yo zhUrWHq*zv`IO3y-!%r&NZj;%{Uk;&7?RY*3Oq=@gJOa$R(GLy*qg~~%lYV0IU>EAc zcI99Exca-gwNpq#Y@S0UV+z?M(9VTG93RIh%X(V1 z66f&HR@N(zr^f8#ST|KbUQBmygzCqI@?<0kKgOM({TWL2V&JnO1V682Ic{uH@+Lg& zPxcA>kz?7%Z93IyB9MC+T zXrHWNWndC_yu~8C*m=U1k8y2`I!TDO@lxX*t&Q0ac|0vv-ZJ25d9Os=!npG2UlwAo z0%%8m{Hx@AxC6lX1+N@kTRQpW06Fk6zADcLc5TV@Tfk>ORO9()!<=XCF-)Jh-|z*% zpESG%_z}aKfxl*WJMec5b4>oo@Xf%#G<*l}?+xDz{Aa@t0^?8qq@N!Ho^JSY;4AT5 zgy#^R6TpvyK7*$g_!7|BXhi-zBI|wuFy#@dxyGCZKK{wKlk6vXy#_GY?khl1yw?nh zP`v^lTY<to-Um;5&fPKO%oOFy-z8rQ9b#;UltluLgY#ar*q% zKz-2ffW|?81eyc=1t=9fO9};|lnM}{etvvJ5>O2au|`Vs4r$8$OOmx3O_(+Vs*GZgTT z!2HL$a~`5RLo3oa-!snhEb(kSgTPSh`FJca>J5Xx%9`U1zzpvKWgR|Xc^(60v;7bh z|D?gtX5Pv8K)t;4L8pPXfYP2mP|o3R0L5DA9RTGxdkB?K{tZV0}VmzK;H&>D(I&`=Yt*sJq`4apbJ1xMx_^m zE(1LsbQ37&`T@{0K;Ht&x$|E@Ip=>7l=J70LFHtR##)T{%Ro7oo(;-*vl;YU(Cwg{ z3-^F>{=6G>8R(}$IsbeMl>Yk^DCawVO_4tAgK}Xo_?^HR$UFu7L16kIaK%Rr{|5L$!@tE(wv8En8vI8L{~q{p z!+!w&uHiod|J3j^(9h7t4>Z;@;J+U52*$-<4Re51V36d%j|20os>C_qV}V)6Jn+ka zuLV8=Tnl^yum}HUfNJ|Dvgci)ERR14YIC`FCh%tCp96fgVG@{F$ueFE{s!Y;WH@f| zji5QhAhBk{=#nZ74X+XMt;Smo%sz?)`D^iw;3E|Fu1UmQ*_lY55#UV_jN{p3I0tMY z#yY(Te3UKcE!usRCDLxfXo(?xf>Y#Xo%ft zF(>~3WD>^vG;qrBmw*wf+wt*r!%5(88QuzfSg~uDis;@S$qeE}ZoF~+U;wjEj3t`G~!ziF)wPBP|(P9`wCU&)9xP3*+F#NBg+c2it ziqJ5o$cjCNj|INPFsG%r8b$>x?lz3(s`!{;kc!XXgLblw4;df0;z`3;zbc+Jj5@^F zxU37-uZr1*Yk_gDlRpQu@}3_FxIk)PQzHwDn<-r?yR`cFxIt-+YMuF zt9Z9ztZx+`GVB9?5*X&mdIRG1;ECh;9QY{TYsd3%!00;}+fNvO6mf<=Jb40y5YG`j zhafWm%E9zQllOrc2JyK0dM9u@_&$M}g$a4s;*Is&{1_=~_iuw7_4kA?7Z;IDw4!-Tlm9LzsAG;OCFTr_x!-3`xnGnuQ>3rc>YK}CeD+9snZ8u2+X-Oj*p9h zrCz`-z|Vl61HK0MJHUD1w3Ele92jl~9tNfjIpCWO=Yih|{4Dr9jNS$OXUYJ71eiAD zf&UelZFmIuE5LsN=HckOz$nu5fqw%00C4=}Ud8VXuK#^+z$K&;J<>O1O77b1HhxehYa5f{0m^(c0Vu=`S*e3fnNeFZ8aC?ZD0-<9@6>K z`#gxoftLVFe*s?voM2wy)xa|D>QHCHKJZn*VkdA1Fzsmv?l-(0c*Jmjoma8nZ~**4 z!`V7->ZgJEiZ=)OM}XI|T;L}R^YY;Pz?HNU_!q!@w~z<^Juv@{krxY9=xE|N@Ck-< z!1D~}ftLUuL;2HiR(JBimjfRQehzpG@NvL-;2z*#LYx;BA@EBm1N=JR{a_Qn3HW3T z-J2>I1kMK;*>ZJ1#^MlLq3%u9N@ScRbIEH=9K>g(Z6*BZChGXGK4$lE* zeJR89^;<*&^DX$d1IH~65_5ToogDG6(bs1|9-;aUA#faP!{y+=-FQY~|B1vE;8TX> zUJCrqhzuG^Y;FM`zA51wvcyz6JPB!$xAiLq#n}xoB@G(=id3`o0}`T^rvC z{9Y6I2ry&{ee^!#vybipj$0fg=JF6bIoixReopm>^b^4Sf-vtzFj-FiR}ucR5j-91 zrORt7Vt+s#PX-^M`a{MkdyU|0lfmP!B_@-5IB!a7Xc$we;6NY zf$1l4auEJSiF_hkRUBvoWbTjfzhHPX_*Isde-8c;bB7QGYlT-O-hZhd_Sh1jPZ!YdGb{ltaX zXB+~540t!nLVL>{YqXBMZ=wI^@Z10y;!!-e0_Q>bSb$4tJ)kEVg#WMLkdLjs3$ceC z0{gNe7Wq#iPI=a!0kkp>dI!pVgbakJ-zta0S^JrJ+17VL9-;a%e0&_3dRl-#5A4P% z@3q;FO#CAFP*N#zu)ah-!neT3((mQ)xV(8_24s^tjriaAfxx@$FDsN!mW!sG^#1z0 zX4}zZs0j6vK6(=J*NbNK3C0EVdp;i4Wdmp&5625}4v&S{v*rV|nd9zv(DS$@J{!SK zp6?bN{yG5u6yC$X%4!%g1Au!Wh7*A~F5-AzZa5Fjwt50I4n6b0zm^ycd^&JHa69lK 
z;O_wEfX@Y%dQH{42+H`tuQd5M@Wn1KFwe{Xhk1dU!2dpQ9(Xb7|wSU z@K1=3#XTGFPl4md;VvBbXTUrgV)~c4;xS;pi{`xXI56L75+4H2LZ0{t@NQtvOHTn` z3;dtJJXh`o{skbt?*slN_<7(D0RM(Er+D6nff1^|A!Ph~{!Qb#`8V2CZ2wwsJBcMJb2=onfulH^6r7whwdK)F79?Bes z4BLU>$H1J&=~ouedYVpIMt%tyOiwZfp82ttsNYCM!ItS-W&^#WtIeowb<($uT zlfGmjcGv(|@OBN~a{y-2uVz4>tM|#k9LEg*VAuzq0}MB)ya4zN;3J?6=NQfbpAVcR z6NN1|%ynapVIO$C;l;r83+`2E?^WQ#4ZId${=5tEX5heZJMa)N?3A;`>w$j=JPQ6E zV1())2+=*uOyK?Cb6>~sR^WFl^xh8q4#V%Vyt5JiFfjV2G7g*v)+G40Ve*d{&H?|{ za2~i4zAEytXO3akYnkC3aI@h&@OEJId8LoM{7#zW1?Km^h~s#6yS%{HyS%_}0%m{F z=DUD5gEHI$Okd04`4BMtsq%i{&j5cBl;MlO?DISnd;^$16^Fwe21clU5+6TM?A{mC zH5p>hz_y#0B$wB8`zES zHvqGp@-WHGwVwn2C-4!fewI$UiEWA)F#%0`{(D4H_aDN{X>g`#QqGO7Gkj?p8ks>j{OYyCBC$Y*!&9MDodXa zOdpHmu@L)xB=1uZ%>6I}NTIK{D2t!5>6VT*lkZTF>C#=>Q;y$LvJm?f@FHJg)h{jb zrC(n*Pum&Y;{5~gOiQ0Lmfs_ZNGybSo|z_UORDt*I{ ztK^@0kk7U0OknC{|8k5I`*@ZbpXDw$`8aV*^R5A2K|b=d05kA=xb2{;!ABVP9*t!& zybLl7{C&q7!;C|%_$AjEH;;3S6Z;fEcp;fv=gZeC_AG=f-`z}9_d6hS63V4L2Y_9D z?*pbh>-&J=3xFABLq0A@KBUblTZu;^ps=mV2W2?f>cu+N8YZ9i`=G3&Ta#Ev;v61^ z7m~SPfqV~3`uAdB+Rw1cFskic2JG6c*>D{EO~71(+hNBwhC6_hhPT7r(}pv^8N*$` zI}CRN2Znoq2MzB49yZ(y{940(z}EuP&VD@3XL7(d8~;||S*J^TR$|}bO;)d3$hdl) z0zAk1Wg#$v+?73Ndf2~{>ERiH_V{?5J?!@*^=bq2{wFZ|nBi%|HJ7^B8xg6*6G;-+V?D@+ z-?%W&Kbf{3I_X!$R|7K-L)tLw(hFRLHH`z^$cC;^Fs2i1T>(o}2gx9`{`V z8d7KSJ}}QQl!*iL`#r?%!2G`CQqUY8_dOmTWu9w!%2@U>`XA9#zC0nRv?OYz-N!#?m0hU36*0cKyf1HTKH zeccaykKtXw4+4vAzz-W91^%kxJn%Px_rQiDz=xg8W!RTH8Q|X+UGI`*; z4Icr1z_7O--zzfg13zjw4*X5S?Z8J2=Yapy@F?)pz&BLD2Y@TK8g9W}1(}8zMR26AX_6*8x*c9{3Dk z`q&ZR6^5VOmix2y4YP{40tUf}Nn-wvDueiC>Ga31*gz;C7eKf-T;eYnoT zYTS)?=b?^<^XEe`@JZx1 zpq~x51HZ#?4)_CxM}a>DoTYx?M}Y@``G-nR0Ph5j10MnAlAZ(p74R;~13wSEoAMW8 zAy4amFMgp{^$&)dF7ztSHoOY>62q&3+YGNk-ff0kfJ4KZfpdo2fp0Us9ryq+w)9>< z@JE4P0~>O{p8|d@ZC;M|J;3yVJn$jIM}U70{5slv5x##2{0cM~-x+gnaU<9a729y0 zqRc{^UxB$#hy$Mhd^Py(z_q|)1Mqy{YrxL|p9P$Sd>;6fCi6J3yMsFdyaN1Lka-e# zt>LGDTMRz~e3fDR5nDylFn$uSB4ZeTAYG9)JOy~S;Y#4^4CB8-sCcvCslc}zo(BAG z!_~kaGkgs2!-l5=f6egB)A4T0Fn@05r@$PKKJc#%$AO;#W?Qub^N&`xuuXugfnN=r zCtpJ>1_dtxAEEjmk#>S&*{aW!ZKkBfrm!FlvA;m>WXOS4c?Wn4ffL{{ECNol3_Od0 zUjms6z~|m@wn*Ub@W7{iKAww#+mRLrZUm-G4$nGZ7CDM%Bk%`6^LRD^bKdcg$K8+e z&Y1m5pWyv1&xBZ-uLIs@_(tGv!+U{`gPz5Ze;Y9C!f>ZyANW4Qao|q@bG~TD^C&R=GKc5u zhHnP`zTsPee_@#I|12=`j^dfRUB6G72R_*R%`5jk+km%&$It_uF`Naa4>0To?lL~( zXe+rhE4=Fj&%NJ7*Gixa+w^2SY%7N7T@>R`SxL1~nb)DLso=MRAFWs6J{TDa_rxgR z2D38?{-aboa^+kp=Q zbFIn&|I%O}v!kRcw$2k*f8 zke6jyh&^Yz5Rb_p&S*l+{(B9^VqsBc`d^DUHdw^dr$uxQMflYb{;=`E!gQel$0`YI zGYheih@6{CO6870KQN8o>vDc^A?iw8jpq`>^rOoRQ~nBI_NR}h@Xi*v&G~z+Z^y`HJBj_=kb%gQIwUZunl9`zOQj)rzVvmHDs?83>xE(O%j> zgI)?5G{3ZM4e<5W7bgLuzA~502WB1ye#e2>XGDY4>pChyoRI~P$22VUx={6qh+cHk~x?lW^RYtZnuz^^xa9qsokeiQJ= zf!S`iLdKnoZ^IEkZ~Qxu_9c@a1%ATt{lMP?Mt{lN`4iw9t*w3zj6R1(j9awNO`vf+ zTtD9kn#1!)$gtcz@Lvr-23*;#{KtWhGkgg6Wx#BIA9+|7w)hhGT^5#=*C;<@H$$EQ zBk_G zh1m26J~o1XTa=H@h~T#azhZ~vu6_sb0$}>aVzAtO9aEQpk1q4#c(_->HkiKcEeC%s zXdX{H_!uwJh%3RT4EH;i8TNtOoE~5Xw2$18Cc%Fjc)ZW1zBJ-@Af5q5IPRS$kA8Rw z86OW+&dKv$o@KjypG7*taqqHxz|%*I_!#!r(;T2M^F81flf3D76(h&c*Z*pAcbeRB zgPPC9Pb!JeD~T^FiA!$S^MOb?EaGyLd%uv48Wfv^@7&o-G!S z#odO-_|_dZ$K$FWvAHV)qt{d7J{;0 zE&@e(Dd*IpIxqH>Vq`jw)ZQwS`*MjK>tSK~3V`({_wOZgPekOFL+(nG`}%k}@tcQ{ zPTLmZd9}%XvzQ-T^)DxPDj0(%_wA#|&4b*#OzuC9BFA~>(Dod>yNhwy>^_<_mILma-tZI<^h#v}Q-VXij^Blw{R{?`c3NATw(_|XXd zw+Q}f1b;1pzY)O;U>nX3VjJyXn2Cq|!NC4ILy+hBpqOeUkbk~mAGFCZ`HP?v3PtAu zD~rBxw_%d^M)3P0_?`&9H-bNC7~|3VuwjnJZvnFpSuXbuaZuX+9pDa7`paS98$sz4 z{|ER^Q06%T{17Pn_J_cagR=k5gZ+q#{T~5fp#96iN0`Qsl@ZLpPeKrWbA 
zSg~QlimMVE*IwO}XuYbXDUt97hWpdrj^wUHx_2O(NrzrXCYw$TB={?)7hKu@WbuYICug17Zlm4?a8b+c}>^FQ9S)x3z!JJnQPLumG08Ov06ljg)xSzwn(ivT4=zkxyMFbCri~lDCMB1z*qm6oc0*HR>)Ph^t~#c|Dz7nQ zUD4Rs)Y9s0Xu1^Q9&@f*yRNlqLt=&G-pDeoPYMXxR;^v1Sha4&YH#CJ1~zPLRZh#s zH40WVUZ$XJ!)gUhjZF#~H*HYB>;*Y0EkvlV5HYLaA0B_JIh{!F#2k>wCWjK;+p_6& zBJAD@ryuY@@zcv$uct4CUwcNA;n$%o>Mt-n3F8OiXNOTCuTty$Ek?X=+@%YVCxn ziH*&Tmzk1izLv{c6Rpk7iH(=9ShucBYinamq7em_C8##*u(H&)m6K#!-_&Y-OdHDb zHMg#5+OVd1qt-QQ=GsXVg1>hCnw1;MR3TwQi((}2Rh9<*>zi9PG`BXFC0W~SXuR^0wpFXjk|k5(@}}}eWouu)@;vnW zs^+%!<$Y}FZR?vhqo-RZ=x38ywPNkMwhc{XMVn=pz`55oV*tUSqK(Yl99bJDOu4#g zL-VG!>sK~!nlQO}-O9wu6|E~KPQ0>d!_o4tYQDT_!|HN_Y0-vNjpv`ci3s#oTU2N9G22`R< z*LsY@@_7l`6RmKv#xnWV6__z((Vj4=b@L>NYa1`0Y(lreJaGfcbiZQd$_-5$HZ*T2 zcU(!I)UDlGcgfoFny-YfuiC&}MaiT{rYy3TCtBBD-h=@&u?WZXsukr65%x8nY_^kG z>{E+d2H6kv_hG+%HWvT0iwS3)bvFKO?ca8GVWWK3b{1oXC?SnYJ zZ^JaNKb`JQcX-=jSz=o@iSxoX)GFD7Et%Kf&mH`5szA_JY9*oGr0j%vyNa1e(!ZmT7hpp&Ga+vA7jRNHp;Bzxha_|Hzz zw&pC^^bV0jw5Ja(Dv?wl#zT4lJNO}G^k8d`(>mI^AHIN*g0AV^mdWn$Oi?#b*- zXK{4HQ5T03v=r)>>Pd!SX4^0s$pM_hae53x5BbBC^qHR3`*(&MH~s1Cz%T}LFHV8( zyr`#nX?q7YzAep z8#DN>jx@Fmfkdi*5Z`V}?MPixaRhF$JLqt^i zD3KT{bQL@lhgsJf9LdsQuuy@tio$_Z2L84+jq8lwWPgS?Ea+6Yq0~gGVgIL1M}i|b z+t;_Fq-`yl=~uNj0m+FBM@hK39L=LpJ)TLSmmKKZkx3!d4)B}>8y3qvXftrwm+JQG z!)|}Mj|M+0?B-0lR3y^9gFDEKp6l>`@bGvCX`LE1`YMsHotwLT zgux4ZwCF$@Di^CBa;YoqrW_4KBckGPcepY9B9Yyd;OH)(1csno9~C*NzP|2Enj)Ax zyGixNh7zfraLACSkL8zGl=ApB%2pp(WTP_ZA+#&aj;JYaX;3bfE}DK`iKd#1d&mS z1rEcpm=mjp)P|Rhrp&QqNrPmzZxCj3PGr8$5SMSfvN6nzvTz|T)zoPdv4LGW^J*SU zP|2-5m~N2LozC_q#8nW%tcw;+QJ!gqXjYGYwV0O5vZJbSM?0+D` zEeK{o&Y_4=G9mww78V)AQ!trcQ}8>u82KPDP)lKQ#Iso864szxsh|Scw5u{!gl(8P zwho5Fbf6w8EkuAiIytAppfseM({IJNNc3+Pr<4S}t5G3XEsci(QRrb=f2n35_wl5CG0`11$#;FomgBtg;heB(H56h_h)JB35I|M^Wq^!+}0*UZj|8YS6=7DfbHSKoLNLFR`r- zDpQ=aQi*NJ9hn~5gN0YFd5NR|;T4LKSf+pHvc6su$Lho#O-e55Qi-j&jqN}y@5H<( z!t_A_vaCYu&Shu;-sQ0fL{KyAo*knwH>MIDeZ2#zzW!k>Qk@xgCl-rcI@c67u43kd z%%VvJJ=}vb0}JaGkm%Y2F9y@LY2m{0_Rb#+~~ z6j^DE8yz?VV7r^f4oTPpLs)v$MjZ~=RSov^gyU=~GN>uji?xokuxxn1U|S7Y%R(Qc zz1Xi)HtXiZRALCehn*HSbh4X9R}OLZR#6#QGB40vnIABKG3G8I>$U;OE>_~QIQXbXljz*A|%%-*%grO=ZWW@fMx63@@xP?fgZoew2&KlX6 z+UnSw#B4j1%($I9){kr?E}KEgtTVV&Lh{S@QC@ms=gz8OQ;iF3!Z4BCMm37Jrm?_d zP!Dd^e6*r7-OD}OV2I(42Ey4zp_~n{MTSucY^Zqzv%-17?)7F1Z`oKH-?m|A4?pBa zS1O?=LvBXVJ|!H{gszn{sGgq!9y1+>&gEstZ57akNF54zsqgLDmfhiY7`yuE+IZK+ z?b6oaOvf(SGGZQq>*EWLT+LBQ)KT|Od=H25o9@9r9?Mw|mMhG>BtyxbZe;2SR2inD zOfRPB0WGc=^+*B=Q?8J6R3g?c07hasm(7<1m40^jd`n>Frbjg6aOd%F1(uSdg zhnX+*L>R7xt z{VeY+F$lO`Nyl^b&Fn%yB;dOR9O^qWyuZ};%j zrsLctND_m3GTYL!Jf^l`DZ>sS!B>;0){e{oycavpNL&1+Y@!6o0oy0eF;PN^<%3y|dRYx#Hz*Jp-RdUjJ~s5QKf;8AGi)?nci`LmdOB?H zEl?elpV%;ZTl?@TI&ldpzW9xl+;c8&5Tlo#FGZC$A-+~oXh2}LpdG{!!<7&klNXkY zE~5MSXZV7Pw-I`gxkMYJyuzZrc=O2uP-WHZZHJp;6~NNmH<-mLW#8|B!FnMuPF<>h z7;UiVBA!oC{Fy$KoTVDh^qX4OBv!0zT5rucVMcxxB$83f9G4XhB~8R)puKRLgpOdV z^$zrH4;N~&avHPJw0BCgEEl8jF06&U;`(5j6o4KtbLC8bp&K&&kzUC3Pu2+-ERicE z=StK)uKscZP3^^QKmlfZnY3$WwiYvG`j=v%h?vIc3+84g9W}Sq%FArW4VCRy2Dtp= z;L44KyNdM&26}MQkApKOp}`&4>v9zf(VVZuFjOyjfhy2t_1aylf~Gw!(|?wnJtEao z1}mnz^=f`n-CDs(bjvy()h!Iwt%Hzy>)nYS?gfKftT;WDj?~FLWaqLM?;||ML0Z2Z z)U&k*Uq2e=1+Fwa@7$TV9bfa&#e8wYGPd-KZEZS?Hsx77w+x=;jw@&Sc#*^}r06JB zwYd3vp-wD%(pl1qM_0>sWhiM|tCluxIF!Ll1b?AxaCh}rK4`EUMn?^z+J`!wp69Wo z(oT{*=5WkVoK6v&te3C?jk_~uF)EJjmrRW^QHJRN-yPzD9bnTO;=4+`NyA3Z3y1OD z7)+)8cnbgtZ!p{G;o{Is4dF~8XE3r7{R3GsT6U4+W;K*{mKJ3bgX}!ZL}-QyU|P}h z<-*gD*_JFzbMrJetI#33wDFn3sl`&nkYY-)9bD%iw}UFiEf?0HOg~ceN)5XkTpGX@ z95nTtML5(fJdI1OETd*qX-9>r0a$H4#Ym*982wxNijGk3K3c-!@rA$|)9&pY!0R-r z18?$ihTxS0Rh73rvz!%ed~3hrvLq7Yda$&@gB`{C0!ONg*xz4UQD3YqOF&&!^(3`I 
z;?XNbfuq)mU#VMG+`nivQZr>tvWY2ro4$jgy=?ySo^-_&t7j!4hbuyX9`=QYi#C3b zlolp%v_}^!=0)IOFJ5BGl_0v@kbJ{bc(~h1r11_B1iqc@*oo~kQ8pQ6bUgBwSNO##VOQf?qaYF6GGMhyP%qO_$$CZ4CCs*EhNopecMMcevf;1Id zpfuih;Wa=4`+XeJ`v$h7xE;w}M4^JGXFV%Q%W-tGlF|wopi<%OXKB58kzE|>BE}Xf z6(i4vddYinW*Oht(cP+~{*UxD${63#vS&5@cyEPPME^+_@H?$Tcmpd(B7DX6{}y*P zz;PbueP57(4alT@q$nApDCwn4Ng&0K0|=6!Mad_Mk|2wc&m!&6jbt1NfCNL3I0K*s z$7*_UISwYHv^TYqGL4g7UAaLUCB39d%BE^_Nt6bSrR3r=3DPR*&4f-lW3|1sO3k*; zr2qf!^W5z{I8apDneGsL-`_s_{k{9{?uBLJv$N+{Opb}2*H4Fuoy`OYpPVpQlT8OK zoR|hQ_KG6u@E#J&H-ECH7+V!|!>w-?VUCRf_v23DBNXOdSWY?HcW=X#Y|BOGJtlR4 zUN^+SkBd zFZvW@jCbnHY-VRzRG5Lde>=u$&B1hJ#_ytr2|rK4U~;>62va=?AXDLd9+ePl3oE38 z?>p|*z-&*rckAJ&Kclfh(u67FSuCfZ9?T^7-pjdUf)r|O0%6EQ3yf_hXZr5;EBr}_ zK*A4yiT^@_t}1~?=&2Cvd&ZtUclUNWC0@~y)?C0W19dyz9WK#om+tMXFHg+-k(wbd zu5UghEKF<4-iRgHMBdytj^Kvb^n0+avtj>y5BB^Vo<)Ik`KCR;YvVl?2#zIM^C1+9 zM~^*m)aFCM;(vUn^n8h_o5;yWTnU4Gg9WLRkok6o&Zff_zVp2qiz zv1myErvY^3M>asQY-3~C<9%u|xPw89>^#DX4Zp@WHNrA5^-gG85E?3Of?^B7l?e!A zTxqKma)B!_SmGMxUKFe?aTMo6Ihx0^1AK)q4>)Y>#sPvEH{HJne}nkjjlThRBJWOM z0lB{)e`zu!Ca@}eQbWVBHmcag2ZnF3>{h1IM$`dR<=TEXwA@=?}Qf*VnsuAmXBRwCLYfK zb)3suiwV=@NLY^J$`%rYTL<9*R!L}-m6-c{AAaT#QwLECi!Lpzk)cPBFAgDOD-BcL z)AWtei>Gi5u;0+emV)IiO!q<|V*Y6cB5$OtJ%Uorl!VUKK8;(l!$(uf8(j^h(i=X| z?BLWn7GIiT5kSU`fdLu{y z)>Oox8NwhAec}s@+TOvfO=)uAK(c#4GC{(O9dak-j19h8EBaNjF+F&r0@*)sB- zKBHj^i)MtaK4z%nVfUO*`j&bxyXxMhnumSwNcGv#Ic+Lh+`{ghs1c!Sjke&}2u^4m z4OZKPBG{B*_>Fs^h4M{TBdE)pC(37bj=IP%Zv_La)A-zIo2OXTrsavnx`=KY9DJQ< zv^jD4YUv?$-&w1sjM7*u8Ut^Rg;^}k@Gsf5D|yDB0!%Pr({Xy@$LZL8IWxP{M|pWI z4UJ*5_>7<9hNcmo%HzQ143dzcZhmBFNXDn7q4)La%q(VtvRWV3Dlk`pBmN!Z14JSi zXf6h|81TVstkkLD=8Rnzh2kWX5excvznXz3YrfIl>V4*p*)f?pG4C1X&9s1^^=V5@ zZY&;e18q$nYP65ON~Av;8hSbrswqsNUB|LqvQ29cn;y6`@BlOA(9=f}&czcPlS#0h zBpFD;xO(a&T{ty6ksQY+bmW=e{&EiK?dUocTQ#O(`0B>E`K~025E?eqy})BQ8}gHG zFrwHJsgohLW`6P42SOUsh8JiH;9sLDz&l%l1Fe%B|LKay7^NhJuOu+r(scXq<0BWt z?|Up3+NROQywjV*%yL$pGX>kx03BgnV+n$FQ~W$wXs2w0^dPi!Sr1CB9TVb>wkv zT|&biPH(qaTrBcSQR^s%8{9q;9WHM< zxx)0ZRxlPc7T9=4Jy5eAwX@#N#}ys0em4d=^wC4pGi#RT;8DzIlCRe>=&Jxv$cM4B zx8a+m8?qC^Bp&&O2yNdD*_0FV8c5g`og1=^1*CIRu*-_*- zC0f@gRmF;gecjOl+I3b%)DoY5wG9jK{*HhVmW#G^(%dD01`Uyj>Li_FNH-hzRtoE~ zRI^~5M`dGEz~`$X0s=^MdVA`VeYYox1~NP;gq#z)o8&~!9s zJ7J-;A-;i%YiyVrXr(B(VWwfL-O2{r*0q(5uUNsFR{n}rWyQr{O)fr{msV_6zyPkz z3SMkYb7-g5@Exdft+j>5Lnjkb5PNdy8ILL9Kn9Z4f92`BH_R;j*r+*d^Ti?9!{N(H+f&AK6Naiosl!N&SbJ=~5hPpv5xVj=@WT@+#Ps8onKE$?ALmC&?rbtFz(?7qdW zN3??MFqS8uOi)|1>^dX5cvPsPyZ@U}p}N=B4aS+DX|`d}0%}x!ei$VMj@x0YR+F>FyEI0zl1B2mPOq)@r$>4@J+BTSB0iAA`X_+20 z*DrjHXsilEix-lcp#hDT0NO5Hv^djP(QpjGau=FwoLv2oB@E+26q4l*G=gxQT@PkW z!Y{U6$9QN`kVxD3fuk{z)4qh+ErOi{OLVKi$>9-T)p<+ zLL0#mNq?~L!VAv3YW@o$7o!7hN1bY;nT%pQ1P8vhtU4|Cf4PaaL7_h;IL78msT6E+ zx4=nTUWd9YWoiCaO=v>cF&fb6j^0t*Gnkhub9^p=lcEfmeYy~TEc(^g) zdf}lHD~IW?C*zYd_(CIf;zrBS^}@V@TUfzwDFl89?fnbOkC3-yEsATqXwh{k^5|t( zD)J&~Q{+W*BSl_}-e{3;36A6t^cst+A$AS_mz!|Rs&=Zy<^U_PHltj)ZI?>Rvb7~} z%)MswAR?yKjb&?#TavBN3lWuUZEz!7o8GhwaFnf#Q@du2`fW0l+BcG$G!>Afgi?&4#nY^I?J|*iAtOVGuj_KZX7YP;by?YH4R1rNuC~jGWksi%7_H@o`ZrQ& zq_#Glx#1!sK5RuSR!q{BT-PXusd~G1MqO{02rQ26yJ2R`3N!pT*PD$XzCAqV7vis1 zxoEYG75L=`(s2-JZ1d@vk?$X$p2Btmi9gq%Ifzn|IHQOd;Jf>2jQG%JyKCb{N#wp4YqL#|K!@ z<|hnCGpcT2=*&{R;?P;$!H64~F4qs@qN|6GJ~BMAY^_fQ$N%qEcHf>G4cOR^yT>9x-@NIky^&3lm!3Euu?kfRXj2oQ=Fitvb4|dwc zI%LT7`1j#EF0VvgJ~Rv_j*pIxV^5VgF^h|XbT7Z7H%x=s2~JSXFob-?QjB^Rwc5rI z4kEm}A$)tLq4Y5LelFvQ$#>m<*E~Kb<;yq4K32@k&CT^>4Rh=M9e%=lMyrzY--^_Mw!X-gV}@rOP?{;oKExDB|j(oV}fnXQX=j#z%<-EN7&x^rd%wBRmSFB zX@1X=Y*oQ~wDmu1YLe57AyoK=*y1Nw6nL^J9A0dkzqF~bosEg!o8b?V2BRh}t;g}& 
z$EVedIH1*9T#edl8{3snfHlu=#8EZgiGQ-n)rT{9_%1#*lRWfLa_9)I?L0F4#A8Rd zCin(Udg|dL*n7vCL3QIni4UXQcwqeIs<0toTOSUG&&=|}IPR%J4*DZ&_%i!YGU)%m zZeX=&H9whz(;e+I#F~;Mva!X`s<91%8$nh$)fGo~nakqzN`t+4tmjXEh0B9@FBv{$ zo%7d4d-r*l4mNM%WCpkG^QLb9C5InZcvBQFOE`s17IF)qf?m*pt5r_JqCY~$D?H9) zPb?PK{oNh3$7Ah&pPaYo54rPNavl=reGPm?zW`sxX7C&tJI@=F;K2EF$Nh8W>?AHz zz-e@RKf;|KlX6!Rt_R0}VhWpkX^Z=iaFF6G;xL1IcD$Yj5Vz=sQ{}uy%3qtJ_v-oE z=fbgk`uO}LuHfbqyJLA@k(_foiz^Po{Z%cOkI?y?AKa^qGlMAGk>XH{DH4^8-2}h#j^~XuSIyn|Q(02<}-7$0%XzBoY93)t&R1 ziSD_n!P}t1-C!6w_*BKAit$rT1EZ6tk@~o;2RAC7!{q^|aGHO10{2T!&f><3@lnL% z6f&HL0{xXVC#StW-}TD3bi;Op-(7aK9ohQ^(eHT<>+3R~hJ&*@}0FK7V{# z-a^rZVO&Rpv#Y$FDU@2tC0~MRF_!tzhMQ7Y@_81Lg&c=?&Pg3QGmf<5WrRHSeF8Uj zOpoJ(&$zz{(7(c8UU|Zzo_DzJ4Hr3t0QY$P=s4V^p zv*1P^cXP?*h$uTGUopdViL8D4HiAQ6lT$eQ*;3ZH#gCaFiQ6BMH+VAgw%%t4vuy)(StvDr2>%CVNGkT`IfJV@c;9;fCz~-KF!k%*k8;WGVntVBcms@b%*Zg1a4m9Elen!;RZ`O#3Yj2 z*x0-wd06Is^UK=glM1;uibH-7QwvGJ_s`)nBwh=IgKD9;yZj_?DD`(;QS|h>6{oI3J!3YMl{eaESHCwA`TuL;9_6EfR4Njx~3Oh@sQyay2 ziV~b>#GA}ULeuVb{GC}oGz}aGH*tBE$Oqd|;R$8S8$sb_?ngRo6uk&9zVV37p}`2k zJfpA8F$0Gk$^2anZx{*Dct4LeuLXRA;*H@#T-=um?@>{`1`+~yPoT32;^T7Lni6+| zoJQmG{c0Y5VoJuMz*a_$aZ1j+htyOv;^MI0VC60F3OV$ojR5#U$%~)7G&FL)vUr51elye-1+`$fEUZ?BOxQC=`w45yJaA4~B)9$<*ffOP3*OT(3(CPf% zAX80maCBq}_w~suZ?=`!$=&RRmHcg0B4Nbkbp}I8@enKW{M@LL5HK}{h9mqI4X%CV zC6ekAjy+@jLGdg;Wu49+$E_>A+IvGIv*(fIlMWAF0>=3OE?xE_{FDuTG&Pvw8}p4t z8x5921L<8pi<_Uli4gV4<0s%2zN>i;&}88ox+t1upzA%XBjOT{sL@mBBS;(@{GN7)1L3MImYAY#b2=Yx$~z( z1v-`IPxY|Ee}Q-Wxk(g4pWmT797i|9^1-6XW@cvG-+0HaOy5y1(9?0^>`jIh0gf!F zMW;eH#}Szy9qT|LjLOC_Y+*;{y^F>T<5L_apg9{y6NzDxCmc7K;f-)%hTw-y7;EEJ zY@{11Gn-nB^ZW}M1YQ`8A77=%DC;UKX+}y)50}(z8uJvi@_O~g*7yN z!_V+u{AciUDEJ|uqYpoGw4tNS^g045sfpJ?;0n)8Kc%y-q@-X z#!~u%tJ0c>d;HDr=tH-=?TIb`YZfO07Au_Yh3L`Pf<^1r)0Gz925!_ z2xc|%{>z9gJ8r`T4IxTQo)!zs6J*7(b7?PJP}Nvy3aHzj+S&|XY5poU8{e)Z7UCKA zCgaG2y&X$*ZXhhpGSTXyzq~1m8orZqe4{S$)^Y;zAtSm5}a^E>pz~0lK1QriWv4`0j37f?y9eWm_HxY%P@! 
z&LA87ZC8?Mu@$Cqdn?hcNNS<+Ig6+DQKEfn`+h2NIgWzAktvFVXPk8VD@4OveXkJ_ zOC~CD2-_Ti`m2P_pND#HiR`UhCuAIR?~S{Hpz*G$M`Nw`1fg%BTVAs{j$o#;(0KFQ zf`(EPyioJa3v;~LsE?vK8#MG&?~-!fCobZOPy9;KxVuQpSi2y>U3?xD{01d=1E@~; znjY&!TL%1(Rq}_`_|uHx@DQzrpzo0L_Tgk>lbGd+juL_J{N~W_J#_ohLA5+ljry_mKAOgK(NB$=zX8A|I9xR&vmRN>_X`*P!eT5nj__uh5Nh21jb(=s%(KXT z4TQ56ESuoUBu@1F@)@;sl1y2+)B~SsdASGU;L{Ri4xhO|bPBHGnldaaag8#XHR1~I zCWct~<((7feIokx7HSOO9`mM-EbA~lKEkrb$8hNqAK$GkUI%T7gCwo{``_+xiT}A~ z{C+u}{M)eKx$ciT_g(zrb2s=5en?w>=Gr|Cp6GA#4hTvEP3{MO3qJy`<{!YzX9nfR zqha?!@B)6M`*A#To1Oavo|g_dcLdM*hn)Kqo+)_pRXnd9b?&F|{G)wM_dW*RI@IKc z!J`pB0-lffQSeUtC-^5?f zSV+5=`8(s}|AN1+*$$V2d*nOUIydwJ%nPeL9m%^O9~QoMy>qu0I$TzG7`*jDhsz1i zfqO1?xC!BZ0$zhen-i{p*OodQjClVaPQduV^s?|*!1n+ag%ND8s=|Me;5X2bPD>H5 z1J{MW19=znwSv54{5EfP?v{*mi^8-|^Ht#ty6s$B_4wSGfr=acJ&6B1h_A2@9}>R+ zj>9)E@&TBCs&lm~M*J}NV#JSu7mWRKB=3Q|Cj1S!*MrQxhBk-!areF8HQ?^RXTiOx z)zA##`4Mmze!smBO!HqSBlCxaHGi_g{{ixk53Y8{B7O**i}(?6-q@duxWmnt43Zoa(7dRulk=7zNruP53P2I zk@p$-knk3m-*kAj%Yd1`Pl5;Da4svX={qOpH9ZT$7hwLWh1G68;)lV-h#v!&gs;GS z*UPKjC1G{15}EIU`I@i~-)l$@;ayvizGdY92SWUPdR-<`92jAKn7$gYdnS z&!W757@>f{HKOC9wg82zyAO1C+ zE-!rbew2^SPB$0v&ESIYhqgoB-RUk0tNSJ4y^wzbjqO6jkATY&4}&Yh7h%5Z@@iKV zR`-@7^Ib51P1uL`3d#feRi@W>VE^{FP#z+_8JrUS@D9jpt6eWRPVYWpmj88cI5#Z3 z59WKSs1L%AgKxd+TuxZSHvy*qqmXae(&^@fe*xUnhx7r{KefLgtoF-dUhP+f)&7#0 zzq|wOUuTEADy;Ue3I8SJ>$^Lg`(W_z_MOh%v8KbNh4+JR-qhhrM!sO=W#Q*wesj{{ zE*W`6Slv&3DEO!D_X?~0qOgXyEc_+7|AD~{w4t=#2j8DY`V0RwcuAG4dX>T-!GZ*H}U=x_;W8{T!n{n7dRzcgM33_ zmFqR~#K^P4w+&z%`r;~g%$VmK0Yeq9=uZXA@mq^OC5*qso_7c1Gj&(Zb8fLp+<6D@ zofw059mO~sdgLACV;z3Z@#|uc`XkWiU|h}pQ*ci&%9Ge5-wMXm!Mj7=2+l-20A~2; zp2`_d@OR+eJ=;56UijWUD4%^Dt{{wuSM&Iv?j3`C&8yB`6#hPN*ULyZ zVL$$U1<(=9d-ow^6F~dk{nyZD2*adTsM22kV{oqr?cI>!jIb{+hr)b>?tK&HduwP9 zMgHHw>#z7Jk#~>sb&%&pe$!s(_ST&%2nXLWH;d%!AipU5IL!B;JtztL_70g*7RJT^ zq#x!J!aoV#bU4gU7(Q|jxF+&n0k1s@Ur`1qSMC=6uaIw?Ly-_(hdJhk0_tcazZvo& zVf-wOTmOP{8Q~+~JCT{kBK`n)LRjOQ7k(b*doF~z?^h77Kg6Fy{$7Hd?8_taYboMe z!L^7tf-&T8m>&RR2;X4J9lAtyFAeUFxCfjPz6*2wTPldJu+MMIcZP(AAZLG=6ZYlr zGV)XSI|<5T4dqezDVS&fxES#UFuH6X9)!%Rny@e5ZzDd!zP!GJ_z3&*d<|AQTkT`W z;obA)7yV&k7+tQH`|^syk1oOUMTBoX`ad+m4f#6Av%qWaLk0CW;tzpK!UrEf ze>{x%N4y_gj(8)uBCP4VDEwvE-+;!Y`+XsP=1+2X_Zn_quypIVJoY%&(h6{0t8Xe~t1Nu-+s5ZSY_f`6YZW*3#CazAgxl zgFjS*|H52rTYDAt70mLjsw!N8d=UNWRbf9KJ%;jkQ!t-=4CBLRk-ox@f$u_oaQ;U3 zRL=PpS>+kw&%yj1NUy4q*NnU#$?t>v*CM8WZcT7s-Af6pf0u+8;9nZ$hw}}FhrA7( z$M~5{`^>*N;Xk4IS=3kIEg8fQ>CN#q&1b<|3Mj7;-w93$e*y9x7f@a!z7NdtIPL!| zq1<~O_q_k>>suf2%& zP52G)x)R)r_)c(M_@;x9U+Qr4hKs`6DSr*=DSR4y*CO&?_*Kf^2;+H%Z;4zD<9Bl3 zBM46&@k@p9e;;`BcM(3}7s0o8V!h4C^TIy^`F-fj=Zw58{6~=AwqcdKWaOL=sQmgx*-4Guzz~A=L^+aKxzZjfc6!!Ud4C{-AFA4v9*kAYTDpwP}v8ZbIB9KP%Z4ur&%pdf z)Tbrke-2((MEMY20&hiqsz-bu_*%qwgS$5e|I~a+xO*7xq5h?X9|3QA1^u(|De!Fz zDF0y27tcQ7oSVh^jPNgkdtSi$OT-Dd81YBI3lYH{-As?s9=QP@B53ffWO z?|^$S-Z*CL=Y-$?B-Sq~7%xY>7Q86DAM%eZVSW(t&EQ(Zd%^CW5WabsU;7ruL&CoV z-c(2bAMs9bKH`1gV#LGXixGbkd@14)a7FmThtZzBgYZQ1TOhwGd=T>8*ATXQL-@!C zz^RBI1*aqaEO?b?F&DB|0|Md5>x z-?e>}D+!O0(cUaXd>gnd{1wP|A42;Z@g3lb@HZj<$YFfn5WWT;>_vTx_(8C{FQfL!CDTDcC#5=(Y5$^*pMm!9zM*K-I*Kt`N=-vqU zn($4ZK>8j+f85uU-vW6eya)2#!>I2O9{>+W{3v)L;?IKf!aoZ8cOAxjBa+_+`GWB8 zKt4Eu@n^&jf|nxR1imVKXBOd^#Q0-t2ru~nI4i8_mlG~Pz6tGNA>#YN^AQh%7a~3g zE=T+%xEk@N!ArtRaQ}9;r;+>?$lbOOKKDtKeYBrR#QVX05g!51Mf@zdApBX_zw4_A zPsDq`i^8u`UPAgrya(KUfAIgNXR!YF8p>b99|fl)-UJ>JK0@;qgeT&Ug6D;Qp7Pf_ zTqWX|`7Nx=M!XK35K z?azB7W`CXtYk!^=*8Y4bV)o}5;jcUu`t#w4*`H^HwLhN_*8V&nG5hmk#O%*Y!rGtL zBW8czJs9Gr^(Q5){dqcK_UAYU*XW?a*L&DmhXCh{QJ`pkd^EqMd&le+Re_j*T^s5VN zf1bj+Cd(h0{dsT1?9YcHW`CZEnEiP!V)o|~!rGtDM{@S(MPcpF%Mr6buSCrL{9459 
z&mGp$8D8zrvk|jD&k1XPUW%Cg`6XfP&npqLKd(j1{=6=%{dwO|@Sn{7d`MXP^L)hY z&kMqu{@k0!@N0j5QCR!)s}Zw5uM2B`?)C@!+MlO{wLedUwLkBRnEiQLSo`l`!&zbN z&yPjS{yZnF{dpl`_UA=m?N7Px&iInqpO%ERKP^Yh{&X>7_NP_DHDT?~9oD7kKAHV_ zN?85xjhOx?!s>rIV){QEG5yaP&Vf0ft$qsg`R*<^C;UIa$(k-#6h8QA$WvXeB<$C} zHgvhG!lx+T)aB~JUjz5{bvcK1Zn{st8=Qa{KUICgI)0donB#|nu#O+*Bj)&_D6Hd$ zi(vZqr|@s!MXVo0ybsKEZpuIQJ?Nib?sRqG=fJnT(&@Uf&Q1B>AiswFDZ(1wMA)a# zTliit?9-=)@9DxmeXd}?gs@MaxAFa3*ngiI?(%y@>EF*WJZM`hA}9BN7b9K=UW&LE zT#I-E_-e$oUl)D@?rp&PwTM%AcUWhq|CINFQxV?Y&D|AAN0 zpBp|V?DyB=y=3GI!dGCQ<@c)LYsS2LIQX~jufhMr$RESK!XJnHAj)T-u!b)k$-5vg z2>boK$dj6|-_MKknmHK!^ZR*os4o%U3Z55M_lv?`hyNd3#Q4DQqA_0;{v(*@_y3yV zx-oxE*rylDZwlWHn19N>!gqcz@)zZm-xVky1V4iEH6-l!qeeA6#Rc~3))kZ-=grpAb%C{1+%{WErf4}0aw9HFI8N3Cr`qB zauw}`VXni+=GkVFeSW-+@EaZy_WO&!i}ED=^Kk!QC&rt?>izh5cc~Sd$E3OqdT6*nh81WBe?+`~XKjpJXzleLmHR1mV_j__!e-c*rx}Og2`~EAB@idtA{rp*& z=)?Mf@FMt@?f4!SaSu2v{68V@9m4le;SJ}|z8zTQs=`l`Gq8_&FWvuPa4*&qh9ka} z>kAQ6o{O0Bys-a1br|Dc;a`S(T<@J1{!ieZ64F!Hhwl~CKjE9E5FYf0W#L2QOXz=% zychFihF9ei!aqRsuc7`5SIHHWpHBw!+Ftew-<`*p0Q18{_)+kVH?W>8?8E;y=9j`g z{H%Y%KM3<(ok$O1AD{1H{%z!Qn5Qzl|CZ*v@%;z$FtWC%#faISUKC#UIfQQw%9HRQ zcn7|xF9>USSrndvya(Td>*%BDzAry7qI?Pa@GZa==1G+M{oAi#ZX@i^8*(Ji-Uz-JG3BL*DK873g8jP} z!#t4jA+H73MXvElVO~Xf5%Sv|@;lUIr z@9&jW{_JCHZUFsd*EM6#;T#3R!84IN@l18#JOiE^*Wy1YWRpWP>5}yx{T^Y;4+$3_ z|1$W2B*?2`{!QUJ0zms;+8pHY-t(Ub53fdB1UtL$4)UVmim-;KyEB+q9uoc;v7ZOi zzxRD4xHl)P{_)J*I{5dgEkS-&_~(UrW{&c;_XPPoIChVF1j*{&BKTIA{}nNxeSa(0 zgm1k!xHr)qks)&G0*xz*7T|wMmOGKo_nZc_qd0etl{ZH`HtOR64vk) zP`+7S-uh?=?*cfs&po8fKdnF9lS#RTFNek<=1T{Hdm5i5VXZG*W21j6Phm}mtohL= ztny*uTaShCO&Ig@!s`BlvA<~SUlsn6xYvy}7W!Wj?i2nM;bCDd2N$uX!t(NF62jA+ zX)QkmVJ*)UVQt^C54W~w6T*%1Y2>Mct?e1tIM^O)ejEdT8A18`e+~aZmB0+2wr`x% z$L$&C^ki+%IHxB!+9sRYv1a|FwO%Dy-?jIWf~)C=ZX7TaS@ACCFr_L*b4xP9dqEpC4~=8D_XK4DES zj-lfAl4B;a%0n)+|9^F(rEHb2c4%ab+O>*J;q47wc;VGAqyO^$ zKaufJ&~?2|$P2=+FL9zfXi87xO%87^}|$Xg(vFTO#3VXEF=2x4qW; z{8_=ssQYaR|2*1VD(*p7g%iH|+IW7;yVLDZjhCs_&Lk#GoJILzKB`)f*FX39h&2`! 
z$n)>`bX(;zlHV%+H3?@5?G4pciQ@13C)P5I`L(`0U{6oe>tGT8ueOCjB&&Jgpp6P6 z^5ufnZv8UNUzBiaJTKiDjC@hz%QIiG`eO+vw)eKEy69I-z4*Lmq>q<3ty~lHJfjk; z-IAV{jQ&56bSoKm`o$gY{f^bIN%%8HpW|`nur23ztG}kc?Ds_q^*SnNbm7bRV|o4> z0jf7V+-@x=ru-k2aMnycN{PQbD-f$b@waBO?0-R@{{FHUzEz%QC0^V!AFCe}^HtH* zVJGJyJ{>m4ro;Pc9t>`yRy*~rRfwJ4ts0DFuN(fhU*ZZXe2ow54c;xG3)#%e;+r*6!TXt|a30k!kGxX(R} zv3g$9!RUWh+~*#|SbbC6Piw<3!ZAsQKBIp`+~?lFSpA`f-{^m@#Fu;jV)cx;&pmyy z>J@jm*DhASso^*JKOpJAy>hYoQwb;6Mq|Y@wX8?ndl#$w#C+Q1?`6?XjQ($lKKE3` z>Q0Fl_nO7(H$FYe)rRU+Z%UbI-fB>E|%f48__Gy4Bn!oxje zvHD?k$N0NO^cPIOe5>d?qyLzePosZ_q+8MG^S(gl3)gpI^_1vO$ar35eW0xg|YgQxL-LE z>tY@+;pexzSk+}-z;AR^^D-~_lIG*5g1-L$mgw`FOst9$9)7!v)dLb9eq)K%pNM|m z=zqWJe>ygevsMy*XUt#K^f%^zKO~vXt2`9e+S+yGJ-%hQL)+<#0W5}2F(fUSe zLy%m>W1YLhe=DI*U(j~{2!W$`gy^sL?)%7jot)@@1NpN)n$ISp zKLCBYuS)&d40jkFZ6?+HpL=)KsBx$Izv%Tp*wEMT*SvmnQlt9#+>7w6Z z-Fyt)A&v$ zZzUg2$t36{qW_%Nzg4K2Uq9^idzb0=LZ9K+oKb%-`E=gc$Vt`z7G4R-eQ%_kBb)&I}PDC+ka z{Tb2MVV#=)ZPZ7a*K(!$>qS3f+?n_Bz1)Au#}%^^Ri z$o~p|mm;P-1$U?@|3&=iI)`#mDhm16@K=lY*TE^@js*L!gZbT!iss4Hi2pgbPu348 zC-W`^D)J@#@$LjF@~ik;jQBUey8cEvS=TSf{|0|N`%gut`#kGUMW*{a+fPNN`#if( zMgC3v@vJ_Tn&;VkWtzW~c=U3{FSWUqc@~|D_P>F@V#IHNdA6I1@;C9vvtd-q+$*ol z_+2v0z2dPvXLv#SeY(f=;9f8)GSeeDOlmUsS}8MqxVK7~@#WqrW!mT7DP@}HUMXdo z*K$t(Se|qpjO`iqhSuUgC}rLaNTuN`TaNOd{_`$8Dw?PNae1Ksyc>?H;a^;6{<-~V4+mH>Amj12#45m&*B5wq>T9Pu~7wTS;6`0a=< zgRe#Wd*DtPPtm{M$6qSqKLBrvcnRDW@gIT*BmVc`!x6Loaqkut-DiG}Ma=dqAMxwp z7b0eTnUDBQa7mcW9NW8BB4&BG6fx_|>k+fQRwHKn_g2KL-&Z4Md-zVozYWHPq+Zc~ zmbW#Adkt?foHl&G@UUTiSEFM3{SN-N6QIa!`lY$>+zXcK?)i4`v-qQ;d5#h$2~cF# zfms5S@(TeQe8KRGhF>!LRl~1Dyan&CMSKt5--!5LyuTIkeRzL6Vm5`}jToP#-5N#$ zis8@TFNv7xlQx_&oHd*?oHtxBTr^xVTsB-WTs2%XTsQ199r0p(8NU?&gHlcmrwwNe zXAS2J=M5JO7Y&yTmkn18R}I$;*A3&NQA>Pvywixkk?Z#ym1m4RYdB{(Z@6H%Xt-p! zY`9{$YPe>&ZrEpZl;7byOO*eNXe3V?&KS-b&Kb@dE*LHvE*UNxt{AQwt{JWy)(H@` zSu}9m&@1J{aN2OjaMp0naNcmiaM5tdaM^IhaMf_laNV#@xHP`0_nG(`P8-e`4&Qy6 zx`yy1f3qT!O^vf+y1s^Oa9x?x>N((tA@QGrrU45tle z4D0?NwVyL`-It^Cf{_;umkgH;R}5DT*9_MUyAQO+H)S|6%ze648vcyotl^yDyy1f3 zqT!O^vf+y1s^OYp-urqhVzCChKq(vhRcR4hO35a zhUyyThcdNF<-!Si1jpZ4`yjwMv=M3i!7Yr8-mkgH;R}5DT*9_MUJ6R8E#NTjY zIBhs%IBPg(Sl27nzk-qL`lZTCMqV~tFU3)|{nD%F~QiD6wYRr47m&l=7d z&KoWmE*dTwE*q{Gt{Scxt{cW8cuRRr8BPqR4QCAN{xeOVoRQ}Z7Yr8-mkgH;R}5DT z*9_MU>!&3RZ)$yOdL)L^hBJnT;ez3!;gaF9;fmp^;hN#PVcm+V z`JIwawaSU%wBd~5tl^yDyy1f3qT!O^vf+y1s^Oa9x?%TWlmCVj!)e1A!&$>Q!+FC6 z!$rd-!)3!2!&SpI!*#>DF`PA=Gn_YEFkCcTGF&!XFh>HJme?H(W4WG+Z)VHe4}WHC!`XH|#%BM){rMB0|K8;k4n5 z;jH1D;k@C3;iBP^;j-b1;i}=9;ksdWXKQ@F`PA=Gn_YEFkCcTGF&!XFh>HLT}D)xErt z7Yr8-mkgH;R}5DT*9_MUySq*N4JU^A+ay#PpN!$G;hbUKAsE{)7%mzv87>>H7_J(w z8Lk_4A88E_e`7ojUt&0IIAb_#IA=I-xL~+wxMa9&xMH|!xMsL+*llTzZ_03DSoa?` z^54j_hI5AVh6{#^hD(OahAW1vhHHlFhTT0T{)Q97X~P-AS;INQdBX+6MZ+b-Wy2N2 zRl_yIb;Isn6Mw^rVg5D_RVV&B@Ru>1HLT|;)qLK_3x_tkHm1=aK>=faL#bv zaKUiVaLI7laK&)daLsVtu-n=i-<09RaN2OjaMrM%$JO-B8+pNS(QwId*>J^h)o{&l z-LRhj)$paZwWdd6IBhs%IBPg(IB&RMxM;X!xNNv$xN5j&xNca_GdJRYzlpzL{uU9H zn$H-{8qOKc8!i|w8ZH?w8?G3x8m<|x8`cfJ8lTkm*7zlc(}pvKvxakq^&GeQS1|IT z;gaF9;fmp^;hN#PVYj0-zA3|r;k4n5VLkt?@zZnS%6VhHV7O?wWVmd&Vz_F!X1H$H zUp5$(Xa4pPm4-Jl%-=qWQ!+FC6!$rd-!)3!2!&SpI!*#F`PA=Gn_YEFs$d9wEUHfyll8) zxN5j&xNg|>oA?{ndlfW1dY^)F#+c6<&Kb@dE*LHvE*UNxt{AQwt{JWy#)aZuX?#+K z6T@l48N*q_Im3Cw1;a(dCBtRI6~k4-HN$noZnufQVg8mEm8MVHFn_x%mS+v;4Cf6O z3>OWT43`a83|9@;4A%|2!PfBWJsBF`#K_ZzGlsK5(<^ zoZ-CTf?>VqMBOVHdD(EqaMf_laNV%mXX0--F`PD>G0fjeqtfu_4Cf6O3>OWT43`a8 z3|9@;4A%|o0doy+YN$2*iQ%;2jNz=|oZ-CTg5jd!lHs!9is7o^n&G-(x4$*My%BGL zh`;Sd#q=R(BIZ(B*6>8cTxyy#toPT@Jub!QJvYjiBJ&SH&fjXIqMTfd_+#L@;qH$@ 
[GIT binary patch data omitted]
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/tracer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/tracer.go
index 3341294ae..3629c899b 100644
--- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/tracer.go
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/tracer.go
@@ -35,20 +35,25 @@ const (
 	aggregatedFlowsMap = "aggregated_flows"
 	dnsLatencyMap      = "dns_flows"
 	// constants defined in flows.c as "volatile const"
-	constSampling            = "sampling"
-	constTraceMessages       = "trace_messages"
-	constEnableRtt           = "enable_rtt"
-	constEnableDNSTracking   = "enable_dns_tracking"
-	constDNSTrackingPort     = "dns_port"
-	dnsDefaultPort           = 53
-	constEnableFlowFiltering = "enable_flows_filtering"
-	pktDropHook              = "kfree_skb"
-	constPcaEnable           = "enable_pca"
-	pcaRecordsMap            = "packet_record"
-	tcEgressFilterName       = "tc/tc_egress_flow_parse"
-	tcIngressFilterName      = "tc/tc_ingress_flow_parse"
-	tcpFentryHook            = "tcp_rcv_fentry"
-	tcpRcvKprobe             = "tcp_rcv_kprobe"
+	constSampling                       = "sampling"
+	constTraceMessages                  = "trace_messages"
+	constEnableRtt                      = "enable_rtt"
+	constEnableDNSTracking              = "enable_dns_tracking"
+	constDNSTrackingPort                = "dns_port"
+	dnsDefaultPort                      = 53
+	constEnableFlowFiltering            = "enable_flows_filtering"
+	constEnableNetworkEventsMonitoring  = "enable_network_events_monitoring"
+	constNetworkEventsMonitoringGroupID = "network_events_monitoring_groupid"
+	pktDropHook                         = "kfree_skb"
+	constPcaEnable                      = "enable_pca"
+	pcaRecordsMap                       = "packet_record"
+	tcEgressFilterName                  = "tc/tc_egress_flow_parse"
+	tcIngressFilterName                 = "tc/tc_ingress_flow_parse"
+	tcpFentryHook                       = "tcp_rcv_fentry"
+	tcpRcvKprobe                        = "tcp_rcv_kprobe"
+	rhNetworkEventsMonitoringHook       = "rh_psample_sample_packet"
+	networkEventsMonitoringHook         = "psample_sample_packet"
+	defaultNetworkEventsGroupID         = 10
 )
 
 var log = logrus.WithField("component", "ebpf.FlowFetcher")
@@ -59,38 +64,41 @@ var plog = logrus.WithField("component", "ebpf.PacketFetcher")
 // and to flows that are forwarded by the kernel via ringbuffer because could not be aggregated
 // in the map
 type FlowFetcher struct {
-	objects                  *BpfObjects
-	qdiscs                   map[ifaces.Interface]*netlink.GenericQdisc
-	egressFilters            map[ifaces.Interface]*netlink.BpfFilter
-	ingressFilters           map[ifaces.Interface]*netlink.BpfFilter
-	ringbufReader            *ringbuf.Reader
-	cacheMaxSize             int
-	enableIngress            bool
-	enableEgress             bool
-	pktDropsTracePoint       link.Link
-	rttFentryLink            link.Link
-	rttKprobeLink            link.Link
-	egressTCXLink            map[ifaces.Interface]link.Link
-	ingressTCXLink           map[ifaces.Interface]link.Link
-	lookupAndDeleteSupported bool
+	objects                     *BpfObjects
+	qdiscs                      map[ifaces.Interface]*netlink.GenericQdisc
+	egressFilters               map[ifaces.Interface]*netlink.BpfFilter
+	ingressFilters              map[ifaces.Interface]*netlink.BpfFilter
+	ringbufReader               *ringbuf.Reader
+	cacheMaxSize                int
+	enableIngress               bool
+	enableEgress                bool
+	pktDropsTracePoint          link.Link
+	rttFentryLink               link.Link
+	rttKprobeLink               link.Link
+	egressTCXLink               map[ifaces.Interface]link.Link
+	ingressTCXLink              map[ifaces.Interface]link.Link
+	networkEventsMonitoringLink link.Link
+	lookupAndDeleteSupported    bool
 }
 
 type FlowFetcherConfig struct {
-	EnableIngress    bool
-	EnableEgress     bool
-	Debug            bool
-	Sampling         int
-	CacheMaxSize     int
-	PktDrops         bool
-	DNSTracker       bool
-	DNSTrackerPort   uint16
-	EnableRTT        bool
-	EnableFlowFilter bool
-	EnablePCA        bool
-	FilterConfig     *FilterConfig
+	EnableIngress                  bool
+	EnableEgress                   bool
+	Debug                          bool
+	Sampling                       int
+	CacheMaxSize                   int
+	PktDrops                       bool
+	DNSTracker                     bool
+	DNSTrackerPort                 uint16
+	EnableRTT                      bool
+	EnableNetworkEventsMonitoring  bool
+	NetworkEventsMonitoringGroupID int
+	EnableFlowFilter               bool
+	EnablePCA                      bool
+	FilterConfig                   *FilterConfig
 }
 
-// nolint:cyclop
+// nolint:golint,cyclop
 func NewFlowFetcher(cfg *FlowFetcherConfig) (*FlowFetcher, error) {
 	if err := rlimit.RemoveMemlock(); err != nil {
 		log.WithError(err).
@@ -132,14 +140,24 @@ func NewFlowFetcher(cfg *FlowFetcherConfig) (*FlowFetcher, error) {
 	if cfg.EnableFlowFilter {
 		enableFlowFiltering = 1
 	}
+	enableNetworkEventsMonitoring := 0
+	if cfg.EnableNetworkEventsMonitoring {
+		enableNetworkEventsMonitoring = 1
+	}
+	networkEventsMonitoringGroupID := defaultNetworkEventsGroupID
+	if cfg.NetworkEventsMonitoringGroupID > 0 {
+		networkEventsMonitoringGroupID = cfg.NetworkEventsMonitoringGroupID
+	}
 
 	if err := spec.RewriteConstants(map[string]interface{}{
-		constSampling:            uint32(cfg.Sampling),
-		constTraceMessages:       uint8(traceMsgs),
-		constEnableRtt:           uint8(enableRtt),
-		constEnableDNSTracking:   uint8(enableDNSTracking),
-		constDNSTrackingPort:     dnsTrackerPort,
-		constEnableFlowFiltering: uint8(enableFlowFiltering),
+		constSampling:                       uint32(cfg.Sampling),
+		constTraceMessages:                  uint8(traceMsgs),
+		constEnableRtt:                      uint8(enableRtt),
+		constEnableDNSTracking:              uint8(enableDNSTracking),
+		constDNSTrackingPort:                dnsTrackerPort,
+		constEnableFlowFiltering:            uint8(enableFlowFiltering),
+		constEnableNetworkEventsMonitoring:  uint8(enableNetworkEventsMonitoring),
+		constNetworkEventsMonitoringGroupID: uint8(networkEventsMonitoringGroupID),
 	}); err != nil {
 		return nil, fmt.Errorf("rewriting BPF constants definition: %w", err)
 	}
@@ -152,7 +170,8 @@ func NewFlowFetcher(cfg *FlowFetcherConfig) (*FlowFetcher, error) {
 	if rtOldKernel {
 		log.Infof("kernel is realtime and older than 5.14.0-292 not all hooks are supported")
 	}
-	objects, err := kernelSpecificLoadAndAssign(oldKernel, rtOldKernel, spec)
+	supportNetworkEvents := !kernel.IsKernelOlderThan("5.14.0-427")
+	objects, err := kernelSpecificLoadAndAssign(oldKernel, rtOldKernel, supportNetworkEvents, spec)
 	if err != nil {
 		return nil, err
 	}
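For context only (not part of this patch), a minimal sketch of how a caller could opt in to the two new FlowFetcherConfig knobs; the import path is the agent's pkg/ebpf package as vendored here, and the other values are made-up assumptions:

	package main

	import (
		"log"

		"github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
	)

	func main() {
		cfg := &ebpf.FlowFetcherConfig{
			EnableIngress:                  true,
			EnableEgress:                   true,
			Sampling:                       1,
			CacheMaxSize:                   100000,
			EnableNetworkEventsMonitoring:  true,
			NetworkEventsMonitoringGroupID: 10, // values <= 0 fall back to defaultNetworkEventsGroupID
		}
		fetcher, err := ebpf.NewFlowFetcher(cfg)
		if err != nil {
			log.Fatal(err)
		}
		defer fetcher.Close()
	}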
@@ -181,6 +200,30 @@ func NewFlowFetcher(cfg *FlowFetcherConfig) (*FlowFetcher, error) {
 		}
 	}
 
+	var networkEventsMonitoringLink link.Link
+	if cfg.EnableNetworkEventsMonitoring {
+		if supportNetworkEvents {
+			// Enable the following logic with RHEL9.6 when its available
+			if !kernel.IsKernelOlderThan("5.16.0") {
+				//revive:disable
+				/*
+					networkEventsMonitoringLink, err = link.Kprobe(networkEventsMonitoringHook, objects.NetworkEventsMonitoring, nil)
+					if err != nil {
+						return nil, fmt.Errorf("failed to attach the BPF program network events monitoring kprobe: %w", err)
+					}
+				*/
+			} else {
+				log.Infof("kernel older than 5.16.0 detected: use custom network_events_monitoring hook")
+				networkEventsMonitoringLink, err = link.Kprobe(rhNetworkEventsMonitoringHook, objects.RhNetworkEventsMonitoring, nil)
+				if err != nil {
+					return nil, fmt.Errorf("failed to attach the BPF program network events monitoring kprobe: %w", err)
+				}
+			}
+		} else {
+			log.Infof("kernel older than 5.14.0-427 detected: it does not support network_events_monitoring hook, skip")
+		}
+	}
+
 	var rttFentryLink, rttKprobeLink link.Link
 	if cfg.EnableRTT {
 		if !oldKernel {
@@ -212,20 +255,21 @@ next:
 	}
 
 	return &FlowFetcher{
-		objects:                  &objects,
-		ringbufReader:            flows,
-		egressFilters:            map[ifaces.Interface]*netlink.BpfFilter{},
-		ingressFilters:           map[ifaces.Interface]*netlink.BpfFilter{},
-		qdiscs:                   map[ifaces.Interface]*netlink.GenericQdisc{},
-		cacheMaxSize:             cfg.CacheMaxSize,
-		enableIngress:            cfg.EnableIngress,
-		enableEgress:             cfg.EnableEgress,
-		pktDropsTracePoint:       pktDropsLink,
-		rttFentryLink:            rttFentryLink,
-		rttKprobeLink:            rttKprobeLink,
-		egressTCXLink:            map[ifaces.Interface]link.Link{},
-		ingressTCXLink:           map[ifaces.Interface]link.Link{},
-		lookupAndDeleteSupported: true, // this will be turned off later if found to be not supported
+		objects:                     &objects,
+		ringbufReader:               flows,
+		egressFilters:               map[ifaces.Interface]*netlink.BpfFilter{},
+		ingressFilters:              map[ifaces.Interface]*netlink.BpfFilter{},
+		qdiscs:                      map[ifaces.Interface]*netlink.GenericQdisc{},
+		cacheMaxSize:                cfg.CacheMaxSize,
+		enableIngress:               cfg.EnableIngress,
+		enableEgress:                cfg.EnableEgress,
+		pktDropsTracePoint:          pktDropsLink,
+		rttFentryLink:               rttFentryLink,
+		rttKprobeLink:               rttKprobeLink,
+		egressTCXLink:               map[ifaces.Interface]link.Link{},
+		ingressTCXLink:              map[ifaces.Interface]link.Link{},
+		networkEventsMonitoringLink: networkEventsMonitoringLink,
+		lookupAndDeleteSupported:    true, // this will be turned off later if found to be not supported
 	}, nil
 }
 
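The attach pattern behind networkEventsMonitoringLink is the standard cilium/ebpf kprobe flow; a hedged, self-contained sketch follows (the program handle and the kernel-support flag are stand-ins, not the agent's real wiring):

	package main

	import (
		"github.com/cilium/ebpf"
		"github.com/cilium/ebpf/link"
	)

	// attachNetworkEvents picks the psample kprobe symbol based on kernel support
	// and returns the link so the caller can Close() it on shutdown.
	func attachNetworkEvents(prog *ebpf.Program, haveUpstreamHook bool) (link.Link, error) {
		symbol := "rh_psample_sample_packet" // downstream hook used on older kernels
		if haveUpstreamHook {
			symbol = "psample_sample_packet" // upstream hook, kernels >= 5.16
		}
		return link.Kprobe(symbol, prog, nil)
	}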
@@ -286,6 +330,48 @@ func (m *FlowFetcher) AttachTCX(iface ifaces.Interface) error {
 	return nil
 }
 
+func (m *FlowFetcher) DetachTCX(iface ifaces.Interface) error {
+	ilog := log.WithField("iface", iface)
+	if iface.NetNS != netns.None() {
+		originalNs, err := netns.Get()
+		if err != nil {
+			return fmt.Errorf("failed to get current netns: %w", err)
+		}
+		defer func() {
+			if err := netns.Set(originalNs); err != nil {
+				ilog.WithError(err).Error("failed to set netns back")
+			}
+			originalNs.Close()
+		}()
+		if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil {
+			return fmt.Errorf("failed to setns to %s: %w", iface.NetNS, err)
+		}
+	}
+	if m.enableEgress {
+		if l := m.egressTCXLink[iface]; l != nil {
+			if err := l.Close(); err != nil {
+				return fmt.Errorf("TCX: failed to close egress link: %w", err)
+			}
+			ilog.WithField("interface", iface.Name).Debug("successfully detach egressTCX hook")
+		} else {
+			return fmt.Errorf("egress link does not have a TCX egress hook")
+		}
+	}
+
+	if m.enableIngress {
+		if l := m.ingressTCXLink[iface]; l != nil {
+			if err := l.Close(); err != nil {
+				return fmt.Errorf("TCX: failed to close ingress link: %w", err)
+			}
+			ilog.WithField("interface", iface.Name).Debug("successfully detach ingressTCX hook")
+		} else {
+			return fmt.Errorf("ingress link does not have a TCX ingress hook")
+		}
+	}
+
+	return nil
+}
+
 func removeTCFilters(ifName string, tcDir uint32) error {
 	link, err := netlink.LinkByName(ifName)
 	if err != nil {
@@ -306,7 +392,7 @@ func removeTCFilters(ifName string, tcDir uint32) error {
 	return kerrors.NewAggregate(errs)
 }
 
-func (m *FlowFetcher) removePreviousFilters(iface ifaces.Interface) error {
+func unregister(iface ifaces.Interface) error {
 	ilog := log.WithField("iface", iface)
 	ilog.Debugf("looking for previously installed TC filters on %s", iface.Name)
 	links, err := netlink.LinkList()
@@ -364,6 +450,12 @@ func (m *FlowFetcher) removePreviousFilters(iface ifaces.Interface) error {
 	return nil
 }
 
+func (m *FlowFetcher) UnRegister(iface ifaces.Interface) error {
+	// qdiscs, ingress and egress filters are automatically deleted so we don't need to
+	// specifically detach them from the ebpfFetcher
+	return unregister(iface)
+}
+
 // Register and links the eBPF fetcher into the system. The program should invoke Unregister
 // before exiting.
 func (m *FlowFetcher) Register(iface ifaces.Interface) error {
@@ -372,7 +464,7 @@ func (m *FlowFetcher) Register(iface ifaces.Interface) error {
 	if err != nil {
 		return fmt.Errorf("failed to create handle for netns (%s): %w", iface.NetNS.String(), err)
 	}
-	defer handle.Delete()
+	defer handle.Close()
 
 	// Load pre-compiled programs and maps into the kernel, and rewrites the configuration
 	ipvlan, err := handle.LinkByIndex(iface.Index)
@@ -401,7 +493,7 @@ func (m *FlowFetcher) Register(iface ifaces.Interface) error {
 	m.qdiscs[iface] = qdisc
 
 	// Remove previously installed filters
-	if err := m.removePreviousFilters(iface); err != nil {
+	if err := unregister(iface); err != nil {
 		return fmt.Errorf("failed to remove previous filters: %w", err)
 	}
 
@@ -504,6 +596,11 @@ func (m *FlowFetcher) Close() error {
 			errs = append(errs, err)
 		}
 	}
+	if m.networkEventsMonitoringLink != nil {
+		if err := m.networkEventsMonitoringLink.Close(); err != nil {
+			errs = append(errs, err)
+		}
+	}
 	// m.ringbufReader.Read is a blocking operation, so we need to close the ring buffer
 	// from another goroutine to avoid the system not being able to exit if there
 	// isn't traffic in a given interface
@@ -670,6 +767,10 @@ func (m *FlowFetcher) ReadGlobalCounter(met *metrics.Metrics) {
 		"FilterRejectCounter",
 		"FilterAcceptCounter",
 		"FilterNoMatchCounter",
+		"NetworkEventsErrorsCounter",
+		"NetworkEventsErrorsGroupIDMismatch",
+		"NetworkEventsErrorsFlowMapUpdate",
+		"NetworkEventsGoodEvent",
 	}
 	zeroCounters := make([]uint32, ebpf.MustPossibleCPU())
 	for key := BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED_KEY; key < BpfGlobalCountersKeyTMAX_DROPPED_FLOWS_KEY; key++ {
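Roughly the pattern ReadGlobalCounter relies on: a lookup on a per-CPU counters map yields one value per possible CPU, and each value is folded into the exported metric. A hedged sketch of that aggregation, with a generic map handle standing in for the agent's GlobalCounters map:

	package main

	import (
		"github.com/cilium/ebpf"
	)

	// sumPerCPU reads one entry of a per-CPU map and adds up the per-CPU values,
	// which is equivalent to adding each of them into a counter metric.
	func sumPerCPU(m *ebpf.Map, key uint32) (uint64, error) {
		values := make([]uint32, ebpf.MustPossibleCPU())
		if err := m.Lookup(key, &values); err != nil {
			return 0, err
		}
		var total uint64
		for _, v := range values {
			total += uint64(v)
		}
		return total, nil
	}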
met.FilteredFlowsCounter.WithSourceAndReason("flow-fetcher", reasons[key]).Add(float64(counter)) + met.FilteredFlowsCounter.WithSourceAndReason("flow-filtering", reasons[key]).Add(float64(counter)) } } // reset the global counter-map entry @@ -724,16 +830,32 @@ func (m *FlowFetcher) lookupAndDeleteDNSMap(timeOut time.Duration) { } } -// kernelSpecificLoadAndAssign based on a kernel version, it will load only the supported ebPF hooks -func kernelSpecificLoadAndAssign(oldKernel, rtKernel bool, spec *ebpf.CollectionSpec) (BpfObjects, error) { +// kernelSpecificLoadAndAssign based on a kernel version, it will load only the supported eBPF hooks +func kernelSpecificLoadAndAssign(oldKernel, rtKernel, supportNetworkEvents bool, spec *ebpf.CollectionSpec) (BpfObjects, error) { objects := BpfObjects{} - // For older kernel (< 5.14) kfree_sbk drop hook doesn't exist - // For RT kernel both kfree_skb and tcp_rcv_kprobe aren't available - // Here we define another structure similar to the bpf2go created one but w/o the hooks that does not exist in older kernel - // Note: if new hooks are added in the future, we need to update the following structures manually - if oldKernel && rtKernel { - type NewBpfPrograms struct { + // Helper to remove common hooks + removeCommonHooks := func() { + delete(spec.Programs, pktDropHook) + delete(spec.Programs, rhNetworkEventsMonitoringHook) + } + + // Helper to load and assign BPF objects + loadAndAssign := func(objects interface{}) error { + if err := spec.LoadAndAssign(objects, nil); err != nil { + var ve *ebpf.VerifierError + if errors.As(err, &ve) { + log.Infof("Verifier error: %+v", ve) + } + return fmt.Errorf("loading and assigning BPF objects: %w", err) + } + return nil + } + + // Configure BPF programs based on the kernel type + switch { + case oldKernel && rtKernel: + type newBpfPrograms struct { TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"` TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"` TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"` @@ -743,45 +865,45 @@ func kernelSpecificLoadAndAssign(oldKernel, rtKernel bool, spec *ebpf.Collection TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"` TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"` } - type NewBpfObjects struct { - NewBpfPrograms + type newBpfObjects struct { + newBpfPrograms BpfMaps } - var newObjects NewBpfObjects - // remove pktdrop hook from the spec - delete(spec.Programs, pktDropHook) - // remove fentry hook from the spec - delete(spec.Programs, tcpFentryHook) - // remove tcp receive kprobe + var newObjects newBpfObjects + removeCommonHooks() delete(spec.Programs, tcpRcvKprobe) - newObjects.NewBpfPrograms = NewBpfPrograms{} - if err := spec.LoadAndAssign(&newObjects, nil); err != nil { - var ve *ebpf.VerifierError - if errors.As(err, &ve) { - // Using %+v will print the whole verifier error, not just the last - // few lines. 
- log.Infof("Verifier error: %+v", ve) - } - return objects, fmt.Errorf("loading and assigning BPF objects: %w", err) - } - objects.DirectFlows = newObjects.DirectFlows - objects.AggregatedFlows = newObjects.AggregatedFlows - objects.DnsFlows = newObjects.DnsFlows - objects.FilterMap = newObjects.FilterMap - objects.GlobalCounters = newObjects.GlobalCounters - objects.TcEgressFlowParse = newObjects.TcEgressFlowParse - objects.TcIngressFlowParse = newObjects.TcIngressFlowParse - objects.TcxEgressFlowParse = newObjects.TcxEgressFlowParse - objects.TcxIngressFlowParse = newObjects.TcxIngressFlowParse - objects.TcEgressPcaParse = newObjects.TcEgressPcaParse - objects.TcIngressPcaParse = newObjects.TcIngressPcaParse - objects.TcxEgressPcaParse = newObjects.TcxEgressPcaParse - objects.TcxIngressPcaParse = newObjects.TcxIngressPcaParse - objects.TcpRcvKprobe = nil - objects.TcpRcvFentry = nil - objects.KfreeSkb = nil - } else if oldKernel { - type NewBpfPrograms struct { + delete(spec.Programs, tcpFentryHook) + + if err := loadAndAssign(&newObjects); err != nil { + return objects, err + } + + objects = BpfObjects{ + BpfPrograms: BpfPrograms{ + TcEgressFlowParse: newObjects.TcEgressFlowParse, + TcIngressFlowParse: newObjects.TcIngressFlowParse, + TcxEgressFlowParse: newObjects.TcxEgressFlowParse, + TcxIngressFlowParse: newObjects.TcxIngressFlowParse, + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, + TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TcpRcvKprobe: nil, + TcpRcvFentry: nil, + KfreeSkb: nil, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: BpfMaps{ + DirectFlows: newObjects.DirectFlows, + AggregatedFlows: newObjects.AggregatedFlows, + DnsFlows: newObjects.DnsFlows, + FilterMap: newObjects.FilterMap, + GlobalCounters: newObjects.GlobalCounters, + }, + } + + case oldKernel: + type newBpfPrograms struct { TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"` TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"` TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"` @@ -792,43 +914,44 @@ func kernelSpecificLoadAndAssign(oldKernel, rtKernel bool, spec *ebpf.Collection TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"` TCPRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"` } - type NewBpfObjects struct { - NewBpfPrograms + type newBpfObjects struct { + newBpfPrograms BpfMaps } - var newObjects NewBpfObjects - // remove pktdrop hook from the spec - delete(spec.Programs, pktDropHook) - // remove fentry hook from the spec + var newObjects newBpfObjects + removeCommonHooks() delete(spec.Programs, tcpFentryHook) - newObjects.NewBpfPrograms = NewBpfPrograms{} - if err := spec.LoadAndAssign(&newObjects, nil); err != nil { - var ve *ebpf.VerifierError - if errors.As(err, &ve) { - // Using %+v will print the whole verifier error, not just the last - // few lines. 
- log.Infof("Verifier error: %+v", ve) - } - return objects, fmt.Errorf("loading and assigning BPF objects: %w", err) - } - objects.DirectFlows = newObjects.DirectFlows - objects.AggregatedFlows = newObjects.AggregatedFlows - objects.DnsFlows = newObjects.DnsFlows - objects.FilterMap = newObjects.FilterMap - objects.GlobalCounters = newObjects.GlobalCounters - objects.TcEgressFlowParse = newObjects.TcEgressFlowParse - objects.TcIngressFlowParse = newObjects.TcIngressFlowParse - objects.TcxEgressFlowParse = newObjects.TcxEgressFlowParse - objects.TcxIngressFlowParse = newObjects.TcxIngressFlowParse - objects.TcEgressPcaParse = newObjects.TcEgressPcaParse - objects.TcIngressPcaParse = newObjects.TcIngressPcaParse - objects.TcxEgressPcaParse = newObjects.TcxEgressPcaParse - objects.TcxIngressPcaParse = newObjects.TcxIngressPcaParse - objects.TcpRcvKprobe = newObjects.TCPRcvKprobe - objects.TcpRcvFentry = nil - objects.KfreeSkb = nil - } else if rtKernel { - type NewBpfPrograms struct { + + if err := loadAndAssign(&newObjects); err != nil { + return objects, err + } + + objects = BpfObjects{ + BpfPrograms: BpfPrograms{ + TcEgressFlowParse: newObjects.TcEgressFlowParse, + TcIngressFlowParse: newObjects.TcIngressFlowParse, + TcxEgressFlowParse: newObjects.TcxEgressFlowParse, + TcxIngressFlowParse: newObjects.TcxIngressFlowParse, + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, + TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TcpRcvKprobe: newObjects.TCPRcvKprobe, + TcpRcvFentry: nil, + KfreeSkb: nil, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: BpfMaps{ + DirectFlows: newObjects.DirectFlows, + AggregatedFlows: newObjects.AggregatedFlows, + DnsFlows: newObjects.DnsFlows, + FilterMap: newObjects.FilterMap, + GlobalCounters: newObjects.GlobalCounters, + }, + } + + case rtKernel: + type newBpfPrograms struct { TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"` TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"` TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"` @@ -839,57 +962,98 @@ func kernelSpecificLoadAndAssign(oldKernel, rtKernel bool, spec *ebpf.Collection TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"` TCPRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"` } - type NewBpfObjects struct { - NewBpfPrograms + type newBpfObjects struct { + newBpfPrograms BpfMaps } - var newObjects NewBpfObjects - // remove pktdrop hook from the spec - delete(spec.Programs, pktDropHook) - // remove tcp receive kprobe + var newObjects newBpfObjects + removeCommonHooks() delete(spec.Programs, tcpRcvKprobe) - newObjects.NewBpfPrograms = NewBpfPrograms{} - if err := spec.LoadAndAssign(&newObjects, nil); err != nil { - var ve *ebpf.VerifierError - if errors.As(err, &ve) { - // Using %+v will print the whole verifier error, not just the last - // few lines. 
- log.Infof("Verifier error: %+v", ve) - } - return objects, fmt.Errorf("loading and assigning BPF objects: %w", err) - } - objects.DirectFlows = newObjects.DirectFlows - objects.AggregatedFlows = newObjects.AggregatedFlows - objects.DnsFlows = newObjects.DnsFlows - objects.FilterMap = newObjects.FilterMap - objects.GlobalCounters = newObjects.GlobalCounters - objects.TcEgressFlowParse = newObjects.TcEgressFlowParse - objects.TcIngressFlowParse = newObjects.TcIngressFlowParse - objects.TcxEgressFlowParse = newObjects.TcxEgressFlowParse - objects.TcxIngressFlowParse = newObjects.TcxIngressFlowParse - objects.TcEgressPcaParse = newObjects.TcEgressPcaParse - objects.TcIngressPcaParse = newObjects.TcIngressPcaParse - objects.TcxEgressPcaParse = newObjects.TcxEgressPcaParse - objects.TcxIngressPcaParse = newObjects.TcxIngressPcaParse - objects.TcpRcvFentry = newObjects.TCPRcvFentry - objects.TcpRcvKprobe = nil - objects.KfreeSkb = nil - } else { - if err := spec.LoadAndAssign(&objects, nil); err != nil { - var ve *ebpf.VerifierError - if errors.As(err, &ve) { - // Using %+v will print the whole verifier error, not just the last - // few lines. - log.Infof("Verifier error: %+v", ve) - } - return objects, fmt.Errorf("loading and assigning BPF objects: %w", err) + + if err := loadAndAssign(&newObjects); err != nil { + return objects, err + } + + objects = BpfObjects{ + BpfPrograms: BpfPrograms{ + TcEgressFlowParse: newObjects.TcEgressFlowParse, + TcIngressFlowParse: newObjects.TcIngressFlowParse, + TcxEgressFlowParse: newObjects.TcxEgressFlowParse, + TcxIngressFlowParse: newObjects.TcxIngressFlowParse, + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, + TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TcpRcvFentry: newObjects.TCPRcvFentry, + TcpRcvKprobe: nil, + KfreeSkb: nil, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: BpfMaps{ + DirectFlows: newObjects.DirectFlows, + AggregatedFlows: newObjects.AggregatedFlows, + DnsFlows: newObjects.DnsFlows, + FilterMap: newObjects.FilterMap, + GlobalCounters: newObjects.GlobalCounters, + }, + } + + case !supportNetworkEvents: + type newBpfPrograms struct { + TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"` + TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"` + TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"` + TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"` + TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"` + TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"` + TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"` + TCPRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"` + TCPRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"` + KfreeSkb *ebpf.Program `ebpf:"kfree_skb"` + } + type newBpfObjects struct { + newBpfPrograms + BpfMaps + } + var newObjects newBpfObjects + delete(spec.Programs, rhNetworkEventsMonitoringHook) + + if err := loadAndAssign(&newObjects); err != nil { + return objects, err + } + + objects = BpfObjects{ + BpfPrograms: BpfPrograms{ + TcEgressFlowParse: newObjects.TcEgressFlowParse, + TcIngressFlowParse: newObjects.TcIngressFlowParse, + TcxEgressFlowParse: newObjects.TcxEgressFlowParse, + TcxIngressFlowParse: newObjects.TcxIngressFlowParse, + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, 
+ TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TcpRcvFentry: newObjects.TCPRcvFentry, + TcpRcvKprobe: newObjects.TCPRcvKprobe, + KfreeSkb: newObjects.KfreeSkb, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: BpfMaps{ + DirectFlows: newObjects.DirectFlows, + AggregatedFlows: newObjects.AggregatedFlows, + DnsFlows: newObjects.DnsFlows, + FilterMap: newObjects.FilterMap, + GlobalCounters: newObjects.GlobalCounters, + }, + } + + default: + if err := loadAndAssign(&objects); err != nil { + return objects, err } } - /* - * since we load the program only when the we start we need to release - * memory used by cached kernel BTF see https://github.com/cilium/ebpf/issues/1063 - * for more details. - */ + + // Release cached kernel BTF memory btf.FlushKernelSpec() return objects, nil @@ -934,6 +1098,8 @@ func NewPacketFetcher(cfg *FlowFetcherConfig) (*PacketFetcher, error) { delete(spec.Programs, constTraceMessages) delete(spec.Programs, constEnableDNSTracking) delete(spec.Programs, constEnableFlowFiltering) + delete(spec.Programs, constEnableNetworkEventsMonitoring) + delete(spec.Programs, constNetworkEventsMonitoringGroupID) pcaEnable := 0 if cfg.EnablePCA { @@ -989,7 +1155,7 @@ func registerInterface(iface ifaces.Interface) (*netlink.GenericQdisc, netlink.L if err != nil { return nil, nil, fmt.Errorf("failed to create handle for netns (%s): %w", iface.NetNS.String(), err) } - defer handle.Delete() + defer handle.Close() // Load pre-compiled programs and maps into the kernel, and rewrites the configuration ipvlan, err := handle.LinkByIndex(iface.Index) @@ -1018,8 +1184,13 @@ func registerInterface(iface ifaces.Interface) (*netlink.GenericQdisc, netlink.L return qdisc, ipvlan, nil } -func (p *PacketFetcher) Register(iface ifaces.Interface) error { +func (p *PacketFetcher) UnRegister(iface ifaces.Interface) error { + // qdiscs, ingress and egress filters are automatically deleted so we don't need to + // specifically detach them from the ebpfFetcher + return unregister(iface) +} +func (p *PacketFetcher) Register(iface ifaces.Interface) error { qdisc, ipvlan, err := registerInterface(iface) if err != nil { return err @@ -1032,6 +1203,47 @@ func (p *PacketFetcher) Register(iface ifaces.Interface) error { return p.registerIngress(iface, ipvlan) } +func (p *PacketFetcher) DetachTCX(iface ifaces.Interface) error { + ilog := log.WithField("iface", iface) + if iface.NetNS != netns.None() { + originalNs, err := netns.Get() + if err != nil { + return fmt.Errorf("PCA failed to get current netns: %w", err) + } + defer func() { + if err := netns.Set(originalNs); err != nil { + ilog.WithError(err).Error("PCA failed to set netns back") + } + originalNs.Close() + }() + if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("PCA failed to setns to %s: %w", iface.NetNS, err) + } + } + if p.enableEgress { + if l := p.egressTCXLink[iface]; l != nil { + if err := l.Close(); err != nil { + return fmt.Errorf("TCX: failed to close egress link: %w", err) + } + ilog.WithField("interface", iface.Name).Debug("successfully detach egressTCX hook") + } else { + return fmt.Errorf("egress link does not support TCX hook") + } + } + + if p.enableIngress { + if l := p.ingressTCXLink[iface]; l != nil { + if err := l.Close(); err != nil { + return fmt.Errorf("TCX: failed to close ingress link: %w", err) + } + ilog.WithField("interface", iface.Name).Debug("successfully detach ingressTCX hook") + } else { + return fmt.Errorf("ingress link does not support TCX hook") + } + } + return nil +} + 
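For context, a minimal sketch of how the new DetachTCX/UnRegister pair is expected to be driven when an interface goes away. This is a hypothetical caller, not part of this patch; it assumes it lives in the same package as FlowFetcher and that the agent attached via TCX:

func onInterfaceRemoved(f *FlowFetcher, iface ifaces.Interface) error {
	// TCX links must be closed explicitly; qdiscs and TC filters are
	// auto-deleted by the kernel, so UnRegister only clears leftover filters.
	if err := f.DetachTCX(iface); err != nil {
		return err
	}
	return f.UnRegister(iface)
}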
func (p *PacketFetcher) AttachTCX(iface ifaces.Interface) error { ilog := log.WithField("iface", iface) if iface.NetNS != netns.None() { diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go index 274cc7eef..18a1f9cf9 100644 --- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go @@ -27,12 +27,13 @@ type deduperCache struct { } type entry struct { - key *ebpf.BpfFlowId - dnsRecord *ebpf.BpfDnsRecordT - flowRTT *uint64 - ifIndex uint32 - expiryTime time.Time - dupList *[]map[string]uint8 + key *ebpf.BpfFlowId + dnsRecord *ebpf.BpfDnsRecordT + flowRTT *uint64 + networkEvents *[4][8]uint8 + ifIndex uint32 + expiryTime time.Time + dupList *[]map[string]uint8 } // Dedupe receives flows and filters these belonging to duplicate interfaces. It will forward @@ -94,6 +95,12 @@ func (c *deduperCache) checkDupe(r *Record, justMark, mergeDup bool, fwd *[]*Rec if r.Metrics.FlowRtt != 0 && *fEntry.flowRTT == 0 { *fEntry.flowRTT = r.Metrics.FlowRtt } + // If the new flows have network events, then enrich the flow in the cache and mark the flow as duplicate + for i, md := range r.Metrics.NetworkEvents { + if !AllZerosMetaData(md) && AllZerosMetaData(fEntry.networkEvents[i]) { + copy(fEntry.networkEvents[i][:], md[:]) + } + } if fEntry.ifIndex != r.Id.IfIndex { if justMark { r.Duplicate = true @@ -120,11 +127,12 @@ func (c *deduperCache) checkDupe(r *Record, justMark, mergeDup bool, fwd *[]*Rec // The flow has not been accounted previously (or was forgotten after expiration) // so we register it for that concrete interface e := entry{ - key: &rk, - dnsRecord: &r.Metrics.DnsRecord, - flowRTT: &r.Metrics.FlowRtt, - ifIndex: r.Id.IfIndex, - expiryTime: timeNow().Add(c.expire), + key: &rk, + dnsRecord: &r.Metrics.DnsRecord, + flowRTT: &r.Metrics.FlowRtt, + networkEvents: &r.Metrics.NetworkEvents, + ifIndex: r.Id.IfIndex, + expiryTime: timeNow().Add(c.expire), } if mergeDup { ifName := ifaceNamer(int(r.Id.IfIndex)) diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/record.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/record.go index 0d677dca7..c68aca22c 100644 --- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/record.go +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/record.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "net" + "reflect" "time" "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" @@ -18,7 +19,11 @@ const ( const MacLen = 6 // IPv4Type / IPv6Type value as defined in IEEE 802: https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml -const IPv6Type = 0x86DD +const ( + IPv6Type = 0x86DD + networkEventsMaxEventsMD = 8 + maxNetworkEvents = 4 +) type HumanBytes uint64 type MacAddr [MacLen]uint8 @@ -52,8 +57,9 @@ type Record struct { // AgentIP provides information about the source of the flow (the Agent that traced it) AgentIP net.IP // Calculated RTT which is set when record is created by calling NewRecord - TimeFlowRtt time.Duration - DupList []map[string]uint8 + TimeFlowRtt time.Duration + DupList []map[string]uint8 + NetworkMonitorEventsMD []string } func NewRecord( @@ -80,6 +86,7 @@ func NewRecord( record.DNSLatency = time.Duration(metrics.DnsRecord.Latency) } record.DupList = make([]map[string]uint8, 0) + record.NetworkMonitorEventsMD = make([]string, 0) return &record } @@ -120,6 +127,22 @@ func Accumulate(r *ebpf.BpfFlowMetrics, 
src *ebpf.BpfFlowMetrics) { if src.Dscp != 0 { r.Dscp = src.Dscp } + + for _, md := range src.NetworkEvents { + if !AllZerosMetaData(md) && !networkEventsMDExist(r.NetworkEvents, md) { + copy(r.NetworkEvents[r.NetworkEventsIdx][:], md[:]) + r.NetworkEventsIdx = (r.NetworkEventsIdx + 1) % maxNetworkEvents + } + } +} + +func networkEventsMDExist(events [maxNetworkEvents][networkEventsMaxEventsMD]uint8, md [networkEventsMaxEventsMD]uint8) bool { + for _, e := range events { + if reflect.DeepEqual(e, md) { + return true + } + } + return false } // IP returns the net.IP equivalent object @@ -159,3 +182,12 @@ func ReadFrom(reader io.Reader) (*RawRecord, error) { err := binary.Read(reader, binary.LittleEndian, &fr) return &fr, err } + +func AllZerosMetaData(s [networkEventsMaxEventsMD]uint8) bool { + for _, v := range s { + if v != 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go index 3e8b388d1..3230f4af9 100644 --- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go @@ -144,14 +144,14 @@ func (m *MapTracer) aggregate(metrics []ebpf.BpfFlowMetrics) *ebpf.BpfFlowMetric return &ebpf.BpfFlowMetrics{} } aggr := &ebpf.BpfFlowMetrics{} - for _, mt := range metrics { + for i := range metrics { // eBPF hashmap values are not zeroed when the entry is removed. That causes that we // might receive entries from previous collect-eviction timeslots. // We need to check the flow time and discard old flows. - if mt.StartMonoTimeTs <= m.lastEvictionNs || mt.EndMonoTimeTs <= m.lastEvictionNs { + if metrics[i].StartMonoTimeTs <= m.lastEvictionNs || metrics[i].EndMonoTimeTs <= m.lastEvictionNs { continue } - Accumulate(aggr, &mt) + Accumulate(aggr, &metrics[i]) } return aggr } diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go index 6832aca9b..518010112 100644 --- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go @@ -53,7 +53,7 @@ func netInterfaces(nsh netns.NsHandle) ([]Interface, error) { if err != nil { return nil, fmt.Errorf("failed to create handle for netns (%s): %w", nsh.String(), err) } - defer handle.Delete() + defer handle.Close() // Get a list of interfaces in the namespace links, err := handle.LinkList() diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go index b45c8ee41..25274d6d7 100644 --- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go @@ -82,6 +82,9 @@ func (w *Watcher) sendUpdates(ctx context.Context, ns string, out chan Event) { "netnsHandle": netnsHandle.String(), "error": err, }).Debug("linkSubscribe failed retry") + if err := netnsHandle.Close(); err != nil { + log.WithError(err).Warn("netnsHandle close failed") + } return false, nil } diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go index 61cc72fc7..77e3ac28d 100644 --- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go +++ 
b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go @@ -92,6 +92,13 @@ var ( "source", "reason", ) + networkEvents = defineMetric( + "network_events_total", + "Number of Network Events flows", + TypeCounter, + "source", + "reason", + ) bufferSize = defineMetric( "buffer_size", "Buffer size", @@ -144,6 +151,7 @@ type Metrics struct { EvictedPacketsCounter *EvictionCounter DroppedFlowsCounter *EvictionCounter FilteredFlowsCounter *EvictionCounter + NetworkEventsCounter *EvictionCounter BufferSizeGauge *BufferSizeGauge Errors *ErrorCounter } @@ -157,6 +165,7 @@ func NewMetrics(settings *Settings) *Metrics { m.EvictedPacketsCounter = &EvictionCounter{vec: m.NewCounterVec(&evictedPktTotal)} m.DroppedFlowsCounter = &EvictionCounter{vec: m.NewCounterVec(&droppedFlows)} m.FilteredFlowsCounter = &EvictionCounter{vec: m.NewCounterVec(&filterFlows)} + m.NetworkEventsCounter = &EvictionCounter{vec: m.NewCounterVec(&networkEvents)} m.BufferSizeGauge = &BufferSizeGauge{vec: m.NewGaugeVec(&bufferSize)} m.Errors = &ErrorCounter{vec: m.NewCounterVec(&errorsCounter)} return m diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go index 3d5a5b60f..6a5c43207 100644 --- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go @@ -248,6 +248,7 @@ type Record struct { TimeFlowRtt *durationpb.Duration `protobuf:"bytes,24,opt,name=time_flow_rtt,json=timeFlowRtt,proto3" json:"time_flow_rtt,omitempty"` DnsErrno uint32 `protobuf:"varint,25,opt,name=dns_errno,json=dnsErrno,proto3" json:"dns_errno,omitempty"` DupList []*DupMapEntry `protobuf:"bytes,26,rep,name=dup_list,json=dupList,proto3" json:"dup_list,omitempty"` + NetworkEventsMetadata []string `protobuf:"bytes,27,rep,name=network_events_metadata,json=networkEventsMetadata,proto3" json:"network_events_metadata,omitempty"` } func (x *Record) Reset() { @@ -464,6 +465,13 @@ func (x *Record) GetDupList() []*DupMapEntry { return nil } +func (x *Record) GetNetworkEventsMetadata() []string { + if x != nil { + return x.NetworkEventsMetadata + } + return nil +} + type DataLink struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -747,7 +755,7 @@ var file_proto_flow_proto_rawDesc = []byte{ 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0xbc, 0x08, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, + 0x22, 0xf4, 0x08, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, @@ -814,36 +822,39 @@ var file_proto_flow_proto_rawDesc = []byte{ 0x19, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x45, 0x72, 0x72, 0x6e, 0x6f, 0x12, 0x2e, 0x0a, 0x08, 0x64, 0x75, 0x70, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x75, 0x70, 0x4d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 
0x64, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x22, - 0x3c, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x73, - 0x72, 0x63, 0x5f, 0x6d, 0x61, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x72, - 0x63, 0x4d, 0x61, 0x63, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x63, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x64, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x22, 0x6b, 0x0a, - 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x25, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x73, 0x72, 0x63, 0x41, 0x64, 0x64, 0x72, 0x12, - 0x25, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x64, - 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x73, 0x63, 0x70, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x64, 0x73, 0x63, 0x70, 0x22, 0x3d, 0x0a, 0x02, 0x49, 0x50, - 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, - 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x42, 0x0b, 0x0a, 0x09, - 0x69, 0x70, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x22, 0x5d, 0x0a, 0x09, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x72, 0x63, 0x50, 0x6f, 0x72, - 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x07, 0x64, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x32, 0x3e, - 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, 0x53, - 0x65, 0x6e, 0x64, 0x12, 0x0f, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, - 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x75, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x12, + 0x36, 0x0a, 0x17, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x1b, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x15, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3c, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x4c, + 0x69, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x6d, 0x61, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x72, 0x63, 0x4d, 0x61, 0x63, 0x12, 0x17, 0x0a, 0x07, + 0x64, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x04, 0x52, 0x06, 0x64, + 0x73, 0x74, 0x4d, 0x61, 0x63, 0x22, 0x6b, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x12, 0x25, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, + 0x73, 0x72, 0x63, 0x41, 0x64, 0x64, 0x72, 0x12, 0x25, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x64, 0x73, 0x63, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x64, 0x73, + 0x63, 0x70, 0x22, 0x3d, 0x0a, 0x02, 0x49, 0x50, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, + 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x04, + 0x69, 0x70, 0x76, 0x36, 0x42, 0x0b, 0x0a, 0x09, 0x69, 0x70, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, + 0x79, 0x22, 0x5d, 0x0a, 0x09, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x07, 0x73, 0x72, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x73, 0x74, + 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x64, 0x73, 0x74, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, + 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, + 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x32, 0x3e, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x0f, 0x2e, 0x70, 0x62, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x16, 0x2e, 0x70, + 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x62, 0x66, 0x6c, + 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go index 20f1d8853..c312b63ad 100644 --- a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go @@ -2,20 +2,23 @@ package pbflow import ( "encoding/binary" - "net" - "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" "github.com/netobserv/netobserv-ebpf-agent/pkg/flow" + ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder" + "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/timestamppb" + "net" ) +var protoLog = logrus.WithField("component", "pbflow") + // FlowsToPB is an auxiliary function to convert flow records, as returned by the eBPF agent, // into protobuf-encoded messages ready to be sent to the collector via GRPC -func FlowsToPB(inputRecords []*flow.Record, maxLen int) []*Records { +func FlowsToPB(inputRecords []*flow.Record, 
maxLen int, s *ovnobserv.SampleDecoder) []*Records { entries := make([]*Record, 0, len(inputRecords)) for _, record := range inputRecords { - entries = append(entries, FlowToPB(record)) + entries = append(entries, FlowToPB(record, s)) } var records []*Records for len(entries) > 0 { @@ -31,7 +34,7 @@ func FlowsToPB(inputRecords []*flow.Record, maxLen int) []*Records { // FlowToPB is an auxiliary function to convert a single flow record, as returned by the eBPF agent, // into a protobuf-encoded message ready to be sent to the collector via kafka -func FlowToPB(fr *flow.Record) *Record { +func FlowToPB(fr *flow.Record, s *ovnobserv.SampleDecoder) *Record { var pbflowRecord = Record{ EthProtocol: uint32(fr.Id.EthProtocol), Direction: Direction(fr.Id.Direction), @@ -94,6 +97,22 @@ func FlowToPB(fr *flow.Record) *Record { pbflowRecord.Network.SrcAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: flow.IntEncodeV4(fr.Id.SrcIp)}} pbflowRecord.Network.DstAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: flow.IntEncodeV4(fr.Id.DstIp)}} } + if s != nil { + seen := make(map[string]bool) + for _, metadata := range fr.Metrics.NetworkEvents { + if !flow.AllZerosMetaData(metadata) { + if md, err := s.DecodeCookie8Bytes(metadata); err == nil { + protoLog.Debugf("Network Events Metadata %v decoded Cookie: %v", metadata, md) + if !seen[md] { + pbflowRecord.NetworkEventsMetadata = append(pbflowRecord.NetworkEventsMetadata, md) + seen[md] = true + } + } else { + protoLog.Errorf("unable to decode Network events cookie: %v", err) + } + } + } + } return &pbflowRecord } @@ -152,6 +171,10 @@ func PBToFlow(pb *Record) *flow.Record { out.DupList = append(out.DupList, map[string]uint8{intf: dir}) } } + if len(pb.GetNetworkEventsMetadata()) != 0 { + out.NetworkMonitorEventsMD = append(out.NetworkMonitorEventsMD, pb.GetNetworkEventsMetadata()...) + protoLog.Debugf("decoded Network events monitor metadata: %v", out.NetworkMonitorEventsMD) + } return &out } diff --git a/vendor/github.com/ovn-org/libovsdb/LICENSE b/vendor/github.com/ovn-org/libovsdb/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/ovn-org/libovsdb/NOTICE b/vendor/github.com/ovn-org/libovsdb/NOTICE new file mode 100644 index 000000000..156dcf39f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/NOTICE @@ -0,0 +1,13 @@ +libovsdb + +Copyright 2014-2015 Socketplane Inc. +Copyright 2015-2018 Docker Inc. + +This software consists of voluntary contributions made by many individuals. For +exact contribution history, see the commit history. + +Modifications Copyright 2018-2019 eBay Inc. + +This software contains modifications developed by eBay Inc. and voluntary contributions +from other individuals in a fork maintained at https://github.com/eBay/libovsdb +For details on these contributions, please consult the git history. 
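Regarding the FlowsToPB/FlowToPB signature change above (pkg/pbflow/proto.go): callers now pass an ovn-kubernetes SampleDecoder, or nil to skip network-events decoding. A hypothetical usage sketch under that assumption (import paths taken from this patch; the batch size is arbitrary):

import (
	"github.com/netobserv/netobserv-ebpf-agent/pkg/flow"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"

	ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder"
)

func exportFlows(records []*flow.Record, decoder *ovnobserv.SampleDecoder) []*pbflow.Records {
	// With a non-nil decoder, each non-zero 8-byte network-events cookie is
	// decoded via DecodeCookie8Bytes and de-duplicated into NetworkEventsMetadata;
	// with nil, flows are converted without that enrichment.
	return pbflow.FlowsToPB(records, 100, decoder)
}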
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/cache.go b/vendor/github.com/ovn-org/libovsdb/cache/cache.go new file mode 100644 index 000000000..0b1e09e72 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/cache/cache.go @@ -0,0 +1,1284 @@ +package cache + +import ( + "bytes" + "crypto/sha256" + "encoding/gob" + "encoding/hex" + "fmt" + "log" + "os" + "reflect" + "sort" + "strings" + "sync" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/libovsdb/updates" +) + +const ( + updateEvent = "update" + addEvent = "add" + deleteEvent = "delete" + bufferSize = 65536 + columnDelimiter = "," + keyDelimiter = "|" +) + +// ErrCacheInconsistent is an error that can occur when an operation +// would cause the cache to be inconsistent +type ErrCacheInconsistent struct { + details string +} + +// Error implements the error interface +func (e *ErrCacheInconsistent) Error() string { + msg := "cache inconsistent" + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +func NewErrCacheInconsistent(details string) *ErrCacheInconsistent { + return &ErrCacheInconsistent{ + details: details, + } +} + +// ErrIndexExists is returned when an item in the database cannot be inserted due to existing indexes +type ErrIndexExists struct { + Table string + Value interface{} + Index string + New string + Existing []string +} + +func (e *ErrIndexExists) Error() string { + return fmt.Sprintf("cannot insert %s in the %s table. item %s has identical indexes. index: %s, value: %v", e.New, e.Table, e.Existing, e.Index, e.Value) +} + +func NewIndexExistsError(table string, value interface{}, index string, new string, existing []string) *ErrIndexExists { + return &ErrIndexExists{ + table, value, index, new, existing, + } +} + +// map of unique values to uuids +type valueToUUIDs map[interface{}]uuidset + +// map of column name(s) to unique values, to UUIDs +type columnToValue map[index]valueToUUIDs + +// index is the type used to implement multiple cache indexes +type index string + +// indexType is the type of index +type indexType uint + +const ( + schemaIndexType indexType = iota + clientIndexType +) + +// indexSpec contains details about an index +type indexSpec struct { + index index + columns []model.ColumnKey + indexType indexType +} + +func (s indexSpec) isClientIndex() bool { + return s.indexType == clientIndexType +} + +func (s indexSpec) isSchemaIndex() bool { + return s.indexType == schemaIndexType +} + +// newIndex builds a index from a list of columns +func newIndexFromColumns(columns ...string) index { + sort.Strings(columns) + return index(strings.Join(columns, columnDelimiter)) +} + +// newIndexFromColumnKeys builds a index from a list of column keys +func newIndexFromColumnKeys(columnsKeys ...model.ColumnKey) index { + // RFC 7047 says that Indexes is a [] and "Each is a set of + // columns whose values, taken together within any given row, must be + // unique within the table". 
We'll store the column names, separated by comma + // as we'll assume (RFC is not clear), that comma isn't valid in a + columns := make([]string, 0, len(columnsKeys)) + columnsMap := map[string]struct{}{} + for _, columnKey := range columnsKeys { + var column string + if columnKey.Key != nil { + column = fmt.Sprintf("%s%s%v", columnKey.Column, keyDelimiter, columnKey.Key) + } else { + column = columnKey.Column + } + if _, found := columnsMap[column]; !found { + columns = append(columns, column) + columnsMap[column] = struct{}{} + } + } + return newIndexFromColumns(columns...) +} + +// newColumnKeysFromColumns builds a list of column keys from a list of columns +func newColumnKeysFromColumns(columns ...string) []model.ColumnKey { + columnKeys := make([]model.ColumnKey, len(columns)) + for i, column := range columns { + columnKeys[i] = model.ColumnKey{Column: column} + } + return columnKeys +} + +// RowCache is a collections of Models hashed by UUID +type RowCache struct { + name string + dbModel model.DatabaseModel + dataType reflect.Type + cache map[string]model.Model + indexSpecs []indexSpec + indexes columnToValue + mutex sync.RWMutex +} + +// rowByUUID returns one model from the cache by UUID. Caller must hold the row +// cache lock. +func (r *RowCache) rowByUUID(uuid string) model.Model { + if row, ok := r.cache[uuid]; ok { + return model.Clone(row) + } + return nil +} + +// Row returns one model from the cache by UUID +func (r *RowCache) Row(uuid string) model.Model { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.rowByUUID(uuid) +} + +func (r *RowCache) HasRow(uuid string) bool { + r.mutex.RLock() + defer r.mutex.RUnlock() + _, found := r.cache[uuid] + return found +} + +// rowsByModels searches the cache to find all rows matching any of the provided +// models, either by UUID or indexes. An error is returned if the model schema +// has no UUID field, or if the provided models are not all the same type. +func (r *RowCache) rowsByModels(models []model.Model, useClientIndexes bool) (map[string]model.Model, error) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + results := make(map[string]model.Model, len(models)) + for _, m := range models { + if reflect.TypeOf(m) != r.dataType { + return nil, fmt.Errorf("model type %s didn't match expected row type %s", reflect.TypeOf(m), r.dataType) + } + info, _ := r.dbModel.NewModelInfo(m) + field, err := info.FieldByColumn("_uuid") + if err != nil { + return nil, err + } + if uuid := field.(string); uuid != "" { + if _, ok := results[uuid]; !ok { + if row := r.rowByUUID(uuid); row != nil { + results[uuid] = row + continue + } + } + } + + // indexSpecs are ordered, schema indexes go first, then client indexes + for _, indexSpec := range r.indexSpecs { + if indexSpec.isClientIndex() && !useClientIndexes { + // Given the ordered indexSpecs, we can break here if we reach the + // first client index + break + } + val, err := valueFromIndex(info, indexSpec.columns) + if err != nil { + continue + } + vals := r.indexes[indexSpec.index] + if uuids, ok := vals[val]; ok { + for uuid := range uuids { + if _, ok := results[uuid]; !ok { + results[uuid] = r.rowByUUID(uuid) + } + } + // Break after handling the first found index + // to ensure we preserve index order preference + break + } + } + } + if len(results) == 0 { + return nil, nil + } + return results, nil +} + +// RowByModel searches the cache by UUID and schema indexes. UUID search is +// performed first. 
Then schema indexes are evaluated in turn by the same order +// with which they are defined in the schema. The model for the first matching +// index is returned along with its UUID. An empty string and nil is returned if +// no Model is found. +func (r *RowCache) RowByModel(m model.Model) (string, model.Model, error) { + models, err := r.rowsByModels([]model.Model{m}, false) + if err != nil { + return "", nil, err + } + for uuid, model := range models { + return uuid, model, nil + } + return "", nil, nil +} + +// RowsByModels searches the cache by UUID, schema indexes and client indexes. +// UUID search is performed first. Schema indexes are evaluated next in turn by +// the same order with which they are defined in the schema. Finally, client +// indexes are evaluated in turn by the same order with which they are defined +// in the client DB model. The models for the first matching index are returned, +// which might be more than 1 if they were found through a client index since in +// that case uniqueness is not enforced. Nil is returned if no Model is found. +func (r *RowCache) RowsByModels(models []model.Model) (map[string]model.Model, error) { + return r.rowsByModels(models, true) +} + +// Create writes the provided content to the cache +func (r *RowCache) Create(uuid string, m model.Model, checkIndexes bool) error { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.cache[uuid]; ok { + return NewErrCacheInconsistent(fmt.Sprintf("cannot create row %s as it already exists", uuid)) + } + if reflect.TypeOf(m) != r.dataType { + return fmt.Errorf("expected data of type %s, but got %s", r.dataType.String(), reflect.TypeOf(m).String()) + } + info, err := r.dbModel.NewModelInfo(m) + if err != nil { + return err + } + addIndexes := r.newIndexes() + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + val, err := valueFromIndex(info, indexSpec.columns) + if err != nil { + return err + } + + uuidset := newUUIDSet(uuid) + + vals := r.indexes[index] + existing := vals[val] + if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) { + return NewIndexExistsError(r.name, val, string(index), uuid, existing.list()) + } + + addIndexes[index][val] = uuidset + } + + // write indexes + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + for k, v := range addIndexes[index] { + if indexSpec.isSchemaIndex() { + r.indexes[index][k] = v + } else { + r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v) + } + } + } + + r.cache[uuid] = model.Clone(m) + return nil +} + +// Update updates the content in the cache and returns the original (pre-update) model +func (r *RowCache) Update(uuid string, m model.Model, checkIndexes bool) (model.Model, error) { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.cache[uuid]; !ok { + return nil, NewErrCacheInconsistent(fmt.Sprintf("cannot update row %s as it does not exist in the cache", uuid)) + } + oldRow := model.Clone(r.cache[uuid]) + oldInfo, err := r.dbModel.NewModelInfo(oldRow) + if err != nil { + return nil, err + } + newInfo, err := r.dbModel.NewModelInfo(m) + if err != nil { + return nil, err + } + + addIndexes := r.newIndexes() + removeIndexes := r.newIndexes() + var errs []error + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + var err error + oldVal, err := valueFromIndex(oldInfo, indexSpec.columns) + if err != nil { + return nil, err + } + newVal, err := valueFromIndex(newInfo, indexSpec.columns) + if err != nil { + return nil, err + } + + // if old 
and new values are the same, don't worry + if oldVal == newVal { + continue + } + // old and new values are NOT the same + + uuidset := newUUIDSet(uuid) + + // check that there are no conflicts + vals := r.indexes[index] + existing := vals[newVal] + if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) { + errs = append(errs, NewIndexExistsError( + r.name, + newVal, + string(index), + uuid, + existing.list(), + )) + } + + addIndexes[index][newVal] = uuidset + removeIndexes[index][oldVal] = uuidset + } + if len(errs) > 0 { + return nil, fmt.Errorf("%+v", errs) + } + + // write indexes + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + for k, v := range addIndexes[index] { + if indexSpec.isSchemaIndex() { + r.indexes[index][k] = v + } else { + r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v) + } + } + for k, v := range removeIndexes[index] { + if indexSpec.isSchemaIndex() || substractUUIDSet(r.indexes[index][k], v).empty() { + delete(r.indexes[index], k) + } + } + } + + r.cache[uuid] = model.Clone(m) + return oldRow, nil +} + +// IndexExists checks if any of the schema indexes of the provided model is +// already in the cache under a different UUID. +func (r *RowCache) IndexExists(row model.Model) error { + info, err := r.dbModel.NewModelInfo(row) + if err != nil { + return err + } + field, err := info.FieldByColumn("_uuid") + if err != nil { + return nil + } + uuid := field.(string) + for _, indexSpec := range r.indexSpecs { + if !indexSpec.isSchemaIndex() { + // Given the ordered indexSpecs, we can break here if we reach the + // first non schema index + break + } + index := indexSpec.index + val, err := valueFromIndex(info, indexSpec.columns) + if err != nil { + continue + } + vals := r.indexes[index] + existing := vals[val] + if !existing.empty() && !existing.equals(newUUIDSet(uuid)) { + return NewIndexExistsError( + r.name, + val, + string(index), + uuid, + existing.list(), + ) + } + } + return nil +} + +// Delete deletes a row from the cache +func (r *RowCache) Delete(uuid string) error { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.cache[uuid]; !ok { + return NewErrCacheInconsistent(fmt.Sprintf("cannot delete row %s as it does not exist in the cache", uuid)) + } + oldRow := r.cache[uuid] + oldInfo, err := r.dbModel.NewModelInfo(oldRow) + if err != nil { + return err + } + + removeIndexes := r.newIndexes() + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + oldVal, err := valueFromIndex(oldInfo, indexSpec.columns) + if err != nil { + return err + } + + removeIndexes[index][oldVal] = newUUIDSet(uuid) + } + + // write indexes + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + for k, v := range removeIndexes[index] { + // only remove the index if it is pointing to this uuid + // otherwise we can cause a consistency issue if we've processed + // updates out of order + if substractUUIDSet(r.indexes[index][k], v).empty() { + delete(r.indexes[index], k) + } + } + } + + delete(r.cache, uuid) + return nil +} + +// Rows returns a copy of all Rows in the Cache +func (r *RowCache) Rows() map[string]model.Model { + r.mutex.RLock() + defer r.mutex.RUnlock() + result := make(map[string]model.Model) + for k, v := range r.cache { + result[k] = model.Clone(v) + } + return result +} + +// RowsShallow returns a clone'd list of f all Rows in the cache, but does not +// clone the underlying objects. Therefore, the objects returned are READ ONLY. 
+// This is, however, thread safe, as the cached objects are cloned before being updated +// when modifications come in. +func (r *RowCache) RowsShallow() map[string]model.Model { + r.mutex.RLock() + defer r.mutex.RUnlock() + + result := make(map[string]model.Model, len(r.cache)) + for k, v := range r.cache { + result[k] = v + } + return result +} + +// uuidsByConditionsAsIndexes checks possible indexes that can be built with a +// subset of the provided conditions and returns the uuids for the models that +// match that subset of conditions. If no conditions could be used as indexes, +// returns nil. Note that this method does not necessarily match all the +// provided conditions. Thus the caller is required to evaluate all the +// conditions against the returned candidates. This is only useful to obtain, as +// quick as possible, via indexes, a reduced list of candidate models that might +// match all conditions, which should be better than just evaluating all +// conditions against all rows of a table. +// +//nolint:gocyclo // warns overall function is complex but ignores inner functions +func (r *RowCache) uuidsByConditionsAsIndexes(conditions []ovsdb.Condition, nativeValues []interface{}) (uuidset, error) { + type indexableCondition struct { + column string + keys []interface{} + nativeValue interface{} + } + + // build an indexable condition, more appropriate for our processing, from + // an ovsdb condition. Only equality based conditions can be used as indexes + // (or `includes` conditions on map values). + toIndexableCondition := func(condition ovsdb.Condition, nativeValue interface{}) *indexableCondition { + if condition.Column == "_uuid" { + return nil + } + if condition.Function != ovsdb.ConditionEqual && condition.Function != ovsdb.ConditionIncludes { + return nil + } + v := reflect.ValueOf(nativeValue) + if !v.IsValid() { + return nil + } + isSet := v.Kind() == reflect.Slice || v.Kind() == reflect.Array + if condition.Function == ovsdb.ConditionIncludes && isSet { + return nil + } + keys := []interface{}{} + if v.Kind() == reflect.Map && condition.Function == ovsdb.ConditionIncludes { + for _, key := range v.MapKeys() { + keys = append(keys, key.Interface()) + } + } + return &indexableCondition{ + column: condition.Column, + keys: keys, + nativeValue: nativeValue, + } + } + + // for any given set of conditions, we need to check if an index uses the + // same fields as the conditions + indexMatchesConditions := func(spec indexSpec, conditions []*indexableCondition) bool { + columnKeys := []model.ColumnKey{} + for _, condition := range conditions { + if len(condition.keys) == 0 { + columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column}) + continue + } + for _, key := range condition.keys { + columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column, Key: key}) + } + } + index := newIndexFromColumnKeys(columnKeys...) 
+ return index == spec.index + } + + // for a specific set of conditions, check if an index can be built from + // them and return the associated UUIDs + evaluateConditionSetAsIndex := func(conditions []*indexableCondition) (uuidset, error) { + // build a model with the values from the conditions + m, err := r.dbModel.NewModel(r.name) + if err != nil { + return nil, err + } + info, err := r.dbModel.NewModelInfo(m) + if err != nil { + return nil, err + } + for _, conditions := range conditions { + err := info.SetField(conditions.column, conditions.nativeValue) + if err != nil { + return nil, err + } + } + for _, spec := range r.indexSpecs { + if !indexMatchesConditions(spec, conditions) { + continue + } + // if we have an index for those conditions, calculate the index + // value. The models mapped to that value match the conditions. + v, err := valueFromIndex(info, spec.columns) + if err != nil { + return nil, err + } + if v != nil { + uuids := r.indexes[spec.index][v] + if uuids == nil { + // this set of conditions was represented by an index but + // had no matches, return an empty set + uuids = uuidset{} + } + return uuids, nil + } + } + return nil, nil + } + + // set of uuids that match the conditions as we evaluate them + var matching uuidset + + // attempt to evaluate a set of conditions via indexes and intersect the + // results against matches of previous sets + intersectUUIDsFromConditionSet := func(indexableConditions []*indexableCondition) (bool, error) { + uuids, err := evaluateConditionSetAsIndex(indexableConditions) + if err != nil { + return true, err + } + if matching == nil { + matching = uuids + } else if uuids != nil { + matching = intersectUUIDSets(matching, uuids) + } + if matching != nil && len(matching) <= 1 { + // if we had no matches or a single match, no point in continuing + // searching for additional indexes. If we had a single match, it's + // cheaper to just evaluate all conditions on it. + return true, nil + } + return false, nil + } + + // First, filter out conditions that cannot be matched against indexes. With + // the remaining conditions build all possible subsets (the power set of all + // conditions) and for any subset that is an index, intersect the obtained + // uuids with the ones obtained from previous subsets + matchUUIDsFromConditionsPowerSet := func() error { + ps := [][]*indexableCondition{} + // prime the power set with a first empty subset + ps = append(ps, []*indexableCondition{}) + for i, condition := range conditions { + nativeValue := nativeValues[i] + iCondition := toIndexableCondition(condition, nativeValue) + // this is not a condition we can use as an index, skip it + if iCondition == nil { + continue + } + // the power set is built appending the subsets that result from + // adding each item to each of the previous subsets + ss := make([][]*indexableCondition, len(ps)) + for j := range ss { + ss[j] = make([]*indexableCondition, len(ps[j]), len(ps[j])+1) + copy(ss[j], ps[j]) + ss[j] = append(ss[j], iCondition) + // as we add them to the power set, attempt to evaluate this + // subset of conditions as indexes + stop, err := intersectUUIDsFromConditionSet(ss[j]) + if stop || err != nil { + return err + } + } + ps = append(ps, ss...) 
+ } + return nil + } + + // finally + err := matchUUIDsFromConditionsPowerSet() + return matching, err +} + +// RowsByCondition searches models in the cache that match all conditions +func (r *RowCache) RowsByCondition(conditions []ovsdb.Condition) (map[string]model.Model, error) { + r.mutex.RLock() + defer r.mutex.RUnlock() + results := make(map[string]model.Model) + schema := r.dbModel.Schema.Table(r.name) + + // no conditions matches all rows + if len(conditions) == 0 { + for uuid := range r.cache { + results[uuid] = r.rowByUUID(uuid) + } + return results, nil + } + + // one pass to obtain the native values + nativeValues := make([]interface{}, 0, len(conditions)) + for _, condition := range conditions { + tSchema := schema.Column(condition.Column) + nativeValue, err := ovsdb.OvsToNative(tSchema, condition.Value) + if err != nil { + return nil, err + } + nativeValues = append(nativeValues, nativeValue) + } + + // obtain all possible matches using conditions as indexes + matching, err := r.uuidsByConditionsAsIndexes(conditions, nativeValues) + if err != nil { + return nil, err + } + + // From the matches obtained with indexes, which might have not used all + // conditions, continue trimming down the list explicitly evaluating the + // conditions. + for i, condition := range conditions { + matchingCondition := uuidset{} + + if condition.Column == "_uuid" && (condition.Function == ovsdb.ConditionEqual || condition.Function == ovsdb.ConditionIncludes) { + uuid, ok := nativeValues[i].(string) + if !ok { + panic(fmt.Sprintf("%+v is not a uuid", nativeValues[i])) + } + if _, found := r.cache[uuid]; found { + matchingCondition.add(uuid) + } + } else { + matchCondition := func(uuid string) error { + row := r.cache[uuid] + info, err := r.dbModel.NewModelInfo(row) + if err != nil { + return err + } + value, err := info.FieldByColumn(condition.Column) + if err != nil { + return err + } + ok, err := condition.Function.Evaluate(value, nativeValues[i]) + if err != nil { + return err + } + if ok { + matchingCondition.add(uuid) + } + return nil + } + if matching != nil { + // we just need to consider rows that matched previous + // conditions + for uuid := range matching { + err = matchCondition(uuid) + if err != nil { + return nil, err + } + } + } else { + // If this is the first condition we are able to check, just run + // it by whole table + for uuid := range r.cache { + err = matchCondition(uuid) + if err != nil { + return nil, err + } + } + } + } + if matching == nil { + matching = matchingCondition + } else { + matching = intersectUUIDSets(matching, matchingCondition) + } + if matching.empty() { + // no models match the conditions checked up to now, no need to + // check remaining conditions + break + } + } + + for uuid := range matching { + results[uuid] = r.rowByUUID(uuid) + } + + return results, nil +} + +// Len returns the length of the cache +func (r *RowCache) Len() int { + r.mutex.RLock() + defer r.mutex.RUnlock() + return len(r.cache) +} + +func (r *RowCache) Index(columns ...string) (map[interface{}][]string, error) { + r.mutex.RLock() + defer r.mutex.RUnlock() + spec := newIndexFromColumns(columns...) 
+ index, ok := r.indexes[spec] + if !ok { + return nil, fmt.Errorf("%v is not an index", columns) + } + dbIndex := make(map[interface{}][]string, len(index)) + for k, v := range index { + dbIndex[k] = v.list() + } + return dbIndex, nil +} + +// EventHandler can handle events when the contents of the cache changes +type EventHandler interface { + OnAdd(table string, model model.Model) + OnUpdate(table string, old model.Model, new model.Model) + OnDelete(table string, model model.Model) +} + +// EventHandlerFuncs is a wrapper for the EventHandler interface +// It allows a caller to only implement the functions they need +type EventHandlerFuncs struct { + AddFunc func(table string, model model.Model) + UpdateFunc func(table string, old model.Model, new model.Model) + DeleteFunc func(table string, model model.Model) +} + +// OnAdd calls AddFunc if it is not nil +func (e *EventHandlerFuncs) OnAdd(table string, model model.Model) { + if e.AddFunc != nil { + e.AddFunc(table, model) + } +} + +// OnUpdate calls UpdateFunc if it is not nil +func (e *EventHandlerFuncs) OnUpdate(table string, old, new model.Model) { + if e.UpdateFunc != nil { + e.UpdateFunc(table, old, new) + } +} + +// OnDelete calls DeleteFunc if it is not nil +func (e *EventHandlerFuncs) OnDelete(table string, row model.Model) { + if e.DeleteFunc != nil { + e.DeleteFunc(table, row) + } +} + +// TableCache contains a collection of RowCaches, hashed by name, +// and an array of EventHandlers that respond to cache updates +// It implements the ovsdb.NotificationHandler interface so it may +// handle update notifications +type TableCache struct { + cache map[string]*RowCache + eventProcessor *eventProcessor + dbModel model.DatabaseModel + ovsdb.NotificationHandler + mutex sync.RWMutex + logger *logr.Logger +} + +// Data is the type for data that can be prepopulated in the cache +type Data map[string]map[string]model.Model + +// NewTableCache creates a new TableCache +func NewTableCache(dbModel model.DatabaseModel, data Data, logger *logr.Logger) (*TableCache, error) { + if !dbModel.Valid() { + return nil, fmt.Errorf("tablecache without valid databasemodel cannot be populated") + } + if logger == nil { + l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("cache") + logger = &l + } else { + l := logger.WithName("cache") + logger = &l + } + eventProcessor := newEventProcessor(bufferSize, logger) + cache := make(map[string]*RowCache) + tableTypes := dbModel.Types() + for name := range dbModel.Schema.Tables { + cache[name] = newRowCache(name, dbModel, tableTypes[name]) + } + for table, rowData := range data { + if _, ok := dbModel.Schema.Tables[table]; !ok { + return nil, fmt.Errorf("table %s is not in schema", table) + } + rowCache := cache[table] + for uuid, row := range rowData { + if err := rowCache.Create(uuid, row, true); err != nil { + return nil, err + } + } + } + return &TableCache{ + cache: cache, + eventProcessor: eventProcessor, + dbModel: dbModel, + mutex: sync.RWMutex{}, + logger: logger, + }, nil +} + +// Mapper returns the mapper +func (t *TableCache) Mapper() mapper.Mapper { + return t.dbModel.Mapper +} + +// DatabaseModel returns the DatabaseModelRequest +func (t *TableCache) DatabaseModel() model.DatabaseModel { + return t.dbModel +} + +// Table returns the a Table from the cache with a given name +func (t *TableCache) Table(name string) *RowCache { + t.mutex.RLock() + defer t.mutex.RUnlock() + if table, ok := t.cache[name]; ok { + return table + } + return nil +} + 
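+// A minimal sketch of typical cache consumption, assuming a hypothetical
+// "Bridge" table, a matching Bridge model, and imported fmt/model packages:
+//
+//	type Bridge struct {
+//		UUID string `ovsdb:"_uuid"`
+//		Name string `ovsdb:"name"`
+//	}
+//
+//	func watchBridges(tc *TableCache) {
+//		// react to rows being added to the cache
+//		tc.AddEventHandler(&EventHandlerFuncs{
+//			AddFunc: func(table string, m model.Model) {
+//				if table == "Bridge" {
+//					fmt.Printf("bridge added: %s\n", m.(*Bridge).Name)
+//				}
+//			},
+//		})
+//		// direct reads go through the per-table RowCache
+//		if rc := tc.Table("Bridge"); rc != nil {
+//			for uuid, row := range rc.Rows() {
+//				fmt.Printf("%s -> %s\n", uuid, row.(*Bridge).Name)
+//			}
+//		}
+//	}
+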
+// Tables returns a list of table names that are in the cache +func (t *TableCache) Tables() []string { + t.mutex.RLock() + defer t.mutex.RUnlock() + var result []string + for k := range t.cache { + result = append(result, k) + } + return result +} + +// Update implements the update method of the NotificationHandler interface +// this populates a channel with updates so they can be processed after the initial +// state has been Populated +func (t *TableCache) Update(context interface{}, tableUpdates ovsdb.TableUpdates) error { + if len(tableUpdates) == 0 { + return nil + } + if err := t.Populate(tableUpdates); err != nil { + t.logger.Error(err, "during libovsdb cache populate") + return err + } + return nil +} + +// Update2 implements the update method of the NotificationHandler interface +// this populates a channel with updates so they can be processed after the initial +// state has been Populated +func (t *TableCache) Update2(context interface{}, tableUpdates ovsdb.TableUpdates2) error { + if len(tableUpdates) == 0 { + return nil + } + if err := t.Populate2(tableUpdates); err != nil { + t.logger.Error(err, "during libovsdb cache populate2") + return err + } + return nil +} + +// Locked implements the locked method of the NotificationHandler interface +func (t *TableCache) Locked([]interface{}) { +} + +// Stolen implements the stolen method of the NotificationHandler interface +func (t *TableCache) Stolen([]interface{}) { +} + +// Echo implements the echo method of the NotificationHandler interface +func (t *TableCache) Echo([]interface{}) { +} + +// Disconnected implements the disconnected method of the NotificationHandler interface +func (t *TableCache) Disconnected() { +} + +// Populate adds data to the cache and places an event on the channel +func (t *TableCache) Populate(tableUpdates ovsdb.TableUpdates) error { + t.mutex.Lock() + defer t.mutex.Unlock() + + for table := range t.dbModel.Types() { + tu, ok := tableUpdates[table] + if !ok { + continue + } + tCache := t.cache[table] + for uuid, row := range tu { + t.logger.V(5).Info("processing update", "table", table, "uuid", uuid) + update := updates.ModelUpdates{} + current := tCache.cache[uuid] + err := update.AddRowUpdate(t.dbModel, table, uuid, current, *row) + if err != nil { + return err + } + err = t.ApplyCacheUpdate(update) + if err != nil { + return err + } + } + } + return nil +} + +// Populate2 adds data to the cache and places an event on the channel +func (t *TableCache) Populate2(tableUpdates ovsdb.TableUpdates2) error { + t.mutex.Lock() + defer t.mutex.Unlock() + for table := range t.dbModel.Types() { + tu, ok := tableUpdates[table] + if !ok { + continue + } + tCache := t.cache[table] + for uuid, row := range tu { + t.logger.V(5).Info("processing update", "table", table, "uuid", uuid) + update := updates.ModelUpdates{} + current := tCache.cache[uuid] + if row.Initial == nil && row.Insert == nil && current == nil { + return NewErrCacheInconsistent(fmt.Sprintf("row with uuid %s does not exist", uuid)) + } + err := update.AddRowUpdate2(t.dbModel, table, uuid, current, *row) + if err != nil { + return err + } + err = t.ApplyCacheUpdate(update) + if err != nil { + return err + } + } + } + return nil +} + +// Purge drops all data in the cache and reinitializes it using the +// provided database model +func (t *TableCache) Purge(dbModel model.DatabaseModel) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.dbModel = dbModel + tableTypes := t.dbModel.Types() + for name := range t.dbModel.Schema.Tables { + t.cache[name] = 
newRowCache(name, t.dbModel, tableTypes[name]) + } +} + +// AddEventHandler registers the supplied EventHandler to receive cache events +func (t *TableCache) AddEventHandler(handler EventHandler) { + t.eventProcessor.AddEventHandler(handler) +} + +// Run starts the event processing and update processing loops. +// It blocks until the stop channel is closed. +// Once closed, it clears the updates/updates2 channels to ensure we don't process stale updates on a new connection +func (t *TableCache) Run(stopCh <-chan struct{}) { + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + t.eventProcessor.Run(stopCh) + }() + wg.Wait() +} + +// newRowCache creates a new row cache with the provided data +// if the data is nil, and empty RowCache will be created +func newRowCache(name string, dbModel model.DatabaseModel, dataType reflect.Type) *RowCache { + schemaIndexes := dbModel.Schema.Table(name).Indexes + clientIndexes := dbModel.Client().Indexes(name) + + r := &RowCache{ + name: name, + dbModel: dbModel, + indexSpecs: make([]indexSpec, 0, len(schemaIndexes)+len(clientIndexes)), + dataType: dataType, + cache: make(map[string]model.Model), + mutex: sync.RWMutex{}, + } + + // respect the order of indexes, add first schema indexes, then client + // indexes + indexes := map[index]indexSpec{} + for _, columns := range schemaIndexes { + columnKeys := newColumnKeysFromColumns(columns...) + index := newIndexFromColumnKeys(columnKeys...) + spec := indexSpec{index: index, columns: columnKeys, indexType: schemaIndexType} + r.indexSpecs = append(r.indexSpecs, spec) + indexes[index] = spec + } + for _, clientIndex := range clientIndexes { + columnKeys := clientIndex.Columns + index := newIndexFromColumnKeys(columnKeys...) + // if this is already a DB index, ignore + if _, ok := indexes[index]; ok { + continue + } + spec := indexSpec{index: index, columns: columnKeys, indexType: clientIndexType} + r.indexSpecs = append(r.indexSpecs, spec) + indexes[index] = spec + } + + r.indexes = r.newIndexes() + return r +} + +func (r *RowCache) newIndexes() columnToValue { + c := make(columnToValue) + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + c[index] = make(valueToUUIDs) + } + return c +} + +// event encapsulates a cache event +type event struct { + eventType string + table string + old model.Model + new model.Model +} + +// eventProcessor handles the queueing and processing of cache events +type eventProcessor struct { + events chan *event + // handlersMutex locks the handlers array when we add a handler or dispatch events + // we don't need a RWMutex in this case as we only have one thread reading and the write + // volume is very low (i.e only when AddEventHandler is called) + handlersMutex sync.Mutex + handlers []EventHandler + logger *logr.Logger +} + +func newEventProcessor(capacity int, logger *logr.Logger) *eventProcessor { + return &eventProcessor{ + events: make(chan *event, capacity), + handlers: []EventHandler{}, + logger: logger, + } +} + +// AddEventHandler registers the supplied EventHandler with the eventProcessor +// EventHandlers MUST process events quickly, for example, pushing them to a queue +// to be processed by the client. 
Long Running handler functions adversely affect +// other handlers and MAY cause loss of data if the channel buffer is full +func (e *eventProcessor) AddEventHandler(handler EventHandler) { + e.handlersMutex.Lock() + defer e.handlersMutex.Unlock() + e.handlers = append(e.handlers, handler) +} + +// AddEvent writes an event to the channel +func (e *eventProcessor) AddEvent(eventType string, table string, old model.Model, new model.Model) { + // We don't need to check for error here since there + // is only a single writer. RPC is run in blocking mode + event := event{ + eventType: eventType, + table: table, + old: old, + new: new, + } + select { + case e.events <- &event: + // noop + return + default: + e.logger.V(0).Info("dropping event because event buffer is full") + } +} + +// Run runs the eventProcessor loop. +// It will block until the stopCh has been closed +// Otherwise it will wait for events to arrive on the event channel +// Once received, it will dispatch the event to each registered handler +func (e *eventProcessor) Run(stopCh <-chan struct{}) { + for { + select { + case <-stopCh: + return + case event := <-e.events: + e.handlersMutex.Lock() + for _, handler := range e.handlers { + switch event.eventType { + case addEvent: + handler.OnAdd(event.table, event.new) + case updateEvent: + handler.OnUpdate(event.table, event.old, event.new) + case deleteEvent: + handler.OnDelete(event.table, event.old) + } + } + e.handlersMutex.Unlock() + } + } +} + +type cacheUpdate interface { + GetUpdatedTables() []string + ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error +} + +func (t *TableCache) ApplyCacheUpdate(update cacheUpdate) error { + tables := update.GetUpdatedTables() + for _, table := range tables { + tCache := t.cache[table] + err := update.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error { + switch { + case old == nil && new != nil: + t.logger.V(5).Info("inserting model", "table", table, "uuid", uuid, "model", new) + err := tCache.Create(uuid, new, false) + if err != nil { + return err + } + t.eventProcessor.AddEvent(addEvent, table, nil, new) + case old != nil && new != nil: + t.logger.V(5).Info("updating model", "table", table, "uuid", uuid, "old", old, "new", new) + _, err := tCache.Update(uuid, new, false) + if err != nil { + return err + } + t.eventProcessor.AddEvent(updateEvent, table, old, new) + case new == nil: + t.logger.V(5).Info("deleting model", "table", table, "uuid", uuid, "model", old) + err := tCache.Delete(uuid) + if err != nil { + return err + } + t.eventProcessor.AddEvent(deleteEvent, table, old, nil) + } + return nil + }) + if err != nil { + return err + } + } + return nil +} + +func valueFromIndex(info *mapper.Info, columnKeys []model.ColumnKey) (interface{}, error) { + if len(columnKeys) > 1 { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + for _, columnKey := range columnKeys { + val, err := valueFromColumnKey(info, columnKey) + if err != nil { + return "", err + } + // if object is nil dont try to encode it + value := reflect.ValueOf(val) + if value.Kind() == reflect.Invalid { + continue + } + // if object is a nil pointer dont try to encode it + if value.Kind() == reflect.Pointer && value.IsNil() { + continue + } + err = enc.Encode(val) + if err != nil { + return "", err + } + } + h := sha256.New() + val := hex.EncodeToString(h.Sum(buf.Bytes())) + return val, nil + } + val, err := valueFromColumnKey(info, columnKeys[0]) + if err != nil { + return "", err + } + return val, err +} + +func 
valueFromColumnKey(info *mapper.Info, columnKey model.ColumnKey) (interface{}, error) { + val, err := info.FieldByColumn(columnKey.Column) + if err != nil { + return nil, err + } + if columnKey.Key != nil { + val, err = valueFromMap(val, columnKey.Key) + if err != nil { + return "", fmt.Errorf("can't get key value from map: %v", err) + } + } + // if the value is a non-nil pointer of an optional, dereference + v := reflect.ValueOf(val) + if v.Kind() == reflect.Ptr && !v.IsNil() { + val = v.Elem().Interface() + } + return val, err +} + +func valueFromMap(aMap interface{}, key interface{}) (interface{}, error) { + m := reflect.ValueOf(aMap) + if m.Kind() != reflect.Map { + return nil, fmt.Errorf("expected map but got %s", m.Kind()) + } + v := m.MapIndex(reflect.ValueOf(key)) + if !v.IsValid() { + // return the zero value for the map value type + return reflect.Indirect(reflect.New(m.Type().Elem())).Interface(), nil + } + + return v.Interface(), nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/cache/doc.go b/vendor/github.com/ovn-org/libovsdb/cache/doc.go new file mode 100644 index 000000000..3b176f277 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/cache/doc.go @@ -0,0 +1,16 @@ +/* +Package cache provides a cache of model.Model elements that can be used in an OVSDB client or server. + +The cache can be accessed using a simple API: + + cache.Table("Open_vSwitch").Row("") + +It implements the ovsdb.NotificationHandler interface +such that it can be populated automatically by +update notifications + +It also contains an eventProcessor where callers +may registers functions that will get called on +every Add/Update/Delete event. +*/ +package cache diff --git a/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go b/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go new file mode 100644 index 000000000..f7c139737 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go @@ -0,0 +1,101 @@ +package cache + +type void struct{} +type uuidset map[string]void + +func newUUIDSet(uuids ...string) uuidset { + s := uuidset{} + for _, uuid := range uuids { + s[uuid] = void{} + } + return s +} + +func (s uuidset) add(uuid string) { + s[uuid] = void{} +} + +func (s uuidset) remove(uuid string) { + delete(s, uuid) +} + +func (s uuidset) has(uuid string) bool { + _, ok := s[uuid] + return ok +} + +func (s uuidset) equals(o uuidset) bool { + if len(s) != len(o) { + return false + } + for uuid := range s { + if !o.has(uuid) { + return false + } + } + return true +} + +func (s uuidset) getAny() string { + for k := range s { + return k + } + return "" +} + +func (s uuidset) list() []string { + uuids := make([]string, 0, len(s)) + for uuid := range s { + uuids = append(uuids, uuid) + } + return uuids +} + +func (s uuidset) empty() bool { + return len(s) == 0 +} + +func addUUIDSet(s1, s2 uuidset) uuidset { + if len(s2) == 0 { + return s1 + } + if s1 == nil { + s1 = uuidset{} + } + for uuid := range s2 { + s1.add(uuid) + } + return s1 +} + +func substractUUIDSet(s1, s2 uuidset) uuidset { + if len(s1) == 0 || len(s2) == 0 { + return s1 + } + for uuid := range s2 { + s1.remove(uuid) + } + return s1 +} + +func intersectUUIDSets(s1, s2 uuidset) uuidset { + if len(s1) == 0 || len(s2) == 0 { + return nil + } + var big uuidset + var small uuidset + if len(s1) > len(s2) { + big = s1 + small = s2 + } else { + big = s2 + small = s1 + } + f := uuidset{} + for uuid := range small { + if big.has(uuid) { + f.add(uuid) + } + } + return f +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/api.go 
b/vendor/github.com/ovn-org/libovsdb/client/api.go new file mode 100644 index 000000000..497758944 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/api.go @@ -0,0 +1,593 @@ +package client + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/go-logr/logr" + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// API defines basic operations to interact with the database +type API interface { + // List populates a slice of Models objects based on their type + // The function parameter must be a pointer to a slice of Models + // Models can be structs or pointers to structs + // If the slice is null, the entire cache will be copied into the slice + // If it has a capacity != 0, only 'capacity' elements will be filled in + List(ctx context.Context, result interface{}) error + + // Create a Conditional API from a Function that is used to filter cached data + // The function must accept a Model implementation and return a boolean. E.g: + // ConditionFromFunc(func(l *LogicalSwitch) bool { return l.Enabled }) + WhereCache(predicate interface{}) ConditionalAPI + + // Create a ConditionalAPI from a Model's index data, where operations + // apply to elements that match the values provided in one or more + // model.Models according to the indexes. All provided Models must be + // the same type or an error will be generated when operations are + // are performed on the ConditionalAPI. + Where(...model.Model) ConditionalAPI + + // WhereAny creates a ConditionalAPI from a list of Conditions where + // operations apply to elements that match any (eg, logical OR) of the + // conditions. + WhereAny(model.Model, ...model.Condition) ConditionalAPI + + // WhereAll creates a ConditionalAPI from a list of Conditions where + // operations apply to elements that match all (eg, logical AND) of the + // conditions. + WhereAll(model.Model, ...model.Condition) ConditionalAPI + + // Get retrieves a model from the cache + // The way the object will be fetch depends on the data contained in the + // provided model and the indexes defined in the associated schema + // For more complex ways of searching for elements in the cache, the + // preferred way is Where({condition}).List() + Get(context.Context, model.Model) error + + // Create returns the operation needed to add the model(s) to the Database + // Only fields with non-default values will be added to the transaction. If + // the field associated with column "_uuid" has some content other than a + // UUID, it will be treated as named-uuid + Create(...model.Model) ([]ovsdb.Operation, error) +} + +// ConditionalAPI is an interface used to perform operations that require / use Conditions +type ConditionalAPI interface { + // List uses the condition to search on the cache and populates + // the slice of Models objects based on their type + List(ctx context.Context, result interface{}) error + + // Mutate returns the operations needed to perform the mutation specified + // By the model and the list of Mutation objects + // Depending on the Condition, it might return one or many operations + Mutate(model.Model, ...model.Mutation) ([]ovsdb.Operation, error) + + // Update returns the operations needed to update any number of rows according + // to the data in the given model. + // By default, all the non-default values contained in model will be updated. 
+ // Optional fields can be passed (pointer to fields in the model) to select the + // the fields to be updated + Update(model.Model, ...interface{}) ([]ovsdb.Operation, error) + + // Delete returns the Operations needed to delete the models selected via the condition + Delete() ([]ovsdb.Operation, error) + + // Wait returns the operations needed to perform the wait specified + // by the until condition, timeout, row and columns based on provided parameters. + Wait(ovsdb.WaitCondition, *int, model.Model, ...interface{}) ([]ovsdb.Operation, error) +} + +// ErrWrongType is used to report the user provided parameter has the wrong type +type ErrWrongType struct { + inputType reflect.Type + reason string +} + +func (e *ErrWrongType) Error() string { + return fmt.Sprintf("Wrong parameter type (%s): %s", e.inputType, e.reason) +} + +// ErrNotFound is used to inform the object or table was not found in the cache +var ErrNotFound = errors.New("object not found") + +// api struct implements both API and ConditionalAPI +// Where() can be used to create a ConditionalAPI api +type api struct { + cache *cache.TableCache + cond Conditional + logger *logr.Logger +} + +// List populates a slice of Models given as parameter based on the configured Condition +func (a api) List(ctx context.Context, result interface{}) error { + resultPtr := reflect.ValueOf(result) + if resultPtr.Type().Kind() != reflect.Ptr { + return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"} + } + + resultVal := reflect.Indirect(resultPtr) + if resultVal.Type().Kind() != reflect.Slice { + return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"} + } + + // List accepts a slice of Models that can be either structs or pointer to + // structs + var appendValue func(reflect.Value) + var m model.Model + if resultVal.Type().Elem().Kind() == reflect.Ptr { + m = reflect.New(resultVal.Type().Elem().Elem()).Interface() + appendValue = func(v reflect.Value) { + resultVal.Set(reflect.Append(resultVal, v)) + } + } else { + m = reflect.New(resultVal.Type().Elem()).Interface() + appendValue = func(v reflect.Value) { + resultVal.Set(reflect.Append(resultVal, reflect.Indirect(v))) + } + } + + table, err := a.getTableFromModel(m) + if err != nil { + return err + } + + if a.cond != nil && a.cond.Table() != table { + return &ErrWrongType{resultPtr.Type(), + fmt.Sprintf("Table derived from input type (%s) does not match Table from Condition (%s)", table, a.cond.Table())} + } + + tableCache := a.cache.Table(table) + if tableCache == nil { + return ErrNotFound + } + + var rows map[string]model.Model + if a.cond != nil { + rows, err = a.cond.Matches() + if err != nil { + return err + } + } else { + rows = tableCache.Rows() + } + // If given a null slice, fill it in the cache table completely, if not, just up to + // its capability. + if resultVal.IsNil() || resultVal.Cap() == 0 { + resultVal.Set(reflect.MakeSlice(resultVal.Type(), 0, len(rows))) + } + i := resultVal.Len() + maxCap := resultVal.Cap() + + for _, row := range rows { + if i >= maxCap { + break + } + appendValue(reflect.ValueOf(row)) + i++ + } + + return nil +} + +// Where returns a conditionalAPI based on model indexes. All provided models +// must be the same type. 
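+// For example, a minimal sketch assuming a hypothetical Bridge model whose
+// "name" column is a schema index, and an already connected client:
+//
+//	br := &Bridge{Name: "br-int"}
+//	ops, err := client.Where(br).Delete()
+//	if err == nil {
+//		_, err = client.Transact(ctx, ops...)
+//	}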
+func (a api) Where(models ...model.Model) ConditionalAPI { + return newConditionalAPI(a.cache, a.conditionFromModels(models), a.logger) +} + +// WhereAny returns a conditionalAPI based on a Condition list that matches any +// of the conditions individually +func (a api) WhereAny(m model.Model, cond ...model.Condition) ConditionalAPI { + return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(false, m, cond...), a.logger) +} + +// WhereAll returns a conditionalAPI based on a Condition list that matches all +// of the conditions together +func (a api) WhereAll(m model.Model, cond ...model.Condition) ConditionalAPI { + return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(true, m, cond...), a.logger) +} + +// WhereCache returns a conditionalAPI based a Predicate +func (a api) WhereCache(predicate interface{}) ConditionalAPI { + return newConditionalAPI(a.cache, a.conditionFromFunc(predicate), a.logger) +} + +// Conditional interface implementation +// FromFunc returns a Condition from a function +func (a api) conditionFromFunc(predicate interface{}) Conditional { + table, err := a.getTableFromFunc(predicate) + if err != nil { + return newErrorConditional(err) + } + + condition, err := newPredicateConditional(table, a.cache, predicate) + if err != nil { + return newErrorConditional(err) + } + return condition +} + +// conditionFromModels returns a Conditional from one or more models. +func (a api) conditionFromModels(models []model.Model) Conditional { + if len(models) == 0 { + return newErrorConditional(fmt.Errorf("at least one model required")) + } + tableName, err := a.getTableFromModel(models[0]) + if tableName == "" { + return newErrorConditional(err) + } + conditional, err := newEqualityConditional(tableName, a.cache, models) + if err != nil { + return newErrorConditional(err) + } + return conditional +} + +// conditionFromExplicitConditions returns a Conditional from a model and a set +// of explicit conditions. If matchAll is true, then models that match all the given +// conditions are selected by the Conditional. If matchAll is false, then any model +// that matches one of the conditions is selected. +func (a api) conditionFromExplicitConditions(matchAll bool, m model.Model, cond ...model.Condition) Conditional { + if len(cond) == 0 { + return newErrorConditional(fmt.Errorf("at least one condition is required")) + } + tableName, err := a.getTableFromModel(m) + if tableName == "" { + return newErrorConditional(err) + } + conditional, err := newExplicitConditional(tableName, a.cache, matchAll, m, cond...) + if err != nil { + return newErrorConditional(err) + } + return conditional +} + +// Get is a generic Get function capable of returning (through a provided pointer) +// a instance of any row in the cache. +// 'result' must be a pointer to an Model that exists in the ClientDBModel +// +// The way the cache is searched depends on the fields already populated in 'result' +// Any table index (including _uuid) will be used for comparison +func (a api) Get(ctx context.Context, m model.Model) error { + table, err := a.getTableFromModel(m) + if err != nil { + return err + } + + tableCache := a.cache.Table(table) + if tableCache == nil { + return ErrNotFound + } + + _, found, err := tableCache.RowByModel(m) + if err != nil { + return err + } else if found == nil { + return ErrNotFound + } + + model.CloneInto(found, m) + + return nil +} + +// Create is a generic function capable of creating any row in the DB +// A valid Model (pointer to object) must be provided. 
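+// For example, a minimal sketch assuming a hypothetical Bridge model and an
+// already connected client; a non-UUID value in the _uuid field acts as a
+// named-uuid that later operations in the same transaction may reference:
+//
+//	ops, err := client.Create(&Bridge{UUID: "mybridge", Name: "br-int"})
+//	if err == nil {
+//		_, err = client.Transact(ctx, ops...)
+//	}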
+func (a api) Create(models ...model.Model) ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + + for _, model := range models { + var realUUID, namedUUID string + var err error + + tableName, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + + // Read _uuid field, and use it as named-uuid + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + if uuid, err := info.FieldByColumn("_uuid"); err == nil { + tmpUUID := uuid.(string) + if ovsdb.IsNamedUUID(tmpUUID) { + namedUUID = tmpUUID + } else if ovsdb.IsValidUUID(tmpUUID) { + realUUID = tmpUUID + } + } else { + return nil, err + } + + row, err := a.cache.Mapper().NewRow(info) + if err != nil { + return nil, err + } + // UUID is given in the operation, not the object + delete(row, "_uuid") + + operations = append(operations, ovsdb.Operation{ + Op: ovsdb.OperationInsert, + Table: tableName, + Row: row, + UUID: realUUID, + UUIDName: namedUUID, + }) + } + return operations, nil +} + +// Mutate returns the operations needed to transform the one Model into another one +func (a api) Mutate(model model.Model, mutationObjs ...model.Mutation) ([]ovsdb.Operation, error) { + var mutations []ovsdb.Mutation + var operations []ovsdb.Operation + + if len(mutationObjs) < 1 { + return nil, fmt.Errorf("at least one Mutation must be provided") + } + + tableName := a.cache.DatabaseModel().FindTable(reflect.ValueOf(model).Type()) + if tableName == "" { + return nil, fmt.Errorf("table not found for object") + } + table := a.cache.Mapper().Schema.Table(tableName) + if table == nil { + return nil, fmt.Errorf("schema error: table not found in Database Model for type %s", reflect.TypeOf(model)) + } + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + + for _, mobj := range mutationObjs { + col, err := info.ColumnByPtr(mobj.Field) + if err != nil { + return nil, err + } + + mutation, err := a.cache.Mapper().NewMutation(info, col, mobj.Mutator, mobj.Value) + if err != nil { + return nil, err + } + mutations = append(mutations, *mutation) + } + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationMutate, + Table: tableName, + Mutations: mutations, + Where: condition, + }, + ) + } + + return operations, nil +} + +// Update is a generic function capable of updating any mutable field in any row in the database +// Additional fields can be passed (variadic opts) to indicate fields to be updated +// All immutable fields will be ignored +func (a api) Update(model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + table, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + tableSchema := a.cache.Mapper().Schema.Table(table) + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + + if len(fields) > 0 { + for _, f := range fields { + colName, err := info.ColumnByPtr(f) + if err != nil { + return nil, err + } + if !tableSchema.Columns[colName].Mutable() { + return nil, fmt.Errorf("unable to update field %s of table %s as it is not mutable", colName, table) + } + } + } + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + row, err := a.cache.Mapper().NewRow(info, fields...) 
+ if err != nil { + return nil, err + } + + for colName, column := range tableSchema.Columns { + if !column.Mutable() { + a.logger.V(2).Info("removing immutable field", "name", colName) + delete(row, colName) + } + } + delete(row, "_uuid") + + if len(row) == 0 { + return nil, fmt.Errorf("attempted to update using an empty row. please check that all fields you wish to update are mutable") + } + + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationUpdate, + Table: table, + Row: row, + Where: condition, + }, + ) + } + return operations, nil +} + +// Delete returns the Operation needed to delete the selected models from the database +func (a api) Delete() ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationDelete, + Table: a.cond.Table(), + Where: condition, + }, + ) + } + + return operations, nil +} + +func (a api) Wait(untilConFun ovsdb.WaitCondition, timeout *int, model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + + /* + Ref: https://datatracker.ietf.org/doc/html/rfc7047.txt#section-5.2.6 + + lb := &nbdb.LoadBalancer{} + condition := model.Condition{ + Field: &lb.Name, + Function: ovsdb.ConditionEqual, + Value: "lbName", + } + timeout0 := 0 + client.Where(lb, condition).Wait( + ovsdb.WaitConditionNotEqual, // Until + &timeout0, // Timeout + &lb, // Row (and Table) + &lb.Name, // Cols (aka fields) + ) + */ + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + table, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + + var columnNames []string + if len(fields) > 0 { + columnNames = make([]string, 0, len(fields)) + for _, f := range fields { + colName, err := info.ColumnByPtr(f) + if err != nil { + return nil, err + } + columnNames = append(columnNames, colName) + } + } + + row, err := a.cache.Mapper().NewRow(info, fields...) 
+ if err != nil { + return nil, err + } + rows := []ovsdb.Row{row} + + for _, condition := range conditions { + operation := ovsdb.Operation{ + Op: ovsdb.OperationWait, + Table: table, + Where: condition, + Until: string(untilConFun), + Columns: columnNames, + Rows: rows, + } + + if timeout != nil { + operation.Timeout = timeout + } + + operations = append(operations, operation) + } + + return operations, nil +} + +// getTableFromModel returns the table name from a Model object after performing +// type verifications on the model +func (a api) getTableFromModel(m interface{}) (string, error) { + if _, ok := m.(model.Model); !ok { + return "", &ErrWrongType{reflect.TypeOf(m), "Type does not implement Model interface"} + } + table := a.cache.DatabaseModel().FindTable(reflect.TypeOf(m)) + if table == "" { + return "", &ErrWrongType{reflect.TypeOf(m), "Model not found in Database Model"} + } + return table, nil +} + +// getTableFromModel returns the table name from a the predicate after performing +// type verifications +func (a api) getTableFromFunc(predicate interface{}) (string, error) { + predType := reflect.TypeOf(predicate) + if predType == nil || predType.Kind() != reflect.Func { + return "", &ErrWrongType{predType, "Expected function"} + } + if predType.NumIn() != 1 || predType.NumOut() != 1 || predType.Out(0).Kind() != reflect.Bool { + return "", &ErrWrongType{predType, "Expected func(Model) bool"} + } + + modelInterface := reflect.TypeOf((*model.Model)(nil)).Elem() + modelType := predType.In(0) + if !modelType.Implements(modelInterface) { + return "", &ErrWrongType{predType, + fmt.Sprintf("Type %s does not implement Model interface", modelType.String())} + } + + table := a.cache.DatabaseModel().FindTable(modelType) + if table == "" { + return "", &ErrWrongType{predType, + fmt.Sprintf("Model %s not found in Database Model", modelType.String())} + } + return table, nil +} + +// newAPI returns a new API to interact with the database +func newAPI(cache *cache.TableCache, logger *logr.Logger) API { + return api{ + cache: cache, + logger: logger, + } +} + +// newConditionalAPI returns a new ConditionalAPI to interact with the database +func newConditionalAPI(cache *cache.TableCache, cond Conditional, logger *logr.Logger) ConditionalAPI { + return api{ + cache: cache, + cond: cond, + logger: logger, + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go b/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go new file mode 100644 index 000000000..36ea476e0 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go @@ -0,0 +1,167 @@ +package client + +import ( + "encoding/json" + "testing" + + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/stretchr/testify/assert" +) + +var apiTestSchema = []byte(`{ + "name": "OVN_Northbound", + "version": "5.31.0", + "cksum": "2352750632 28701", + "tables": { + "Logical_Switch": { + "columns": { + "name": {"type": "string"}, + "ports": {"type": {"key": {"type": "uuid", + "refTable": "Logical_Switch_Port", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "acls": {"type": {"key": {"type": "uuid", + "refTable": "ACL", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "qos_rules": {"type": {"key": {"type": "uuid", + "refTable": "QoS", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "load_balancer": {"type": {"key": {"type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak"}, + "min": 0, 
+ "max": "unlimited"}}, + "dns_records": {"type": {"key": {"type": "uuid", + "refTable": "DNS", + "refType": "weak"}, + "min": 0, + "max": "unlimited"}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "forwarding_groups": { + "type": {"key": {"type": "uuid", + "refTable": "Forwarding_Group", + "refType": "strong"}, + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "Logical_Switch_Port": { + "columns": { + "name": {"type": "string"}, + "type": {"type": "string"}, + "options": { + "type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + "parent_name": {"type": {"key": "string", "min": 0, "max": 1}}, + "tag_request": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "tag": { + "type": {"key": {"type": "integer", + "minInteger": 1, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "addresses": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "dynamic_addresses": {"type": {"key": "string", + "min": 0, + "max": 1}}, + "port_security": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "dhcpv4_options": {"type": {"key": {"type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak"}, + "min": 0, + "max": 1}}, + "dhcpv6_options": {"type": {"key": {"type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak"}, + "min": 0, + "max": 1}}, + "ha_chassis_group": { + "type": {"key": {"type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong"}, + "min": 0, + "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": false} + } + }`) + +type testLogicalSwitch struct { + UUID string `ovsdb:"_uuid"` + Ports []string `ovsdb:"ports"` + ExternalIds map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + QosRules []string `ovsdb:"qos_rules"` + LoadBalancer []string `ovsdb:"load_balancer"` + DNSRecords []string `ovsdb:"dns_records"` + OtherConfig map[string]string `ovsdb:"other_config"` + ForwardingGroups []string `ovsdb:"forwarding_groups"` + Acls []string `ovsdb:"acls"` +} + +// Table returns the table name. It's part of the Model interface +func (*testLogicalSwitch) Table() string { + return "Logical_Switch" +} + +//LogicalSwitchPort struct defines an object in Logical_Switch_Port table +type testLogicalSwitchPort struct { + UUID string `ovsdb:"_uuid"` + Up *bool `ovsdb:"up"` + Dhcpv4Options *string `ovsdb:"dhcpv4_options"` + Name string `ovsdb:"name"` + DynamicAddresses *string `ovsdb:"dynamic_addresses"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + Options map[string]string `ovsdb:"options"` + Enabled *bool `ovsdb:"enabled"` + Addresses []string `ovsdb:"addresses"` + Dhcpv6Options *string `ovsdb:"dhcpv6_options"` + TagRequest *int `ovsdb:"tag_request"` + Tag *int `ovsdb:"tag"` + PortSecurity []string `ovsdb:"port_security"` + ExternalIds map[string]string `ovsdb:"external_ids"` + Type string `ovsdb:"type"` + ParentName *string `ovsdb:"parent_name"` +} + +// Table returns the table name. 
It's part of the Model interface +func (*testLogicalSwitchPort) Table() string { + return "Logical_Switch_Port" +} + +func apiTestCache(t testing.TB, data map[string]map[string]model.Model) *cache.TableCache { + var schema ovsdb.DatabaseSchema + err := json.Unmarshal(apiTestSchema, &schema) + assert.Nil(t, err) + db, err := model.NewClientDBModel("OVN_Northbound", map[string]model.Model{"Logical_Switch": &testLogicalSwitch{}, "Logical_Switch_Port": &testLogicalSwitchPort{}}) + assert.Nil(t, err) + dbModel, errs := model.NewDatabaseModel(schema, db) + assert.Empty(t, errs) + cache, err := cache.NewTableCache(dbModel, data, nil) + assert.Nil(t, err) + return cache +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/client.go b/vendor/github.com/ovn-org/libovsdb/client/client.go new file mode 100644 index 000000000..10ea757ec --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/client.go @@ -0,0 +1,1480 @@ +package client + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "log" + "net" + "net/url" + "os" + "reflect" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/rpc2" + "github.com/cenkalti/rpc2/jsonrpc" + "github.com/go-logr/logr" + "github.com/go-logr/stdr" + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/libovsdb/ovsdb/serverdb" +) + +// Constants defined for libovsdb +const ( + SSL = "ssl" + TCP = "tcp" + UNIX = "unix" +) + +const serverDB = "_Server" + +// ErrNotConnected is an error returned when the client is not connected +var ErrNotConnected = errors.New("not connected") + +// ErrAlreadyConnected is an error returned when the client is already connected +var ErrAlreadyConnected = errors.New("already connected") + +// ErrUnsupportedRPC is an error returned when an unsupported RPC method is called +var ErrUnsupportedRPC = errors.New("unsupported rpc") + +// Client represents an OVSDB Client Connection +// It provides all the necessary functionality to Connect to a server, +// perform transactions, and build your own replica of the database with +// Monitor or MonitorAll. It also provides a Cache that is populated from OVSDB +// update notifications. +type Client interface { + Connect(context.Context) error + Disconnect() + Close() + Schema() ovsdb.DatabaseSchema + Cache() *cache.TableCache + UpdateEndpoints([]string) + SetOption(Option) error + Connected() bool + DisconnectNotify() chan struct{} + Echo(context.Context) error + Transact(context.Context, ...ovsdb.Operation) ([]ovsdb.OperationResult, error) + Monitor(context.Context, *Monitor) (MonitorCookie, error) + MonitorAll(context.Context) (MonitorCookie, error) + MonitorCancel(ctx context.Context, cookie MonitorCookie) error + NewMonitor(...MonitorOption) *Monitor + CurrentEndpoint() string + API +} + +type bufferedUpdate struct { + updates *ovsdb.TableUpdates + updates2 *ovsdb.TableUpdates2 + lastTxnID string +} + +type epInfo struct { + address string + serverID string +} + +// ovsdbClient is an OVSDB client +type ovsdbClient struct { + options *options + metrics metrics + connected bool + rpcClient *rpc2.Client + rpcMutex sync.RWMutex + // endpoints contains all possible endpoints; the first element is + // the active endpoint if connected=true + endpoints []*epInfo + + // The name of the "primary" database - that is to say, the DB + // that the user expects to interact with. 
+ primaryDBName string + databases map[string]*database + + errorCh chan error + stopCh chan struct{} + disconnect chan struct{} + shutdown bool + shutdownMutex sync.Mutex + + handlerShutdown *sync.WaitGroup + + trafficSeen chan struct{} + + logger *logr.Logger +} + +// database is everything needed to map between go types and an ovsdb Database +type database struct { + // model encapsulates the database schema and model of the database we're connecting to + model model.DatabaseModel + // modelMutex protects model from being replaced (via reconnect) while in use + modelMutex sync.RWMutex + + // cache is used to store the updates for monitored tables + cache *cache.TableCache + // cacheMutex protects cache from being replaced (via reconnect) while in use + cacheMutex sync.RWMutex + + api API + + // any ongoing monitors, so we can re-create them if we disconnect + monitors map[string]*Monitor + monitorsMutex sync.Mutex + + // tracks any outstanding updates while waiting for a monitor response + deferUpdates bool + deferredUpdates []*bufferedUpdate +} + +// NewOVSDBClient creates a new OVSDB Client with the provided +// database model. The client can be configured using one or more Option(s), +// like WithTLSConfig. If no WithEndpoint option is supplied, the default of +// unix:/var/run/openvswitch/ovsdb.sock is used +func NewOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (Client, error) { + return newOVSDBClient(clientDBModel, opts...) +} + +// newOVSDBClient creates a new ovsdbClient +func newOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (*ovsdbClient, error) { + ovs := &ovsdbClient{ + primaryDBName: clientDBModel.Name(), + databases: map[string]*database{ + clientDBModel.Name(): { + model: model.NewPartialDatabaseModel(clientDBModel), + monitors: make(map[string]*Monitor), + deferUpdates: true, + deferredUpdates: make([]*bufferedUpdate, 0), + }, + }, + errorCh: make(chan error), + handlerShutdown: &sync.WaitGroup{}, + disconnect: make(chan struct{}), + } + var err error + ovs.options, err = newOptions(opts...) + if err != nil { + return nil, err + } + for _, address := range ovs.options.endpoints { + ovs.endpoints = append(ovs.endpoints, &epInfo{address: address}) + } + + if ovs.options.logger == nil { + // create a new logger to log to stdout + l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("libovsdb").WithValues( + "database", ovs.primaryDBName, + ) + stdr.SetVerbosity(5) + ovs.logger = &l + } else { + // add the "database" value to the structured logger + // to make it easier to tell between different DBs (e.g. ovn nbdb vs. sbdb) + l := ovs.options.logger.WithValues( + "database", ovs.primaryDBName, + ) + ovs.logger = &l + } + ovs.metrics.init(clientDBModel.Name(), ovs.options.metricNamespace, ovs.options.metricSubsystem) + ovs.registerMetrics() + + // if we should only connect to the leader, then add the special "_Server" database as well + if ovs.options.leaderOnly { + sm, err := serverdb.FullDatabaseModel() + if err != nil { + return nil, fmt.Errorf("could not initialize model _Server: %w", err) + } + ovs.databases[serverDB] = &database{ + model: model.NewPartialDatabaseModel(sm), + monitors: make(map[string]*Monitor), + } + } + + return ovs, nil +} + +// Connect opens a connection to an OVSDB Server using the +// endpoint provided when the Client was created. 
+// The connection can be configured using one or more Option(s), like WithTLSConfig +// If no WithEndpoint option is supplied, the default of unix:/var/run/openvswitch/ovsdb.sock is used +func (o *ovsdbClient) Connect(ctx context.Context) error { + if err := o.connect(ctx, false); err != nil { + if err == ErrAlreadyConnected { + return nil + } + return err + } + if o.options.leaderOnly { + if err := o.watchForLeaderChange(); err != nil { + return err + } + } + return nil +} + +// moveEndpointFirst makes the endpoint requested by active the first element +// in the endpoints slice, indicating it is the active endpoint +func (o *ovsdbClient) moveEndpointFirst(i int) { + firstEp := o.endpoints[i] + othereps := append(o.endpoints[:i], o.endpoints[i+1:]...) + o.endpoints = append([]*epInfo{firstEp}, othereps...) +} + +// moveEndpointLast moves the requested endpoint to the end of the list +func (o *ovsdbClient) moveEndpointLast(i int) { + lastEp := o.endpoints[i] + othereps := append(o.endpoints[:i], o.endpoints[i+1:]...) + o.endpoints = append(othereps, lastEp) +} + +func (o *ovsdbClient) resetRPCClient() { + if o.rpcClient != nil { + o.rpcClient.Close() + o.rpcClient = nil + } +} + +func (o *ovsdbClient) connect(ctx context.Context, reconnect bool) error { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if o.rpcClient != nil { + return ErrAlreadyConnected + } + + connected := false + connectErrors := []error{} + for i, endpoint := range o.endpoints { + u, err := url.Parse(endpoint.address) + if err != nil { + return err + } + if sid, err := o.tryEndpoint(ctx, u); err != nil { + o.resetRPCClient() + connectErrors = append(connectErrors, + fmt.Errorf("failed to connect to %s: %w", endpoint.address, err)) + continue + } else { + o.logger.V(3).Info("successfully connected", "endpoint", endpoint.address, "sid", sid) + endpoint.serverID = sid + o.moveEndpointFirst(i) + connected = true + break + } + } + + if !connected { + if len(connectErrors) == 1 { + return connectErrors[0] + } + var combined []string + for _, e := range connectErrors { + combined = append(combined, e.Error()) + } + + return fmt.Errorf("unable to connect to any endpoints: %s", strings.Join(combined, ". ")) + } + + // if we're reconnecting, re-start all the monitors + if reconnect { + o.logger.V(3).Info("reconnected - restarting monitors") + for dbName, db := range o.databases { + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + + // Purge entire cache if no monitors exist to update dynamically + if len(db.monitors) == 0 { + db.cache.Purge(db.model) + continue + } + + // Restart all monitors; each monitor will handle purging + // the cache if necessary + for id, request := range db.monitors { + err := o.monitor(ctx, MonitorCookie{DatabaseName: dbName, ID: id}, true, request) + if err != nil { + o.resetRPCClient() + return err + } + } + } + } + + go o.handleDisconnectNotification() + if o.options.inactivityTimeout > 0 { + o.handlerShutdown.Add(1) + go o.handleInactivityProbes() + } + for _, db := range o.databases { + o.handlerShutdown.Add(1) + eventStopChan := make(chan struct{}) + go o.handleClientErrors(eventStopChan) + o.handlerShutdown.Add(1) + go func(db *database) { + defer o.handlerShutdown.Done() + db.cache.Run(o.stopCh) + close(eventStopChan) + }(db) + } + + o.connected = true + return nil +} + +// tryEndpoint connects to a single database endpoint. Returns the +// server ID (if clustered) on success, or an error. 
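For readers of this vendored API, a minimal sketch of how NewOVSDBClient and Connect are typically wired together. The MyLogicalSwitch model, the endpoint address and the 10-second timeout are illustrative assumptions and not part of this patch; the OVN_Northbound and Logical_Switch names mirror the test model earlier in this diff.

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/ovn-org/libovsdb/client"
        "github.com/ovn-org/libovsdb/model"
    )

    // MyLogicalSwitch is an illustrative model; tags must match the columns
    // of the schema actually served by the endpoint.
    type MyLogicalSwitch struct {
        UUID  string   `ovsdb:"_uuid"`
        Name  string   `ovsdb:"name"`
        Ports []string `ovsdb:"ports"`
    }

    func main() {
        // Map table names to model structs so the client can encode/decode rows.
        dbModel, err := model.NewClientDBModel("OVN_Northbound",
            map[string]model.Model{"Logical_Switch": &MyLogicalSwitch{}})
        if err != nil {
            log.Fatal(err)
        }

        // Placeholder endpoint; the default unix socket is used if omitted.
        ovs, err := client.NewOVSDBClient(dbModel, client.WithEndpoint("tcp:127.0.0.1:6641"))
        if err != nil {
            log.Fatal(err)
        }

        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()
        if err := ovs.Connect(ctx); err != nil {
            log.Fatal(err)
        }
        defer ovs.Close()

        log.Printf("connected to %s", ovs.CurrentEndpoint())
    }

Close (rather than Disconnect) also clears cached state, so a later Connect starts from a clean slate.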
+func (o *ovsdbClient) tryEndpoint(ctx context.Context, u *url.URL) (string, error) { + o.logger.V(3).Info("trying to connect", "endpoint", fmt.Sprintf("%v", u)) + var dialer net.Dialer + var err error + var c net.Conn + + switch u.Scheme { + case UNIX: + c, err = dialer.DialContext(ctx, u.Scheme, u.Path) + case TCP: + c, err = dialer.DialContext(ctx, u.Scheme, u.Opaque) + case SSL: + dialer := tls.Dialer{ + Config: o.options.tlsConfig, + } + c, err = dialer.DialContext(ctx, "tcp", u.Opaque) + default: + err = fmt.Errorf("unknown network protocol %s", u.Scheme) + } + if err != nil { + return "", fmt.Errorf("failed to open connection: %w", err) + } + + o.createRPC2Client(c) + + serverDBNames, err := o.listDbs(ctx) + if err != nil { + return "", err + } + + // for every requested database, ensure the DB exists in the server and + // that the schema matches what we expect. + for dbName, db := range o.databases { + // check the server has what we want + found := false + for _, name := range serverDBNames { + if name == dbName { + found = true + break + } + } + if !found { + return "", fmt.Errorf("target database %s not found", dbName) + } + + // load and validate the schema + schema, err := o.getSchema(ctx, dbName) + if err != nil { + return "", err + } + + db.modelMutex.Lock() + var errors []error + db.model, errors = model.NewDatabaseModel(schema, db.model.Client()) + db.modelMutex.Unlock() + if len(errors) > 0 { + var combined []string + for _, err := range errors { + combined = append(combined, err.Error()) + } + return "", fmt.Errorf("database %s validation error (%d): %s", + dbName, len(errors), strings.Join(combined, ". ")) + } + + db.cacheMutex.Lock() + if db.cache == nil { + db.cache, err = cache.NewTableCache(db.model, nil, o.logger) + if err != nil { + db.cacheMutex.Unlock() + return "", err + } + db.api = newAPI(db.cache, o.logger) + } + db.cacheMutex.Unlock() + } + + // check that this is the leader + var sid string + if o.options.leaderOnly { + var leader bool + leader, sid, err = o.isEndpointLeader(ctx) + if err != nil { + return "", err + } + if !leader { + return "", fmt.Errorf("endpoint is not leader") + } + } + return sid, nil +} + +// createRPC2Client creates an rpcClient using the provided connection +// It is also responsible for setting up go routines for client-side event handling +// Should only be called when the mutex is held +func (o *ovsdbClient) createRPC2Client(conn net.Conn) { + o.stopCh = make(chan struct{}) + if o.options.inactivityTimeout > 0 { + o.trafficSeen = make(chan struct{}) + } + o.rpcClient = rpc2.NewClientWithCodec(jsonrpc.NewJSONCodec(conn)) + o.rpcClient.SetBlocking(true) + o.rpcClient.Handle("echo", func(_ *rpc2.Client, args []interface{}, reply *[]interface{}) error { + return o.echo(args, reply) + }) + o.rpcClient.Handle("update", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { + return o.update(args, reply) + }) + o.rpcClient.Handle("update2", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { + return o.update2(args, reply) + }) + o.rpcClient.Handle("update3", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { + return o.update3(args, reply) + }) + go o.rpcClient.Run() +} + +// isEndpointLeader returns true if the currently connected endpoint is leader, +// otherwise false or an error. If the currently connected endpoint is the leader +// and the database is clustered, also returns the database's Server ID. +// Assumes rpcMutex is held. 
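The scheme switch in tryEndpoint accepts unix, tcp and ssl endpoints. A sketch of the corresponding WithEndpoint forms follows; the addresses and the tls.Config are placeholders, and imports match the first sketch plus crypto/tls.

    func newFailoverClient(dbModel model.ClientDBModel, tlsCfg *tls.Config) (client.Client, error) {
        // The first endpoint that connects successfully becomes the active one;
        // the others stay in the list as fallbacks for later reconnects.
        return client.NewOVSDBClient(dbModel,
            client.WithEndpoint("unix:/var/run/ovn/ovnnb_db.sock"), // unix: socket path
            client.WithEndpoint("tcp:10.0.0.1:6641"),               // tcp: host:port
            client.WithEndpoint("ssl:10.0.0.2:6641"),               // ssl: host:port, dialed with the TLS config
            client.WithTLSConfig(tlsCfg),
        )
    }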
+func (o *ovsdbClient) isEndpointLeader(ctx context.Context) (bool, string, error) { + op := ovsdb.Operation{ + Op: ovsdb.OperationSelect, + Table: "Database", + Columns: []string{"name", "model", "leader", "sid"}, + } + results, err := o.transact(ctx, serverDB, true, op) + if err != nil { + return false, "", fmt.Errorf("could not check if server was leader: %w", err) + } + // for now, if no rows are returned, just accept this server + if len(results) != 1 { + return true, "", nil + } + result := results[0] + if len(result.Rows) == 0 { + return true, "", nil + } + + for _, row := range result.Rows { + dbName, ok := row["name"].(string) + if !ok { + return false, "", fmt.Errorf("could not parse name") + } + if dbName != o.primaryDBName { + continue + } + + model, ok := row["model"].(string) + if !ok { + return false, "", fmt.Errorf("could not parse model") + } + + // the database reports whether or not it is part of a cluster via the + // "model" column. If it's not clustered, it is by definition leader. + if model != serverdb.DatabaseModelClustered { + return true, "", nil + } + + // Clustered database must have a Server ID + sid, ok := row["sid"].(ovsdb.UUID) + if !ok { + return false, "", fmt.Errorf("could not parse server id") + } + + leader, ok := row["leader"].(bool) + if !ok { + return false, "", fmt.Errorf("could not parse leader") + } + + return leader, sid.GoUUID, nil + } + + // Extremely unlikely: there is no _Server row for the desired DB (which we made sure existed) + // for now, just continue + o.logger.V(3).Info("Couldn't find a row in _Server for our database. Continuing without leader detection", "database", o.primaryDBName) + return true, "", nil +} + +func (o *ovsdbClient) primaryDB() *database { + return o.databases[o.primaryDBName] +} + +// Schema returns the DatabaseSchema that is being used by the client +// it will be nil until a connection has been established +func (o *ovsdbClient) Schema() ovsdb.DatabaseSchema { + db := o.primaryDB() + db.modelMutex.RLock() + defer db.modelMutex.RUnlock() + return db.model.Schema +} + +// Cache returns the TableCache that is populated from +// ovsdb update notifications. It will be nil until a connection +// has been established, and empty unless you call Monitor +func (o *ovsdbClient) Cache() *cache.TableCache { + db := o.primaryDB() + db.cacheMutex.RLock() + defer db.cacheMutex.RUnlock() + return db.cache +} + +// UpdateEndpoints sets client endpoints +// It is intended to be called at runtime +func (o *ovsdbClient) UpdateEndpoints(endpoints []string) { + o.logger.V(3).Info("update endpoints", "endpoints", endpoints) + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if len(endpoints) == 0 { + endpoints = []string{defaultUnixEndpoint} + } + o.options.endpoints = endpoints + originEps := o.endpoints[:] + var newEps []*epInfo + activeIdx := -1 + for i, address := range o.options.endpoints { + var serverID string + for j, origin := range originEps { + if address == origin.address { + if j == 0 { + activeIdx = i + } + serverID = origin.serverID + break + } + } + newEps = append(newEps, &epInfo{address: address, serverID: serverID}) + } + o.endpoints = newEps + if activeIdx > 0 { + o.moveEndpointFirst(activeIdx) + } else if activeIdx == -1 { + o._disconnect() + } +} + +// SetOption sets a new value for an option. 
+// It may only be called when the client is not connected +func (o *ovsdbClient) SetOption(opt Option) error { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient != nil { + return fmt.Errorf("cannot set option when client is connected") + } + return opt(o.options) +} + +// Connected returns whether or not the client is currently connected to the server +func (o *ovsdbClient) Connected() bool { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + return o.connected +} + +func (o *ovsdbClient) CurrentEndpoint() string { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient == nil { + return "" + } + return o.endpoints[0].address +} + +// DisconnectNotify returns a channel which will notify the caller when the +// server has disconnected +func (o *ovsdbClient) DisconnectNotify() chan struct{} { + return o.disconnect +} + +// RFC 7047 : Section 4.1.6 : Echo +func (o *ovsdbClient) echo(args []interface{}, reply *[]interface{}) error { + *reply = args + return nil +} + +// RFC 7047 : Update Notification Section 4.1.6 +// params is an array of length 2: [json-value, table-updates] +// - json-value: the arbitrary json-value passed when creating the Monitor, i.e. the "cookie" +// - table-updates: map of table name to table-update. Table-update is a map of uuid to (old, new) row paris +func (o *ovsdbClient) update(params []json.RawMessage, reply *[]interface{}) error { + cookie := MonitorCookie{} + *reply = []interface{}{} + if len(params) > 2 { + return fmt.Errorf("update requires exactly 2 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var updates ovsdb.TableUpdates + err = json.Unmarshal(params[1], &updates) + if err != nil { + return err + } + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + o.metrics.numUpdates.WithLabelValues(cookie.DatabaseName).Inc() + for tableName := range updates { + o.metrics.numTableUpdates.WithLabelValues(cookie.DatabaseName, tableName).Inc() + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{&updates, nil, ""}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update(cookie.ID, updates) + db.cacheMutex.RUnlock() + + if err != nil { + o.errorCh <- err + } + + return err +} + +// update2 handling from ovsdb-server.7 +func (o *ovsdbClient) update2(params []json.RawMessage, reply *[]interface{}) error { + cookie := MonitorCookie{} + *reply = []interface{}{} + if len(params) > 2 { + return fmt.Errorf("update2 requires exactly 2 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var updates ovsdb.TableUpdates2 + err = json.Unmarshal(params[1], &updates) + if err != nil { + return err + } + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, ""}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update2(cookie, updates) + db.cacheMutex.RUnlock() + + if err != nil { + o.errorCh <- err + } + + return err +} + +// update3 handling from ovsdb-server.7 +func (o 
*ovsdbClient) update3(params []json.RawMessage, reply *[]interface{}) error { + cookie := MonitorCookie{} + *reply = []interface{}{} + if len(params) > 3 { + return fmt.Errorf("update requires exactly 3 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var lastTransactionID string + err = json.Unmarshal(params[1], &lastTransactionID) + if err != nil { + return err + } + var updates ovsdb.TableUpdates2 + err = json.Unmarshal(params[2], &updates) + if err != nil { + return err + } + + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, lastTransactionID}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update2(cookie, updates) + db.cacheMutex.RUnlock() + + if err == nil { + db.monitorsMutex.Lock() + mon := db.monitors[cookie.ID] + mon.LastTransactionID = lastTransactionID + db.monitorsMutex.Unlock() + } + + return err +} + +// getSchema returns the schema in use for the provided database name +// RFC 7047 : get_schema +// Should only be called when mutex is held +func (o *ovsdbClient) getSchema(ctx context.Context, dbName string) (ovsdb.DatabaseSchema, error) { + args := ovsdb.NewGetSchemaArgs(dbName) + var reply ovsdb.DatabaseSchema + err := o.rpcClient.CallWithContext(ctx, "get_schema", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ovsdb.DatabaseSchema{}, ErrNotConnected + } + return ovsdb.DatabaseSchema{}, err + } + return reply, err +} + +// listDbs returns the list of databases on the server +// RFC 7047 : list_dbs +// Should only be called when mutex is held +func (o *ovsdbClient) listDbs(ctx context.Context) ([]string, error) { + var dbs []string + err := o.rpcClient.CallWithContext(ctx, "list_dbs", nil, &dbs) + if err != nil { + if err == rpc2.ErrShutdown { + return nil, ErrNotConnected + } + return nil, fmt.Errorf("listdbs failure - %v", err) + } + return dbs, err +} + +// logFromContext returns a Logger from ctx or return the default logger +func (o *ovsdbClient) logFromContext(ctx context.Context) *logr.Logger { + if logger, err := logr.FromContext(ctx); err == nil { + return &logger + } + return o.logger +} + +// Transact performs the provided Operations on the database +// RFC 7047 : transact +func (o *ovsdbClient) Transact(ctx context.Context, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { + logger := o.logFromContext(ctx) + o.rpcMutex.RLock() + if o.rpcClient == nil || !o.connected { + o.rpcMutex.RUnlock() + if o.options.reconnect { + logger.V(5).Info("blocking transaction until reconnected", "operations", + fmt.Sprintf("%+v", operation)) + ticker := time.NewTicker(50 * time.Millisecond) + defer ticker.Stop() + ReconnectWaitLoop: + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("%w: while awaiting reconnection", ctx.Err()) + case <-ticker.C: + o.rpcMutex.RLock() + if o.rpcClient != nil && o.connected { + break ReconnectWaitLoop + } + o.rpcMutex.RUnlock() + } + } + } else { + return nil, ErrNotConnected + } + } + defer o.rpcMutex.RUnlock() + return o.transact(ctx, o.primaryDBName, false, operation...) 
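A hedged sketch of how Transact is usually driven from the model-level API: build operations with Create (or a ConditionalAPI), send them, and check each OperationResult. The helper name and the field values are assumptions; the model and imports follow the first sketch.

    func createSwitch(ctx context.Context, ovs client.Client) error {
        ls := &MyLogicalSwitch{UUID: "ls1", Name: "example-switch"} // placeholder values

        // Create returns the ovsdb.Operation list; Transact sends it (RFC 7047 "transact").
        ops, err := ovs.Create(ls)
        if err != nil {
            return err
        }
        results, err := ovs.Transact(ctx, ops...)
        if err != nil {
            return err
        }
        // One result per operation; a non-empty Error marks a failed operation.
        for i, r := range results {
            if r.Error != "" {
                return fmt.Errorf("operation %d failed: %s", i, r.Error)
            }
        }
        return nil
    }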
+} + +func (o *ovsdbClient) transact(ctx context.Context, dbName string, skipChWrite bool, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { + logger := o.logFromContext(ctx) + var reply []ovsdb.OperationResult + db := o.databases[dbName] + db.modelMutex.RLock() + schema := o.databases[dbName].model.Schema + db.modelMutex.RUnlock() + if reflect.DeepEqual(schema, ovsdb.DatabaseSchema{}) { + return nil, fmt.Errorf("cannot transact to database %s: schema unknown", dbName) + } + if ok := schema.ValidateOperations(operation...); !ok { + return nil, fmt.Errorf("validation failed for the operation") + } + + args := ovsdb.NewTransactArgs(dbName, operation...) + if o.rpcClient == nil { + return nil, ErrNotConnected + } + dbgLogger := logger.WithValues("database", dbName).V(4) + if dbgLogger.Enabled() { + dbgLogger.Info("transacting operations", "operations", fmt.Sprintf("%+v", operation)) + } + err := o.rpcClient.CallWithContext(ctx, "transact", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return nil, ErrNotConnected + } + return nil, err + } + + if !skipChWrite && o.trafficSeen != nil { + o.trafficSeen <- struct{}{} + } + return reply, nil +} + +// MonitorAll is a convenience method to monitor every table/column +func (o *ovsdbClient) MonitorAll(ctx context.Context) (MonitorCookie, error) { + m := newMonitor() + for name := range o.primaryDB().model.Types() { + m.Tables = append(m.Tables, TableMonitor{Table: name}) + } + return o.Monitor(ctx, m) +} + +// MonitorCancel will request cancel a previously issued monitor request +// RFC 7047 : monitor_cancel +func (o *ovsdbClient) MonitorCancel(ctx context.Context, cookie MonitorCookie) error { + var reply ovsdb.OperationResult + args := ovsdb.NewMonitorCancelArgs(cookie) + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if o.rpcClient == nil { + return ErrNotConnected + } + err := o.rpcClient.CallWithContext(ctx, "monitor_cancel", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + return err + } + if reply.Error != "" { + return fmt.Errorf("error while executing transaction: %s", reply.Error) + } + o.primaryDB().monitorsMutex.Lock() + defer o.primaryDB().monitorsMutex.Unlock() + delete(o.primaryDB().monitors, cookie.ID) + o.metrics.numMonitors.Dec() + return nil +} + +// Monitor will provide updates for a given table/column +// and populate the cache with them. Subsequent updates will be processed +// by the Update Notifications +// RFC 7047 : monitor +func (o *ovsdbClient) Monitor(ctx context.Context, monitor *Monitor) (MonitorCookie, error) { + cookie := newMonitorCookie(o.primaryDBName) + db := o.databases[o.primaryDBName] + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + return cookie, o.monitor(ctx, cookie, false, monitor) +} + +// If fields is provided, the request will be constrained to the provided columns +// If no fields are provided, all columns will be used +func newMonitorRequest(data *mapper.Info, fields []string, conditions []ovsdb.Condition) (*ovsdb.MonitorRequest, error) { + var columns []string + if len(fields) > 0 { + columns = append(columns, fields...) 
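A small sketch of the MonitorAll and MonitorCancel pair described above; the cookie returned by MonitorAll is what identifies the monitor when cancelling it. The function name is illustrative.

    func monitorEverything(ctx context.Context, ovs client.Client) error {
        // Populates the cache with every table/column in the client DB model
        // and keeps it updated via the notification handlers above.
        cookie, err := ovs.MonitorAll(ctx)
        if err != nil {
            return err
        }

        // ... use the cache-backed Get/List/Where APIs here ...

        // Stop receiving updates for this monitor (RFC 7047 "monitor_cancel").
        return ovs.MonitorCancel(ctx, cookie)
    }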
+ } else { + for c := range data.Metadata.TableSchema.Columns { + columns = append(columns, c) + } + } + return &ovsdb.MonitorRequest{Columns: columns, Where: conditions, Select: ovsdb.NewDefaultMonitorSelect()}, nil +} + +// monitor must only be called with a lock on monitorsMutex +// +//gocyclo:ignore +func (o *ovsdbClient) monitor(ctx context.Context, cookie MonitorCookie, reconnecting bool, monitor *Monitor) error { + // if we're reconnecting, we already hold the rpcMutex + if !reconnecting { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + } + if o.rpcClient == nil { + return ErrNotConnected + } + if len(monitor.Errors) != 0 { + var errString []string + for _, err := range monitor.Errors { + errString = append(errString, err.Error()) + } + return fmt.Errorf(strings.Join(errString, ". ")) + } + if len(monitor.Tables) == 0 { + return fmt.Errorf("at least one table should be monitored") + } + dbName := cookie.DatabaseName + db := o.databases[dbName] + db.modelMutex.RLock() + typeMap := db.model.Types() + requests := make(map[string]ovsdb.MonitorRequest) + for _, o := range monitor.Tables { + _, ok := typeMap[o.Table] + if !ok { + return fmt.Errorf("type for table %s does not exist in model", o.Table) + } + model, err := db.model.NewModel(o.Table) + if err != nil { + return err + } + info, err := db.model.NewModelInfo(model) + if err != nil { + return err + } + request, err := newMonitorRequest(info, o.Fields, o.Conditions) + if err != nil { + return err + } + requests[o.Table] = *request + } + db.modelMutex.RUnlock() + + var args []interface{} + if monitor.Method == ovsdb.ConditionalMonitorSinceRPC { + // If we are reconnecting a CondSince monitor that is the only + // monitor, then we can use its LastTransactionID since it is + // valid (because we're reconnecting) and we can safely keep + // the cache intact (because it's the only monitor). 
+ transactionID := emptyUUID + if reconnecting && len(db.monitors) == 1 { + transactionID = monitor.LastTransactionID + } + args = ovsdb.NewMonitorCondSinceArgs(dbName, cookie, requests, transactionID) + } else { + args = ovsdb.NewMonitorArgs(dbName, cookie, requests) + } + var err error + var tableUpdates interface{} + + var lastTransactionFound bool + switch monitor.Method { + case ovsdb.MonitorRPC: + var reply ovsdb.TableUpdates + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + tableUpdates = reply + case ovsdb.ConditionalMonitorRPC: + var reply ovsdb.TableUpdates2 + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + tableUpdates = reply + case ovsdb.ConditionalMonitorSinceRPC: + var reply ovsdb.MonitorCondSinceReply + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + if err == nil && reply.Found { + monitor.LastTransactionID = reply.LastTransactionID + lastTransactionFound = true + } + tableUpdates = reply.Updates + default: + return fmt.Errorf("unsupported monitor method: %v", monitor.Method) + } + + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + if err.Error() == "unknown method" { + if monitor.Method == ovsdb.ConditionalMonitorSinceRPC { + o.logger.V(3).Error(err, "method monitor_cond_since not supported, falling back to monitor_cond") + monitor.Method = ovsdb.ConditionalMonitorRPC + return o.monitor(ctx, cookie, reconnecting, monitor) + } + if monitor.Method == ovsdb.ConditionalMonitorRPC { + o.logger.V(3).Error(err, "method monitor_cond not supported, falling back to monitor") + monitor.Method = ovsdb.MonitorRPC + return o.monitor(ctx, cookie, reconnecting, monitor) + } + } + return err + } + + if !reconnecting { + db.monitors[cookie.ID] = monitor + o.metrics.numMonitors.Inc() + } + + db.cacheMutex.Lock() + defer db.cacheMutex.Unlock() + + // On reconnect, purge the cache _unless_ the only monitor is a + // MonitorCondSince one, whose LastTransactionID was known to the + // server. In this case the reply contains only updates to the existing + // cache data, while otherwise it includes complete DB data so we must + // purge to get rid of old rows. 
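A sketch of how the column and condition lists consumed by newMonitorRequest are normally supplied, via NewMonitor with a conditional table option. The model, the condition value and the column choice are placeholders.

    func monitorSubset(ctx context.Context, ovs client.Client) (client.MonitorCookie, error) {
        ls := &MyLogicalSwitch{}
        m := ovs.NewMonitor(
            client.WithConditionalTable(ls,
                // Only rows whose name matches the placeholder value.
                []model.Condition{{
                    Field:    &ls.Name,
                    Function: ovsdb.ConditionEqual,
                    Value:    "example-switch",
                }},
                // Restrict the monitor to these columns.
                &ls.Name, &ls.Ports,
            ),
        )
        return ovs.Monitor(ctx, m)
    }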
+ if reconnecting && (len(db.monitors) > 1 || !lastTransactionFound) { + db.cache.Purge(db.model) + } + + if monitor.Method == ovsdb.MonitorRPC { + u := tableUpdates.(ovsdb.TableUpdates) + err = db.cache.Populate(u) + } else { + u := tableUpdates.(ovsdb.TableUpdates2) + err = db.cache.Populate2(u) + } + + if err != nil { + return err + } + + // populate any deferred updates + db.deferUpdates = false + for _, update := range db.deferredUpdates { + if update.updates != nil { + if err = db.cache.Populate(*update.updates); err != nil { + return err + } + } + + if update.updates2 != nil { + if err = db.cache.Populate2(*update.updates2); err != nil { + return err + } + } + if len(update.lastTxnID) > 0 { + db.monitors[cookie.ID].LastTransactionID = update.lastTxnID + } + } + // clear deferred updates for next time + db.deferredUpdates = make([]*bufferedUpdate, 0) + + return err +} + +// Echo tests the liveness of the OVSDB connetion +func (o *ovsdbClient) Echo(ctx context.Context) error { + args := ovsdb.NewEchoArgs() + var reply []interface{} + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient == nil { + return ErrNotConnected + } + err := o.rpcClient.CallWithContext(ctx, "echo", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + } + if !reflect.DeepEqual(args, reply) { + return fmt.Errorf("incorrect server response: %v, %v", args, reply) + } + return nil +} + +// watchForLeaderChange will trigger a reconnect if the connected endpoint +// ever loses leadership +func (o *ovsdbClient) watchForLeaderChange() error { + updates := make(chan model.Model) + o.databases[serverDB].cache.AddEventHandler(&cache.EventHandlerFuncs{ + UpdateFunc: func(table string, _, new model.Model) { + if table == "Database" { + updates <- new + } + }, + }) + + m := newMonitor() + // NOTE: _Server does not support monitor_cond_since + m.Method = ovsdb.ConditionalMonitorRPC + m.Tables = []TableMonitor{{Table: "Database"}} + db := o.databases[serverDB] + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + err := o.monitor(context.Background(), newMonitorCookie(serverDB), false, m) + if err != nil { + return err + } + + go func() { + for m := range updates { + dbInfo, ok := m.(*serverdb.Database) + if !ok { + continue + } + + // Ignore the dbInfo for _Server + if dbInfo.Name != o.primaryDBName { + continue + } + + // Only handle leadership changes for clustered databases + if dbInfo.Model != serverdb.DatabaseModelClustered { + continue + } + + // Clustered database servers must have a valid Server ID + var sid string + if dbInfo.Sid != nil { + sid = *dbInfo.Sid + } + if sid == "" { + o.logger.V(3).Info("clustered database update contained invalid server ID") + continue + } + + o.rpcMutex.Lock() + if !dbInfo.Leader && o.connected { + activeEndpoint := o.endpoints[0] + if sid == activeEndpoint.serverID { + o.logger.V(3).Info("endpoint lost leader, reconnecting", + "endpoint", activeEndpoint.address, "sid", sid) + // don't immediately reconnect to the active endpoint since it's no longer leader + o.moveEndpointLast(0) + o._disconnect() + } else { + o.logger.V(3).Info("endpoint lost leader but had unexpected server ID", + "endpoint", activeEndpoint.address, + "expected", activeEndpoint.serverID, "found", sid) + } + } + o.rpcMutex.Unlock() + } + }() + return nil +} + +func (o *ovsdbClient) handleClientErrors(stopCh <-chan struct{}) { + defer o.handlerShutdown.Done() + var errColumnNotFound *mapper.ErrColumnNotFound + var errCacheInconsistent 
*cache.ErrCacheInconsistent + var errIndexExists *cache.ErrIndexExists + for { + select { + case <-stopCh: + return + case err := <-o.errorCh: + if errors.As(err, &errColumnNotFound) { + o.logger.V(3).Error(err, "error updating cache, DB schema may be newer than client!") + } else if errors.As(err, &errCacheInconsistent) || errors.As(err, &errIndexExists) { + // trigger a reconnect, which will purge the cache + // hopefully a rebuild will fix any inconsistency + o.logger.V(3).Error(err, "triggering reconnect to rebuild cache") + // for rebuilding cache with mon_cond_since (not yet fully supported in libovsdb) we + // need to reset the last txn ID + for _, db := range o.databases { + db.monitorsMutex.Lock() + for _, mon := range db.monitors { + mon.LastTransactionID = emptyUUID + } + db.monitorsMutex.Unlock() + } + o.Disconnect() + } else { + o.logger.V(3).Error(err, "error updating cache") + } + } + } +} + +func (o *ovsdbClient) sendEcho(args []interface{}, reply *[]interface{}) *rpc2.Call { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient == nil { + return nil + } + return o.rpcClient.Go("echo", args, reply, make(chan *rpc2.Call, 1)) +} + +func (o *ovsdbClient) handleInactivityProbes() { + defer o.handlerShutdown.Done() + echoReplied := make(chan string) + var lastEcho string + stopCh := o.stopCh + trafficSeen := o.trafficSeen + for { + select { + case <-stopCh: + return + case <-trafficSeen: + // We got some traffic from the server, restart our timer + case ts := <-echoReplied: + // Got a response from the server, check it against lastEcho; if same clear lastEcho; if not same Disconnect() + if ts != lastEcho { + o.Disconnect() + return + } + lastEcho = "" + case <-time.After(o.options.inactivityTimeout): + // If there's a lastEcho already, then we didn't get a server reply, disconnect + if lastEcho != "" { + o.Disconnect() + return + } + // Otherwise send an echo + thisEcho := fmt.Sprintf("%d", time.Now().UnixMicro()) + args := []interface{}{"libovsdb echo", thisEcho} + var reply []interface{} + // Can't use o.Echo() because it blocks; we need the Call object direct from o.rpcClient.Go() + call := o.sendEcho(args, &reply) + if call == nil { + o.Disconnect() + return + } + lastEcho = thisEcho + go func() { + // Wait for the echo reply + select { + case <-stopCh: + return + case <-call.Done: + if call.Error != nil { + // RPC timeout; disconnect + o.logger.V(3).Error(call.Error, "server echo reply error") + o.Disconnect() + } else if !reflect.DeepEqual(args, reply) { + o.logger.V(3).Info("warning: incorrect server echo reply", + "expected", args, "reply", reply) + o.Disconnect() + } else { + // Otherwise stuff thisEcho into the echoReplied channel + echoReplied <- thisEcho + } + } + }() + } + } +} + +func (o *ovsdbClient) handleDisconnectNotification() { + <-o.rpcClient.DisconnectNotify() + // close the stopCh, which will stop the cache event processor + close(o.stopCh) + if o.trafficSeen != nil { + close(o.trafficSeen) + } + o.metrics.numDisconnects.Inc() + // wait for client related handlers to shutdown + o.handlerShutdown.Wait() + o.rpcMutex.Lock() + if o.options.reconnect && !o.shutdown { + o.rpcClient = nil + o.rpcMutex.Unlock() + suppressionCounter := 1 + connect := func() error { + // need to ensure deferredUpdates is cleared on every reconnect attempt + for _, db := range o.databases { + db.cacheMutex.Lock() + db.deferredUpdates = make([]*bufferedUpdate, 0) + db.deferUpdates = true + db.cacheMutex.Unlock() + } + ctx, cancel := 
context.WithTimeout(context.Background(), o.options.timeout) + defer cancel() + err := o.connect(ctx, true) + if err != nil { + if suppressionCounter < 5 { + o.logger.V(2).Error(err, "failed to reconnect") + } else if suppressionCounter == 5 { + o.logger.V(2).Error(err, "reconnect has failed 5 times, suppressing logging "+ + "for future attempts") + } + } + suppressionCounter++ + return err + } + o.logger.V(3).Info("connection lost, reconnecting", "endpoint", o.endpoints[0].address) + err := backoff.Retry(connect, o.options.backoff) + if err != nil { + // TODO: We should look at passing this back to the + // caller to handle + panic(err) + } + // this goroutine finishes, and is replaced with a new one (from Connect) + return + } + + // clear connection state + o.rpcClient = nil + o.rpcMutex.Unlock() + + for _, db := range o.databases { + db.cacheMutex.Lock() + defer db.cacheMutex.Unlock() + db.cache = nil + // need to defer updates if/when we reconnect and clear any stale updates + db.deferUpdates = true + db.deferredUpdates = make([]*bufferedUpdate, 0) + + db.modelMutex.Lock() + defer db.modelMutex.Unlock() + db.model = model.NewPartialDatabaseModel(db.model.Client()) + + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + db.monitors = make(map[string]*Monitor) + } + o.metrics.numMonitors.Set(0) + + o.shutdownMutex.Lock() + defer o.shutdownMutex.Unlock() + o.shutdown = false + + select { + case o.disconnect <- struct{}{}: + // sent disconnect notification to client + default: + // client is not listening to the channel + } +} + +// _disconnect will close the connection to the OVSDB server +// If the client was created with WithReconnect then the client +// will reconnect afterwards. Assumes rpcMutex is held. +func (o *ovsdbClient) _disconnect() { + o.connected = false + if o.rpcClient == nil { + return + } + o.rpcClient.Close() +} + +// Disconnect will close the connection to the OVSDB server +// If the client was created with WithReconnect then the client +// will reconnect afterwards +func (o *ovsdbClient) Disconnect() { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + o._disconnect() +} + +// Close will close the connection to the OVSDB server +// It will remove all stored state ready for the next connection +// Even If the client was created with WithReconnect it will not reconnect afterwards +func (o *ovsdbClient) Close() { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + o.connected = false + if o.rpcClient == nil { + return + } + o.shutdownMutex.Lock() + defer o.shutdownMutex.Unlock() + o.shutdown = true + o.rpcClient.Close() +} + +// Ensures the cache is consistent by evaluating that the client is connected +// and the monitor is fully setup, with the cache populated. Caller must hold +// the database's cache mutex for reading. +func isCacheConsistent(db *database) bool { + // This works because when a client is disconnected the deferUpdates variable + // will be set to true. deferUpdates is also protected by the db.cacheMutex. + // When the client reconnects and then re-establishes the monitor; the final step + // is to process all deferred updates, set deferUpdates back to false, and unlock cacheMutex + return !db.deferUpdates +} + +// best effort to ensure cache is in a good state for reading. RLocks the +// database's cache before returning; caller must always unlock. 
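When the client is not built with WithReconnect, the disconnect path above ends by signalling the DisconnectNotify channel. A minimal sketch of a caller reacting to that, with an assumed onDown callback supplied by the application.

    func notifyOnDisconnect(ovs client.Client, onDown func()) {
        go func() {
            // Fires after the handler above has cleared the client state.
            <-ovs.DisconnectNotify()
            onDown() // e.g. re-run Connect and re-establish monitors
        }()
    }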
+func waitForCacheConsistent(ctx context.Context, db *database, logger *logr.Logger, dbName string) { + if !hasMonitors(db) { + db.cacheMutex.RLock() + return + } + // Check immediately as a fastpath + db.cacheMutex.RLock() + if isCacheConsistent(db) { + return + } + db.cacheMutex.RUnlock() + + ticker := time.NewTicker(50 * time.Millisecond) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + logger.V(3).Info("warning: unable to ensure cache consistency for reading", + "database", dbName) + db.cacheMutex.RLock() + return + case <-ticker.C: + db.cacheMutex.RLock() + if isCacheConsistent(db) { + return + } + db.cacheMutex.RUnlock() + } + } +} + +func hasMonitors(db *database) bool { + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + return len(db.monitors) > 0 +} + +// Client API interface wrapper functions +// We add this wrapper to allow users to access the API directly on the +// client object + +// Get implements the API interface's Get function +func (o *ovsdbClient) Get(ctx context.Context, model model.Model) error { + primaryDB := o.primaryDB() + waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName) + defer primaryDB.cacheMutex.RUnlock() + return primaryDB.api.Get(ctx, model) +} + +// Create implements the API interface's Create function +func (o *ovsdbClient) Create(models ...model.Model) ([]ovsdb.Operation, error) { + return o.primaryDB().api.Create(models...) +} + +// List implements the API interface's List function +func (o *ovsdbClient) List(ctx context.Context, result interface{}) error { + primaryDB := o.primaryDB() + waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName) + defer primaryDB.cacheMutex.RUnlock() + return primaryDB.api.List(ctx, result) +} + +// Where implements the API interface's Where function +func (o *ovsdbClient) Where(models ...model.Model) ConditionalAPI { + return o.primaryDB().api.Where(models...) +} + +// WhereAny implements the API interface's WhereAny function +func (o *ovsdbClient) WhereAny(m model.Model, conditions ...model.Condition) ConditionalAPI { + return o.primaryDB().api.WhereAny(m, conditions...) +} + +// WhereAll implements the API interface's WhereAll function +func (o *ovsdbClient) WhereAll(m model.Model, conditions ...model.Condition) ConditionalAPI { + return o.primaryDB().api.WhereAll(m, conditions...) 
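A sketch of the cache-backed Get and List wrappers above. Both return data only after a Monitor or MonitorAll call has populated the cache, and both wait for pending monitor updates to be applied before reading. The UUID is a placeholder.

    func readFromCache(ctx context.Context, ovs client.Client) error {
        // Get fills in the remaining fields of a model identified by an index (here _uuid).
        ls := &MyLogicalSwitch{UUID: "00000000-0000-0000-0000-000000000001"}
        if err := ovs.Get(ctx, ls); err != nil {
            return err
        }

        // List fills the slice with every cached row of the table the model maps to.
        var all []MyLogicalSwitch
        return ovs.List(ctx, &all)
    }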
+} + +// WhereCache implements the API interface's WhereCache function +func (o *ovsdbClient) WhereCache(predicate interface{}) ConditionalAPI { + return o.primaryDB().api.WhereCache(predicate) +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/condition.go b/vendor/github.com/ovn-org/libovsdb/client/condition.go new file mode 100644 index 000000000..1dfabda02 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/condition.go @@ -0,0 +1,248 @@ +package client + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// Conditional is the interface used by the ConditionalAPI to match on cache objects +// and generate ovsdb conditions +type Conditional interface { + // Generate returns a list of lists of conditions to be used in Operations + // Each element in the (outer) list corresponds to an operation + Generate() ([][]ovsdb.Condition, error) + // Returns the models that match the conditions + Matches() (map[string]model.Model, error) + // returns the table that this condition is associated with + Table() string +} + +func generateConditionsFromModels(dbModel model.DatabaseModel, models map[string]model.Model) ([][]ovsdb.Condition, error) { + anyConditions := make([][]ovsdb.Condition, 0, len(models)) + for _, model := range models { + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + allConditions, err := dbModel.Mapper.NewEqualityCondition(info) + if err != nil { + return nil, err + } + anyConditions = append(anyConditions, allConditions) + } + return anyConditions, nil +} + +func generateOvsdbConditionsFromModelConditions(dbModel model.DatabaseModel, info *mapper.Info, conditions []model.Condition, singleOp bool) ([][]ovsdb.Condition, error) { + anyConditions := [][]ovsdb.Condition{} + if singleOp { + anyConditions = append(anyConditions, []ovsdb.Condition{}) + } + for _, condition := range conditions { + ovsdbCond, err := dbModel.Mapper.NewCondition(info, condition.Field, condition.Function, condition.Value) + if err != nil { + return nil, err + } + allConditions := []ovsdb.Condition{*ovsdbCond} + if singleOp { + anyConditions[0] = append(anyConditions[0], allConditions...) + } else { + anyConditions = append(anyConditions, allConditions) + } + } + return anyConditions, nil +} + +// equalityConditional uses the indexes available in a provided model to find a +// matching model in the database. +type equalityConditional struct { + tableName string + models []model.Model + cache *cache.TableCache +} + +func (c *equalityConditional) Table() string { + return c.tableName +} + +// Returns the models that match the indexes available through the provided +// model. +func (c *equalityConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + return tableCache.RowsByModels(c.models) +} + +// Generate conditions based on the equality of the first available index. If +// the index can be matched against a model in the cache, the condition will be +// based on the UUID of the found model. Otherwise, the conditions will be based +// on the index. 
+func (c *equalityConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil && err != ErrNotFound { + return nil, err + } + if len(models) == 0 { + // no cache hits, generate condition from models we were given + modelMap := make(map[string]model.Model, len(c.models)) + for i, m := range c.models { + // generateConditionsFromModels() ignores the map keys + // so just use the range index + modelMap[fmt.Sprintf("%d", i)] = m + } + return generateConditionsFromModels(c.cache.DatabaseModel(), modelMap) + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// NewEqualityCondition creates a new equalityConditional +func newEqualityConditional(table string, cache *cache.TableCache, models []model.Model) (Conditional, error) { + return &equalityConditional{ + tableName: table, + models: models, + cache: cache, + }, nil +} + +// explicitConditional generates conditions based on the provided Condition list +type explicitConditional struct { + tableName string + anyConditions [][]ovsdb.Condition + cache *cache.TableCache +} + +func (c *explicitConditional) Table() string { + return c.tableName +} + +// Returns the models that match the conditions +func (c *explicitConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + found := map[string]model.Model{} + for _, allConditions := range c.anyConditions { + models, err := tableCache.RowsByCondition(allConditions) + if err != nil { + return nil, err + } + for uuid, model := range models { + found[uuid] = model + } + } + return found, nil +} + +// Generate returns conditions based on the provided Condition list +func (c *explicitConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil && err != ErrNotFound { + return nil, err + } + if len(models) == 0 { + // no cache hits, return conditions we were given + return c.anyConditions, nil + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// newExplicitConditional creates a new explicitConditional +func newExplicitConditional(table string, cache *cache.TableCache, matchAll bool, model model.Model, cond ...model.Condition) (Conditional, error) { + dbModel := cache.DatabaseModel() + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + anyConditions, err := generateOvsdbConditionsFromModelConditions(dbModel, info, cond, matchAll) + if err != nil { + return nil, err + } + return &explicitConditional{ + tableName: table, + anyConditions: anyConditions, + cache: cache, + }, nil +} + +// predicateConditional is a Conditional that calls a provided function pointer +// to match on models. 
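The three conditional flavours implemented in this file map onto Where, WhereAll/WhereAny and WhereCache. A sketch generating delete operations each way; the names and values are placeholders, the model comes from the earlier sketch, and the imports add strings.

    func conditionalDeleteOps(ovs client.Client) ([]ovsdb.Operation, error) {
        ls := &MyLogicalSwitch{UUID: "00000000-0000-0000-0000-000000000001"} // placeholder

        // Equality conditional: matches on the model's indexes (here _uuid).
        byIndex, err := ovs.Where(ls).Delete()
        if err != nil {
            return nil, err
        }

        // Explicit conditional: field, function and value are given directly.
        byCondition, err := ovs.WhereAll(ls, model.Condition{
            Field:    &ls.Name,
            Function: ovsdb.ConditionEqual,
            Value:    "example-switch",
        }).Delete()
        if err != nil {
            return nil, err
        }

        // Predicate conditional: a Go function is evaluated against cached rows.
        byPredicate, err := ovs.WhereCache(func(sw *MyLogicalSwitch) bool {
            return strings.HasPrefix(sw.Name, "tmp_")
        }).Delete()
        if err != nil {
            return nil, err
        }

        return append(append(byIndex, byCondition...), byPredicate...), nil
    }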
+type predicateConditional struct { + tableName string + predicate interface{} + cache *cache.TableCache +} + +// matches returns the result of the execution of the predicate +// Type verifications are not performed +// Returns the models that match the conditions +func (c *predicateConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + found := map[string]model.Model{} + // run the predicate on a shallow copy of the models for speed and only + // clone the matches + for u, m := range tableCache.RowsShallow() { + ret := reflect.ValueOf(c.predicate).Call([]reflect.Value{reflect.ValueOf(m)}) + if ret[0].Bool() { + found[u] = model.Clone(m) + } + } + return found, nil +} + +func (c *predicateConditional) Table() string { + return c.tableName +} + +// generate returns a list of conditions that match, by _uuid equality, all the objects that +// match the predicate +func (c *predicateConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil { + return nil, err + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// newPredicateConditional creates a new predicateConditional +func newPredicateConditional(table string, cache *cache.TableCache, predicate interface{}) (Conditional, error) { + return &predicateConditional{ + tableName: table, + predicate: predicate, + cache: cache, + }, nil +} + +// errorConditional is a conditional that encapsulates an error +// It is used to delay the reporting of errors from conditional creation to API method call +type errorConditional struct { + err error +} + +func (e *errorConditional) Matches() (map[string]model.Model, error) { + return nil, e.err +} + +func (e *errorConditional) Table() string { + return "" +} + +func (e *errorConditional) Generate() ([][]ovsdb.Condition, error) { + return nil, e.err +} + +func newErrorConditional(err error) Conditional { + return &errorConditional{ + err: fmt.Errorf("conditionerror: %s", err.Error()), + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/config.go b/vendor/github.com/ovn-org/libovsdb/client/config.go new file mode 100644 index 000000000..a9c00f56a --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/config.go @@ -0,0 +1,27 @@ +/** + * Copyright (c) 2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + **/ + +package client + +import ( + "crypto/tls" +) + +// Config is a structure used in provisioning a connection to ovsdb. +type Config struct { + Addr string + TLSConfig *tls.Config +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/doc.go b/vendor/github.com/ovn-org/libovsdb/client/doc.go new file mode 100644 index 000000000..90e409ee7 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/doc.go @@ -0,0 +1,164 @@ +/* +Package client connects to, monitors and interacts with OVSDB servers (RFC7047). 
+ +This package uses structs, that contain the 'ovs' field tag to determine which field goes to +which column in the database. We refer to pointers to this structs as Models. Example: + + type MyLogicalSwitch struct { + UUID string `ovsdb:"_uuid"` // _uuid tag is mandatory + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` + Config map[string]string `ovsdb:"other_config"` + } + +Based on these Models a Database Model (see ClientDBModel type) is built to represent +the entire OVSDB: + + clientDBModel, _ := client.NewClientDBModel("OVN_Northbound", + map[string]client.Model{ + "Logical_Switch": &MyLogicalSwitch{}, + }) + + +The ClientDBModel represents the entire Database (or the part of it we're interested in). +Using it, the libovsdb.client package is able to properly encode and decode OVSDB messages +and store them in Model instances. +A client instance is created by simply specifying the connection information and the database model: + + ovs, _ := client.Connect(context.Background(), clientDBModel) + +Main API + +After creating a OvsdbClient using the Connect() function, we can use a number of CRUD-like +to interact with the database: +List(), Get(), Create(), Update(), Mutate(), Delete(). + +The specific database table that the operation targets is automatically determined based on the type +of the parameter. + +In terms of return values, some of these functions like Create(), Update(), Mutate() and Delete(), +interact with the database so they return list of ovsdb.Operation objects that can be grouped together +and passed to client.Transact(). + +Others, such as List() and Get(), interact with the client's internal cache and are able to +return Model instances (or a list thereof) directly. + +Conditions + +Some API functions (Create() and Get()), can be run directly. Others, require us to use +a ConditionalAPI. The ConditionalAPI injects RFC7047 Conditions into ovsdb Operations as well as +uses the Conditions to search the internal cache. + +The ConditionalAPI is created using the Where(), WhereCache() and WhereAll() functions. + +Where() accepts a Model (pointer to a struct with ovs tags) and a number of Condition instances. +Conditions must refer to fields of the provided Model (via pointer to fields). Example: + + ls = &MyLogicalSwitch {} + ovs.Where(ls, client.Condition { + Field: &ls.Ports, + Function: ovsdb.ConditionIncludes, + Value: []string{"portUUID"}, + }) + +If no client.Condition is provided, the client will use any of fields that correspond to indexes to +generate an appropriate condition. Therefore the following two statements are equivalent: + + ls = &MyLogicalSwitch {UUID:"myUUID"} + + ovs.Where(ls) + + ovs.Where(ls, client.Condition { + Field: &ls.UUID, + Function: ovsdb.ConditionEqual, + Value: "myUUID"}, + }) + +Where() accepts multiple Condition instances (through variadic arguments). +If provided, the client will generate multiple operations each matching one condition. +For example, the following operation will delete all the Logical Switches named "foo" OR "bar": + + ops, err := ovs.Where(ls, + client.Condition { + Field: &ls.Name + Function: ovsdb.ConditionEqual, + Value: "foo", + },client.Condition { + Field: &ls.Port, + Function: ovsdb.ConditionIncludes, + Value: "bar", + }).Delete() + +To create a Condition that matches all of the conditions simultaneously (i.e: AND semantics), use WhereAll(). 
+ +Where() or WhereAll() evaluate the provided index values or explicit conditions against the cache and generate +conditions based on the UUIDs of matching models. If no matches are found in the cache, the generated conditions +will be based on the index or condition fields themselves. + +A more flexible mechanism to search the cache is available: WhereCache() + +WhereCache() accepts a function that takes any Model as argument and returns a boolean. +It is used to search the cache so commonly used with List() function. For example: + + lsList := &[]LogicalSwitch{} + err := ovs.WhereCache( + func(ls *LogicalSwitch) bool { + return strings.HasPrefix(ls.Name, "ext_") + }).List(lsList) + +Server side operations can be executed using WhereCache() conditions but it's not recommended. For each matching +cache element, an operation will be created matching on the "_uuid" column. The number of operations can be +quite large depending on the cache size and the provided function. Most likely there is a way to express the +same condition using Where() or WhereAll() which will be more efficient. + +Get + +Get() operation is a simple operation capable of retrieving one Model based on some of its schema indexes. E.g: + + ls := &LogicalSwitch{UUID:"myUUID"} + err := ovs.Get(ls) + fmt.Printf("Name of the switch is: &s", ls.Name) + +List + +List() searches the cache and populates a slice of Models. It can be used directly or using WhereCache() + + lsList := &[]LogicalSwitch{} + err := ovs.List(lsList) // List all elements + + err := ovs.WhereCache( + func(ls *LogicalSwitch) bool { + return strings.HasPrefix(ls.Name, "ext_") + }).List(lsList) + +Create + +Create returns a list of operations to create the models provided. E.g: + + ops, err := ovs.Create(&LogicalSwitch{Name:"foo")}, &LogicalSwitch{Name:"bar"}) + +Update +Update returns a list of operations to update the matching rows to match the values of the provided model. E.g: + + ls := &LogicalSwitch{ExternalIDs: map[string]string {"foo": "bar"}} + ops, err := ovs.Where(...).Update(&ls, &ls.ExternalIDs} + +Mutate + +Mutate returns a list of operations needed to mutate the matching rows as described by the list of Mutation objects. E.g: + + ls := &LogicalSwitch{} + ops, err := ovs.Where(...).Mutate(&ls, client.Mutation { + Field: &ls.Config, + Mutator: ovsdb.MutateOperationInsert, + Value: map[string]string{"foo":"bar"}, + }) + +Delete + +Delete returns a list of operations needed to delete the matching rows. 
E.g: + + ops, err := ovs.Where(...).Delete() + +*/ +package client diff --git a/vendor/github.com/ovn-org/libovsdb/client/metrics.go b/vendor/github.com/ovn-org/libovsdb/client/metrics.go new file mode 100644 index 000000000..8c4e5f6f2 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/metrics.go @@ -0,0 +1,88 @@ +package client + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +const libovsdbName = "libovsdb" + +type metrics struct { + numUpdates *prometheus.CounterVec + numTableUpdates *prometheus.CounterVec + numDisconnects prometheus.Counter + numMonitors prometheus.Gauge + registerOnce sync.Once +} + +func (m *metrics) init(modelName string, namespace, subsystem string) { + // labels that are the same across all metrics + constLabels := prometheus.Labels{"primary_model": modelName} + + if namespace == "" { + namespace = libovsdbName + subsystem = "" + } + + m.numUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "update_messages_total", + Help: "Count of libovsdb monitor update messages processed, partitioned by database", + ConstLabels: constLabels, + }, + []string{"database"}, + ) + + m.numTableUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "table_updates_total", + Help: "Count of libovsdb monitor update messages per table", + ConstLabels: constLabels, + }, + []string{"database", "table"}, + ) + + m.numDisconnects = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "disconnects_total", + Help: "Count of libovsdb disconnects encountered", + ConstLabels: constLabels, + }, + ) + + m.numMonitors = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "monitors", + Help: "Number of running libovsdb ovsdb monitors", + ConstLabels: constLabels, + }, + ) +} + +func (m *metrics) register(r prometheus.Registerer) { + m.registerOnce.Do(func() { + r.MustRegister( + m.numUpdates, + m.numTableUpdates, + m.numDisconnects, + m.numMonitors, + ) + }) +} + +func (o *ovsdbClient) registerMetrics() { + if !o.options.shouldRegisterMetrics || o.options.registry == nil { + return + } + o.metrics.register(o.options.registry) + o.options.shouldRegisterMetrics = false +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/monitor.go b/vendor/github.com/ovn-org/libovsdb/client/monitor.go new file mode 100644 index 000000000..4a0270a87 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/monitor.go @@ -0,0 +1,136 @@ +package client + +import ( + "fmt" + "reflect" + + "github.com/google/uuid" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +const emptyUUID = "00000000-0000-0000-0000-000000000000" + +// Monitor represents a monitor +type Monitor struct { + Method string + Tables []TableMonitor + Errors []error + LastTransactionID string +} + +// newMonitor creates a new *Monitor with default values +func newMonitor() *Monitor { + return &Monitor{ + Method: ovsdb.ConditionalMonitorSinceRPC, + Errors: make([]error, 0), + LastTransactionID: emptyUUID, + } +} + +// NewMonitor creates a new Monitor with the provided options +func (o *ovsdbClient) NewMonitor(opts ...MonitorOption) *Monitor { + m := newMonitor() + for _, opt := range opts { + err := opt(o, m) + if err != nil { + m.Errors = append(m.Errors, err) + } + } + return m +} + +// MonitorOption adds Tables to a Monitor +type MonitorOption 
func(o *ovsdbClient, m *Monitor) error + +// MonitorCookie is the struct we pass to correlate from updates back to their +// originating Monitor request. +type MonitorCookie struct { + DatabaseName string `json:"databaseName"` + ID string `json:"id"` +} + +func newMonitorCookie(dbName string) MonitorCookie { + return MonitorCookie{ + DatabaseName: dbName, + ID: uuid.NewString(), + } +} + +// TableMonitor is a table to be monitored +type TableMonitor struct { + // Table is the table to be monitored + Table string + // Conditions are the conditions under which the table should be monitored + Conditions []ovsdb.Condition + // Fields are the fields in the model to monitor + // If none are supplied, all fields will be used + Fields []string +} + +func newTableMonitor(o *ovsdbClient, m model.Model, conditions []model.Condition, fields []interface{}) (*TableMonitor, error) { + dbModel := o.primaryDB().model + tableName := dbModel.FindTable(reflect.TypeOf(m)) + if tableName == "" { + return nil, fmt.Errorf("object of type %s is not part of the ClientDBModel", reflect.TypeOf(m)) + } + + var columns []string + var ovsdbConds []ovsdb.Condition + + if len(fields) == 0 && len(conditions) == 0 { + return &TableMonitor{ + Table: tableName, + Conditions: ovsdbConds, + Fields: columns, + }, nil + } + + data, err := dbModel.NewModelInfo(m) + if err != nil { + return nil, fmt.Errorf("unable to obtain info from model %v: %v", m, err) + } + for _, f := range fields { + column, err := data.ColumnByPtr(f) + if err != nil { + return nil, fmt.Errorf("unable to obtain column from model %v: %v", data, err) + } + columns = append(columns, column) + } + db := o.databases[o.primaryDBName] + mmapper := db.model.Mapper + for _, modelCond := range conditions { + ovsdbCond, err := mmapper.NewCondition(data, modelCond.Field, modelCond.Function, modelCond.Value) + if err != nil { + return nil, fmt.Errorf("unable to convert condition %v: %v", modelCond, err) + } + ovsdbConds = append(ovsdbConds, *ovsdbCond) + } + return &TableMonitor{ + Table: tableName, + Conditions: ovsdbConds, + Fields: columns, + }, nil +} + +func WithTable(m model.Model, fields ...interface{}) MonitorOption { + return func(o *ovsdbClient, monitor *Monitor) error { + tableMonitor, err := newTableMonitor(o, m, []model.Condition{}, fields) + if err != nil { + return err + } + monitor.Tables = append(monitor.Tables, *tableMonitor) + return nil + } +} + +func WithConditionalTable(m model.Model, conditions []model.Condition, fields ...interface{}) MonitorOption { + return func(o *ovsdbClient, monitor *Monitor) error { + tableMonitor, err := newTableMonitor(o, m, conditions, fields) + if err != nil { + return err + } + monitor.Tables = append(monitor.Tables, *tableMonitor) + return nil + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/options.go b/vendor/github.com/ovn-org/libovsdb/client/options.go new file mode 100644 index 000000000..81ccffe20 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/options.go @@ -0,0 +1,164 @@ +package client + +import ( + "crypto/tls" + "net/url" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/go-logr/logr" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + defaultTCPEndpoint = "tcp:127.0.0.1:6640" + defaultSSLEndpoint = "ssl:127.0.0.1:6640" + defaultUnixEndpoint = "unix:/var/run/openvswitch/ovsdb.sock" +) + +type options struct { + endpoints []string + tlsConfig *tls.Config + reconnect bool + leaderOnly bool + timeout time.Duration + backoff backoff.BackOff + logger 
*logr.Logger + registry prometheus.Registerer + shouldRegisterMetrics bool // in case metrics are changed after-the-fact + metricNamespace string // prometheus metric namespace + metricSubsystem string // prometheus metric subsystem + inactivityTimeout time.Duration +} + +type Option func(o *options) error + +func newOptions(opts ...Option) (*options, error) { + o := &options{} + for _, opt := range opts { + if err := opt(o); err != nil { + return nil, err + } + } + // if no endpoints are supplied, use the default unix socket + if len(o.endpoints) == 0 { + o.endpoints = []string{defaultUnixEndpoint} + } + return o, nil +} + +// WithTLSConfig sets the tls.Config for use by the client +func WithTLSConfig(cfg *tls.Config) Option { + return func(o *options) error { + o.tlsConfig = cfg + return nil + } +} + +// WithEndpoint sets the endpoint to be used by the client +// It can be used multiple times, and the first endpoint that +// successfully connects will be used. +// Endpoints are specified in OVSDB Connection Format +// For more details, see the ovsdb(7) man page +func WithEndpoint(endpoint string) Option { + return func(o *options) error { + ep, err := url.Parse(endpoint) + if err != nil { + return err + } + switch ep.Scheme { + case UNIX: + if len(ep.Path) == 0 { + o.endpoints = append(o.endpoints, defaultUnixEndpoint) + return nil + } + case TCP: + if len(ep.Opaque) == 0 { + o.endpoints = append(o.endpoints, defaultTCPEndpoint) + return nil + } + case SSL: + if len(ep.Opaque) == 0 { + o.endpoints = append(o.endpoints, defaultSSLEndpoint) + return nil + } + } + o.endpoints = append(o.endpoints, endpoint) + return nil + } +} + +// WithLeaderOnly tells the client to treat endpoints that are clustered +// and not the leader as down. +func WithLeaderOnly(leaderOnly bool) Option { + return func(o *options) error { + o.leaderOnly = leaderOnly + return nil + } +} + +// WithReconnect tells the client to automatically reconnect when +// disconnected. The timeout is used to construct the context on +// each call to Connect, while backoff dictates the backoff +// algorithm to use. Using WithReconnect implies that +// requested transactions will block until the client has fully reconnected, +// rather than immediately returning an error if there is no connection. +func WithReconnect(timeout time.Duration, backoff backoff.BackOff) Option { + return func(o *options) error { + o.reconnect = true + o.timeout = timeout + o.backoff = backoff + return nil + } +} + +// WithInactivityCheck tells the client to send Echo request to ovsdb server periodically +// upon inactivityTimeout. When Echo request fails, then it attempts to reconnect +// with server. The inactivity check is performed as long as the connection is established. +// The reconnectTimeout argument is used to construct the context on each call to Connect, +// while reconnectBackoff dictates the backoff algorithm to use. +func WithInactivityCheck(inactivityTimeout, reconnectTimeout time.Duration, + reconnectBackoff backoff.BackOff) Option { + return func(o *options) error { + o.reconnect = true + o.timeout = reconnectTimeout + o.backoff = reconnectBackoff + o.inactivityTimeout = inactivityTimeout + return nil + } +} + +// WithLogger allows setting a specific log sink. Otherwise, the default +// go log package is used. +func WithLogger(l *logr.Logger) Option { + return func(o *options) error { + o.logger = l + return nil + } +} + +// WithMetricsRegistry allows the user to specify a Prometheus metrics registry. 
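
The connection-level options above are meant to be passed to the package's client constructor. A hedged sketch follows; NewOVSDBClient and Connect are assumed from the client implementation (not shown in this hunk), dbModel is assumed to be a previously built model.ClientDBModel, and the endpoint is a placeholder.

    // Sketch only; assumed imports: context, time, backoff/v4, client, model.
    func connect(ctx context.Context, dbModel model.ClientDBModel) (client.Client, error) {
        cli, err := client.NewOVSDBClient(dbModel,
            // OVSDB connection format, see ovsdb(7)
            client.WithEndpoint("ssl:ovnsb-db.example.com:6642"),
            client.WithLeaderOnly(true),
            // reconnect with a 5s per-attempt timeout and exponential backoff;
            // WithInactivityCheck would add periodic Echo probes on top
            client.WithReconnect(5*time.Second, backoff.NewExponentialBackOff()),
        )
        if err != nil {
            return nil, err
        }
        return cli, cli.Connect(ctx)
    }
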
+// If supplied, the metrics as defined in metrics.go will be registered. +func WithMetricsRegistry(r prometheus.Registerer) Option { + return func(o *options) error { + o.registry = r + o.shouldRegisterMetrics = (r != nil) + return nil + } +} + +// WithMetricsRegistryNamespaceSubsystem allows the user to specify a Prometheus metrics registry +// and Prometheus metric namespace and subsystem of the component utilizing libovsdb. +// If supplied, the metrics as defined in metrics.go will be registered. +func WithMetricsRegistryNamespaceSubsystem(r prometheus.Registerer, namespace, subsystem string) Option { + if namespace == "" || subsystem == "" { + panic("libovsdb function WithMetricsRegistryNamespaceSubsystem arguments 'namespace' and 'subsystem' must not be empty") + } + return func(o *options) error { + o.registry = r + o.shouldRegisterMetrics = (r != nil) + o.metricNamespace = namespace + o.metricSubsystem = subsystem + return nil + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/database/database.go b/vendor/github.com/ovn-org/libovsdb/database/database.go new file mode 100644 index 000000000..12f1222f1 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/database/database.go @@ -0,0 +1,33 @@ +package database + +import ( + "github.com/google/uuid" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// Database abstracts a database that a server can use to store and transact data +type Database interface { + CreateDatabase(database string, model ovsdb.DatabaseSchema) error + Exists(database string) bool + NewTransaction(database string) Transaction + Commit(database string, id uuid.UUID, update Update) error + CheckIndexes(database string, table string, m model.Model) error + List(database, table string, conditions ...ovsdb.Condition) (map[string]model.Model, error) + Get(database, table string, uuid string) (model.Model, error) + GetReferences(database, table, row string) (References, error) +} + +// Transaction abstracts a database transaction that can generate database +// updates +type Transaction interface { + Transact(operations ...ovsdb.Operation) ([]*ovsdb.OperationResult, Update) +} + +// Update abstracts an update that can be committed to a database +type Update interface { + GetUpdatedTables() []string + ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error + ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error + ForReferenceUpdates(do func(references References) error) error +} diff --git a/vendor/github.com/ovn-org/libovsdb/database/doc.go b/vendor/github.com/ovn-org/libovsdb/database/doc.go new file mode 100644 index 000000000..c0a858c20 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/database/doc.go @@ -0,0 +1,5 @@ +/* +Package database collects database related types, interfaces and +implementations. +*/ +package database diff --git a/vendor/github.com/ovn-org/libovsdb/database/references.go b/vendor/github.com/ovn-org/libovsdb/database/references.go new file mode 100644 index 000000000..d8181a7a5 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/database/references.go @@ -0,0 +1,71 @@ +package database + +// References tracks the references to rows from other rows at specific +// locations in the schema. +type References map[ReferenceSpec]Reference + +// ReferenceSpec specifies details about where in the schema a reference occurs. 
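
Tying this back to metrics.go above: a short sketch of the two metrics options, assuming the same NewOVSDBClient constructor as in the previous sketch and a caller-owned Prometheus registry.

    // Sketch only; assumed imports: prometheus, client, model.
    func withMetrics(dbModel model.ClientDBModel, reg prometheus.Registerer) (client.Client, error) {
        // client.WithMetricsRegistry(reg) would register under the default
        // "libovsdb" namespace; the variant below overrides namespace and
        // subsystem and panics if either string is empty, as noted above.
        return client.NewOVSDBClient(dbModel,
            client.WithMetricsRegistryNamespaceSubsystem(reg, "netobserv", "libovsdb"))
    }
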
+type ReferenceSpec struct { + // ToTable is the table of the row to which the reference is made + ToTable string + + // FromTable is the table of the row from which the reference is made + FromTable string + + // FromColumn is the column of the row from which the reference is made + FromColumn string + + // FromValue flags if the reference is made on a map key or map value when + // the column is a map + FromValue bool +} + +// Reference maps the UUIDs of rows to which the reference is made to the +// rows it is made from +type Reference map[string][]string + +// GetReferences gets references to a row +func (rs References) GetReferences(table, uuid string) References { + refs := References{} + for spec, values := range rs { + if spec.ToTable != table { + continue + } + if _, ok := values[uuid]; ok { + refs[spec] = Reference{uuid: values[uuid]} + } + } + return refs +} + +// UpdateReferences updates the references with the provided ones. Dangling +// references, that is, the references of rows that are no longer referenced +// from anywhere, are cleaned up. +func (rs References) UpdateReferences(other References) { + for spec, otherRefs := range other { + for to, from := range otherRefs { + rs.updateReference(spec, to, from) + } + } +} + +// updateReference updates the references to a row at a specific location in the +// schema +func (rs References) updateReference(spec ReferenceSpec, to string, from []string) { + thisRefs, ok := rs[spec] + if !ok && len(from) > 0 { + // add references from a previously untracked location + rs[spec] = Reference{to: from} + return + } + if len(from) > 0 { + // replace references to this row at this specific location + thisRefs[to] = from + return + } + // otherwise remove previously tracked references + delete(thisRefs, to) + if len(thisRefs) == 0 { + delete(rs, spec) + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/mapper/info.go b/vendor/github.com/ovn-org/libovsdb/mapper/info.go new file mode 100644 index 000000000..8ac436c79 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/mapper/info.go @@ -0,0 +1,179 @@ +package mapper + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/ovsdb" +) + +// ErrColumnNotFound is an error that can occur when the column does not exist for a table +type ErrColumnNotFound struct { + column string + table string +} + +// Error implements the error interface +func (e *ErrColumnNotFound) Error() string { + return fmt.Sprintf("column: %s not found in table: %s", e.column, e.table) +} + +func NewErrColumnNotFound(column, table string) *ErrColumnNotFound { + return &ErrColumnNotFound{ + column: column, + table: table, + } +} + +// Info is a struct that wraps an object with its metadata +type Info struct { + // FieldName indexed by column + Obj interface{} + Metadata Metadata +} + +// Metadata represents the information needed to know how to map OVSDB columns into an objetss fields +type Metadata struct { + Fields map[string]string // Map of ColumnName -> FieldName + TableSchema *ovsdb.TableSchema // TableSchema associated + TableName string // Table name +} + +// FieldByColumn returns the field value that corresponds to a column +func (i *Info) FieldByColumn(column string) (interface{}, error) { + fieldName, ok := i.Metadata.Fields[column] + if !ok { + return nil, NewErrColumnNotFound(column, i.Metadata.TableName) + } + return reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName).Interface(), nil +} + +// FieldByColumn returns the field value that corresponds to a column +func (i *Info) hasColumn(column 
string) bool { + _, ok := i.Metadata.Fields[column] + return ok +} + +// SetField sets the field in the column to the specified value +func (i *Info) SetField(column string, value interface{}) error { + fieldName, ok := i.Metadata.Fields[column] + if !ok { + return fmt.Errorf("SetField: column %s not found in orm info", column) + } + fieldValue := reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName) + + if !fieldValue.Type().AssignableTo(reflect.TypeOf(value)) { + return fmt.Errorf("column %s: native value %v (%s) is not assignable to field %s (%s)", + column, value, reflect.TypeOf(value), fieldName, fieldValue.Type()) + } + fieldValue.Set(reflect.ValueOf(value)) + return nil +} + +// ColumnByPtr returns the column name that corresponds to the field by the field's pointer +func (i *Info) ColumnByPtr(fieldPtr interface{}) (string, error) { + fieldPtrVal := reflect.ValueOf(fieldPtr) + if fieldPtrVal.Kind() != reflect.Ptr { + return "", ovsdb.NewErrWrongType("ColumnByPointer", "pointer to a field in the struct", fieldPtr) + } + offset := fieldPtrVal.Pointer() - reflect.ValueOf(i.Obj).Pointer() + objType := reflect.TypeOf(i.Obj).Elem() + for j := 0; j < objType.NumField(); j++ { + if objType.Field(j).Offset == offset { + column := objType.Field(j).Tag.Get("ovsdb") + if _, ok := i.Metadata.Fields[column]; !ok { + return "", fmt.Errorf("field does not have orm column information") + } + return column, nil + } + } + return "", fmt.Errorf("field pointer does not correspond to orm struct") +} + +// getValidIndexes inspects the object and returns the a list of indexes (set of columns) for witch +// the object has non-default values +func (i *Info) getValidIndexes() ([][]string, error) { + var validIndexes [][]string + var possibleIndexes [][]string + + possibleIndexes = append(possibleIndexes, []string{"_uuid"}) + possibleIndexes = append(possibleIndexes, i.Metadata.TableSchema.Indexes...) 
+ + // Iterate through indexes and validate them +OUTER: + for _, idx := range possibleIndexes { + for _, col := range idx { + if !i.hasColumn(col) { + continue OUTER + } + columnSchema := i.Metadata.TableSchema.Column(col) + if columnSchema == nil { + continue OUTER + } + field, err := i.FieldByColumn(col) + if err != nil { + return nil, err + } + if !reflect.ValueOf(field).IsValid() || ovsdb.IsDefaultValue(columnSchema, field) { + continue OUTER + } + } + validIndexes = append(validIndexes, idx) + } + return validIndexes, nil +} + +// NewInfo creates a MapperInfo structure around an object based on a given table schema +func NewInfo(tableName string, table *ovsdb.TableSchema, obj interface{}) (*Info, error) { + objPtrVal := reflect.ValueOf(obj) + if objPtrVal.Type().Kind() != reflect.Ptr { + return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj) + } + objVal := reflect.Indirect(objPtrVal) + if objVal.Kind() != reflect.Struct { + return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj) + } + objType := objVal.Type() + + fields := make(map[string]string, objType.NumField()) + for i := 0; i < objType.NumField(); i++ { + field := objType.Field(i) + colName := field.Tag.Get("ovsdb") + if colName == "" { + // Untagged fields are ignored + continue + } + column := table.Column(colName) + if column == nil { + return nil, &ErrMapper{ + objType: objType.String(), + field: field.Name, + fieldType: field.Type.String(), + fieldTag: colName, + reason: "Column does not exist in schema", + } + } + + // Perform schema-based type checking + expType := ovsdb.NativeType(column) + if expType != field.Type { + return nil, &ErrMapper{ + objType: objType.String(), + field: field.Name, + fieldType: field.Type.String(), + fieldTag: colName, + reason: fmt.Sprintf("Wrong type, column expects %s", expType), + } + } + fields[colName] = field.Name + } + + return &Info{ + Obj: obj, + Metadata: Metadata{ + Fields: fields, + TableSchema: table, + TableName: tableName, + }, + }, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go b/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go new file mode 100644 index 000000000..5ca7a412b --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go @@ -0,0 +1,317 @@ +package mapper + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/ovsdb" +) + +// Mapper offers functions to interact with libovsdb through user-provided native structs. +// The way to specify what field of the struct goes +// to what column in the database id through field a field tag. +// The tag used is "ovsdb" and has the following structure +// 'ovsdb:"${COLUMN_NAME}"' +// where COLUMN_NAME is the name of the column and must match the schema +// +//Example: +// type MyObj struct { +// Name string `ovsdb:"name"` +// } +type Mapper struct { + Schema ovsdb.DatabaseSchema +} + +// ErrMapper describes an error in an Mapper type +type ErrMapper struct { + objType string + field string + fieldType string + fieldTag string + reason string +} + +func (e *ErrMapper) Error() string { + return fmt.Sprintf("Mapper Error. 
Object type %s contains field %s (%s) ovs tag %s: %s", + e.objType, e.field, e.fieldType, e.fieldTag, e.reason) +} + +// NewMapper returns a new mapper +func NewMapper(schema ovsdb.DatabaseSchema) Mapper { + return Mapper{ + Schema: schema, + } +} + +// GetRowData transforms a Row to a struct based on its tags +// The result object must be given as pointer to an object with the right tags +func (m Mapper) GetRowData(row *ovsdb.Row, result *Info) error { + if row == nil { + return nil + } + return m.getData(*row, result) +} + +// getData transforms a map[string]interface{} containing OvS types (e.g: a ResultRow +// has this format) to orm struct +// The result object must be given as pointer to an object with the right tags +func (m Mapper) getData(ovsData ovsdb.Row, result *Info) error { + for name, column := range result.Metadata.TableSchema.Columns { + if !result.hasColumn(name) { + // If provided struct does not have a field to hold this value, skip it + continue + } + + ovsElem, ok := ovsData[name] + if !ok { + // Ignore missing columns + continue + } + + nativeElem, err := ovsdb.OvsToNative(column, ovsElem) + if err != nil { + return fmt.Errorf("table %s, column %s: failed to extract native element: %s", + result.Metadata.TableName, name, err.Error()) + } + + if err := result.SetField(name, nativeElem); err != nil { + return err + } + } + return nil +} + +// NewRow transforms an orm struct to a map[string] interface{} that can be used as libovsdb.Row +// By default, default or null values are skipped. This behavior can be modified by specifying +// a list of fields (pointers to fields in the struct) to be added to the row +func (m Mapper) NewRow(data *Info, fields ...interface{}) (ovsdb.Row, error) { + columns := make(map[string]*ovsdb.ColumnSchema) + for k, v := range data.Metadata.TableSchema.Columns { + columns[k] = v + } + columns["_uuid"] = &ovsdb.UUIDColumn + ovsRow := make(map[string]interface{}, len(columns)) + for name, column := range columns { + nativeElem, err := data.FieldByColumn(name) + if err != nil { + // If provided struct does not have a field to hold this value, skip it + continue + } + + // add specific fields + if len(fields) > 0 { + found := false + for _, f := range fields { + col, err := data.ColumnByPtr(f) + if err != nil { + return nil, err + } + if col == name { + found = true + break + } + } + if !found { + continue + } + } + if len(fields) == 0 && ovsdb.IsDefaultValue(column, nativeElem) { + continue + } + ovsElem, err := ovsdb.NativeToOvs(column, nativeElem) + if err != nil { + return nil, fmt.Errorf("table %s, column %s: failed to generate ovs element. %s", data.Metadata.TableName, name, err.Error()) + } + ovsRow[name] = ovsElem + } + return ovsRow, nil +} + +// NewEqualityCondition returns a list of equality conditions that match a given object +// A list of valid columns that shall be used as a index can be provided. +// If none are provided, we will try to use object's field that matches the '_uuid' ovsdb tag +// If it does not exist or is null (""), then we will traverse all of the table indexes and +// use the first index (list of simultaneously unique columns) for which the provided mapper +// object has valid data. The order in which they are traversed matches the order defined +// in the schema. +// By `valid data` we mean non-default data. +func (m Mapper) NewEqualityCondition(data *Info, fields ...interface{}) ([]ovsdb.Condition, error) { + var conditions []ovsdb.Condition + var condIndex [][]string + + // If index is provided, use it. 
If not, obtain the valid indexes from the mapper info + if len(fields) > 0 { + providedIndex := []string{} + for i := range fields { + if col, err := data.ColumnByPtr(fields[i]); err == nil { + providedIndex = append(providedIndex, col) + } else { + return nil, err + } + } + condIndex = append(condIndex, providedIndex) + } else { + var err error + condIndex, err = data.getValidIndexes() + if err != nil { + return nil, err + } + } + + if len(condIndex) == 0 { + return nil, fmt.Errorf("failed to find a valid index") + } + + // Pick the first valid index + for _, col := range condIndex[0] { + field, err := data.FieldByColumn(col) + if err != nil { + return nil, err + } + + column := data.Metadata.TableSchema.Column(col) + if column == nil { + return nil, fmt.Errorf("column %s not found", col) + } + ovsVal, err := ovsdb.NativeToOvs(column, field) + if err != nil { + return nil, err + } + conditions = append(conditions, ovsdb.NewCondition(col, ovsdb.ConditionEqual, ovsVal)) + } + return conditions, nil +} + +// EqualFields compares two mapped objects. +// The indexes to use for comparison are, the _uuid, the table indexes and the columns that correspond +// to the mapped fields pointed to by 'fields'. They must be pointers to fields on the first mapped element (i.e: one) +func (m Mapper) EqualFields(one, other *Info, fields ...interface{}) (bool, error) { + indexes := []string{} + for _, f := range fields { + col, err := one.ColumnByPtr(f) + if err != nil { + return false, err + } + indexes = append(indexes, col) + } + return m.equalIndexes(one, other, indexes...) +} + +// NewCondition returns a ovsdb.Condition based on the model +func (m Mapper) NewCondition(data *Info, field interface{}, function ovsdb.ConditionFunction, value interface{}) (*ovsdb.Condition, error) { + column, err := data.ColumnByPtr(field) + if err != nil { + return nil, err + } + + // Check that the condition is valid + columnSchema := data.Metadata.TableSchema.Column(column) + if columnSchema == nil { + return nil, fmt.Errorf("column %s not found", column) + } + if err := ovsdb.ValidateCondition(columnSchema, function, value); err != nil { + return nil, err + } + + ovsValue, err := ovsdb.NativeToOvs(columnSchema, value) + if err != nil { + return nil, err + } + + ovsdbCondition := ovsdb.NewCondition(column, function, ovsValue) + + return &ovsdbCondition, nil + +} + +// NewMutation creates a RFC7047 mutation object based on an ORM object and the mutation fields (in native format) +// It takes care of field validation against the column type +func (m Mapper) NewMutation(data *Info, column string, mutator ovsdb.Mutator, value interface{}) (*ovsdb.Mutation, error) { + // Check the column exists in the object + if !data.hasColumn(column) { + return nil, fmt.Errorf("mutation contains column %s that does not exist in object %v", column, data) + } + // Check that the mutation is valid + columnSchema := data.Metadata.TableSchema.Column(column) + if columnSchema == nil { + return nil, fmt.Errorf("column %s not found", column) + } + if err := ovsdb.ValidateMutation(columnSchema, mutator, value); err != nil { + return nil, err + } + + var ovsValue interface{} + var err error + // Usually a mutation value is of the same type of the value being mutated + // except for delete mutation of maps where it can also be a list of same type of + // keys (rfc7047 5.1). Handle this special case here. 
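
A sketch of the Mapper round trip built from the pieces above (NewMapper, NewInfo, NewRow, NewEqualityCondition), reusing the hypothetical Bridge model from the monitor sketch and assuming the parsed schema actually contains a Bridge table.

    // Sketch only; assumed imports: mapper, ovsdb.
    func bridgeRowAndConditions(schema ovsdb.DatabaseSchema, br *Bridge) (ovsdb.Row, []ovsdb.Condition, error) {
        m := mapper.NewMapper(schema)
        info, err := mapper.NewInfo("Bridge", schema.Table("Bridge"), br)
        if err != nil {
            return nil, nil, err
        }
        // Default/empty columns are skipped unless specific field pointers are passed.
        row, err := m.NewRow(info)
        if err != nil {
            return nil, nil, err
        }
        // Uses _uuid when set, otherwise the first schema index with non-default values.
        conds, err := m.NewEqualityCondition(info)
        return row, conds, err
    }
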
+ if mutator == "delete" && columnSchema.Type == ovsdb.TypeMap && reflect.TypeOf(value).Kind() != reflect.Map { + // It's OK to cast the value to a list of elements because validation has passed + ovsSet, err := ovsdb.NewOvsSet(value) + if err != nil { + return nil, err + } + ovsValue = ovsSet + } else { + ovsValue, err = ovsdb.NativeToOvs(columnSchema, value) + if err != nil { + return nil, err + } + } + + return &ovsdb.Mutation{Column: column, Mutator: mutator, Value: ovsValue}, nil +} + +// equalIndexes returns whether both models are equal from the DB point of view +// Two objects are considered equal if any of the following conditions is true +// They have a field tagged with column name '_uuid' and their values match +// For any of the indexes defined in the Table Schema, the values all of its columns are simultaneously equal +// (as per RFC7047) +// The values of all of the optional indexes passed as variadic parameter to this function are equal. +func (m Mapper) equalIndexes(one, other *Info, indexes ...string) (bool, error) { + match := false + + oneIndexes, err := one.getValidIndexes() + if err != nil { + return false, err + } + + otherIndexes, err := other.getValidIndexes() + if err != nil { + return false, err + } + + oneIndexes = append(oneIndexes, indexes) + otherIndexes = append(otherIndexes, indexes) + + for _, lidx := range oneIndexes { + for _, ridx := range otherIndexes { + if reflect.DeepEqual(ridx, lidx) { + // All columns in an index must be simultaneously equal + for _, col := range lidx { + if !one.hasColumn(col) || !other.hasColumn(col) { + break + } + lfield, err := one.FieldByColumn(col) + if err != nil { + return false, err + } + rfield, err := other.FieldByColumn(col) + if err != nil { + return false, err + } + if reflect.DeepEqual(lfield, rfield) { + match = true + } else { + match = false + break + } + } + if match { + return true, nil + } + } + } + } + return false, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/model/client.go b/vendor/github.com/ovn-org/libovsdb/model/client.go new file mode 100644 index 000000000..5eb686244 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/model/client.go @@ -0,0 +1,171 @@ +package model + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// ColumnKey addresses a column and optionally a key within a column +type ColumnKey struct { + Column string + Key interface{} +} + +// ClientIndex defines a client index by a set of columns +type ClientIndex struct { + Columns []ColumnKey +} + +// ClientDBModel contains the client information needed to build a DatabaseModel +type ClientDBModel struct { + name string + types map[string]reflect.Type + indexes map[string][]ClientIndex +} + +// NewModel returns a new instance of a model from a specific string +func (db ClientDBModel) newModel(table string) (Model, error) { + mtype, ok := db.types[table] + if !ok { + return nil, fmt.Errorf("table %s not found in database model", string(table)) + } + model := reflect.New(mtype.Elem()) + return model.Interface().(Model), nil +} + +// Name returns the database name +func (db ClientDBModel) Name() string { + return db.name +} + +// Indexes returns the client indexes for a model +func (db ClientDBModel) Indexes(table string) []ClientIndex { + if len(db.indexes) == 0 { + return nil + } + if _, ok := db.indexes[table]; ok { + return copyIndexes(db.indexes)[table] + } + return nil +} + +// SetIndexes sets the client indexes. 
Client indexes are optional, similar to +// schema indexes and are only tracked in the specific client instances that are +// provided with this client model. A client index may point to multiple models +// as uniqueness is not enforced. They are defined per table and multiple +// indexes can be defined for a table. Each index consists of a set of columns. +// If the column is a map, specific keys of that map can be addressed for the +// index. +func (db *ClientDBModel) SetIndexes(indexes map[string][]ClientIndex) { + db.indexes = copyIndexes(indexes) +} + +// Validate validates the DatabaseModel against the input schema +// Returns all the errors detected +func (db ClientDBModel) validate(schema ovsdb.DatabaseSchema) []error { + var errors []error + if db.name != schema.Name { + errors = append(errors, fmt.Errorf("database model name (%s) does not match schema (%s)", + db.name, schema.Name)) + } + + infos := make(map[string]*mapper.Info, len(db.types)) + for tableName := range db.types { + tableSchema := schema.Table(tableName) + if tableSchema == nil { + errors = append(errors, fmt.Errorf("database model contains a model for table %s that does not exist in schema", tableName)) + continue + } + model, err := db.newModel(tableName) + if err != nil { + errors = append(errors, err) + continue + } + info, err := mapper.NewInfo(tableName, tableSchema, model) + if err != nil { + errors = append(errors, err) + continue + } + infos[tableName] = info + } + + for tableName, indexSets := range db.indexes { + info, ok := infos[tableName] + if !ok { + errors = append(errors, fmt.Errorf("database model contains a client index for table %s that does not exist in schema", tableName)) + continue + } + for _, indexSet := range indexSets { + for _, indexColumn := range indexSet.Columns { + f, err := info.FieldByColumn(indexColumn.Column) + if err != nil { + errors = append( + errors, + fmt.Errorf("database model contains a client index for column %s that does not exist in table %s", + indexColumn.Column, + tableName)) + continue + } + if indexColumn.Key != nil && reflect.ValueOf(f).Kind() != reflect.Map { + errors = append( + errors, + fmt.Errorf("database model contains a client index for key %s in column %s of table %s that is not a map", + indexColumn.Key, + indexColumn.Column, + tableName)) + continue + } + } + } + } + return errors +} + +// NewClientDBModel constructs a ClientDBModel based on a database name and dictionary of models indexed by table name +func NewClientDBModel(name string, models map[string]Model) (ClientDBModel, error) { + types := make(map[string]reflect.Type, len(models)) + for table, model := range models { + modelType := reflect.TypeOf(model) + if modelType.Kind() != reflect.Ptr || modelType.Elem().Kind() != reflect.Struct { + return ClientDBModel{}, fmt.Errorf("model is expected to be a pointer to struct") + } + hasUUID := false + for i := 0; i < modelType.Elem().NumField(); i++ { + if field := modelType.Elem().Field(i); field.Tag.Get("ovsdb") == "_uuid" && + field.Type.Kind() == reflect.String { + hasUUID = true + break + } + } + if !hasUUID { + return ClientDBModel{}, fmt.Errorf("model is expected to have a string field called uuid") + } + + types[table] = modelType + } + return ClientDBModel{ + types: types, + name: name, + }, nil +} + +func copyIndexes(src map[string][]ClientIndex) map[string][]ClientIndex { + if len(src) == 0 { + return nil + } + dst := make(map[string][]ClientIndex, len(src)) + for table, indexSets := range src { + dst[table] = make([]ClientIndex, 0, 
len(indexSets)) + for _, indexSet := range indexSets { + indexSetCopy := ClientIndex{ + Columns: make([]ColumnKey, len(indexSet.Columns)), + } + copy(indexSetCopy.Columns, indexSet.Columns) + dst[table] = append(dst[table], indexSetCopy) + } + } + return dst +} diff --git a/vendor/github.com/ovn-org/libovsdb/model/database.go b/vendor/github.com/ovn-org/libovsdb/model/database.go new file mode 100644 index 000000000..0857d903f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/model/database.go @@ -0,0 +1,118 @@ +package model + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// A DatabaseModel represents libovsdb's metadata about the database. +// It's the result of combining the client's ClientDBModel and the server's Schema +type DatabaseModel struct { + client ClientDBModel + Schema ovsdb.DatabaseSchema + Mapper mapper.Mapper + metadata map[reflect.Type]mapper.Metadata +} + +// NewDatabaseModel returns a new DatabaseModel +func NewDatabaseModel(schema ovsdb.DatabaseSchema, client ClientDBModel) (DatabaseModel, []error) { + dbModel := &DatabaseModel{ + Schema: schema, + client: client, + } + errs := client.validate(schema) + if len(errs) > 0 { + return DatabaseModel{}, errs + } + dbModel.Mapper = mapper.NewMapper(schema) + var metadata map[reflect.Type]mapper.Metadata + metadata, errs = generateModelInfo(schema, client.types) + if len(errs) > 0 { + return DatabaseModel{}, errs + } + dbModel.metadata = metadata + return *dbModel, nil +} + +// NewPartialDatabaseModel returns a DatabaseModel what does not have a schema yet +func NewPartialDatabaseModel(client ClientDBModel) DatabaseModel { + return DatabaseModel{ + client: client, + } +} + +// Valid returns whether the DatabaseModel is fully functional +func (db DatabaseModel) Valid() bool { + return !reflect.DeepEqual(db.Schema, ovsdb.DatabaseSchema{}) +} + +// Client returns the DatabaseModel's client dbModel +func (db DatabaseModel) Client() ClientDBModel { + return db.client +} + +// NewModel returns a new instance of a model from a specific string +func (db DatabaseModel) NewModel(table string) (Model, error) { + mtype, ok := db.client.types[table] + if !ok { + return nil, fmt.Errorf("table %s not found in database model", string(table)) + } + model := reflect.New(mtype.Elem()) + return model.Interface().(Model), nil +} + +// Types returns the DatabaseModel Types +// the DatabaseModel types is a map of reflect.Types indexed by string +// The reflect.Type is a pointer to a struct that contains 'ovs' tags +// as described above. 
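
For completeness, a sketch of how NewClientDBModel and SetIndexes above are typically combined, again with the hypothetical Bridge model; the database name and the index are illustrative.

    // Sketch only; assumed import: model.
    func newClientDBModel() (model.ClientDBModel, error) {
        dbModel, err := model.NewClientDBModel("Open_vSwitch", map[string]model.Model{
            "Bridge": &Bridge{},
        })
        if err != nil {
            return model.ClientDBModel{}, err
        }
        // Optional client-side index on the name column; per the comment above,
        // uniqueness is not enforced and the index is only tracked locally.
        dbModel.SetIndexes(map[string][]model.ClientIndex{
            "Bridge": {{Columns: []model.ColumnKey{{Column: "name"}}}},
        })
        return dbModel, nil
    }
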
Such pointer to struct also implements the Model interface +func (db DatabaseModel) Types() map[string]reflect.Type { + return db.client.types +} + +// FindTable returns the string associated with a reflect.Type or "" +func (db DatabaseModel) FindTable(mType reflect.Type) string { + for table, tType := range db.client.types { + if tType == mType { + return table + } + } + return "" +} + +// generateModelMetadata creates metadata objects from all models included in the +// database and caches them for future re-use +func generateModelInfo(dbSchema ovsdb.DatabaseSchema, modelTypes map[string]reflect.Type) (map[reflect.Type]mapper.Metadata, []error) { + errors := []error{} + metadata := make(map[reflect.Type]mapper.Metadata, len(modelTypes)) + for tableName, tType := range modelTypes { + tableSchema := dbSchema.Table(tableName) + if tableSchema == nil { + errors = append(errors, fmt.Errorf("database Model contains model for table %s which is not present in schema", tableName)) + continue + } + + obj := reflect.New(tType.Elem()).Interface().(Model) + info, err := mapper.NewInfo(tableName, tableSchema, obj) + if err != nil { + errors = append(errors, err) + continue + } + metadata[tType] = info.Metadata + } + return metadata, errors +} + +// NewModelInfo returns a mapper.Info object based on a provided model +func (db DatabaseModel) NewModelInfo(obj interface{}) (*mapper.Info, error) { + meta, ok := db.metadata[reflect.TypeOf(obj)] + if !ok { + return nil, ovsdb.NewErrWrongType("NewModelInfo", "type that is part of the DatabaseModel", obj) + } + return &mapper.Info{ + Obj: obj, + Metadata: meta, + }, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/model/model.go b/vendor/github.com/ovn-org/libovsdb/model/model.go new file mode 100644 index 000000000..c8575f5bf --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/model/model.go @@ -0,0 +1,130 @@ +package model + +import ( + "encoding/json" + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/ovsdb" +) + +// A Model is the base interface used to build Database Models. It is used +// to express how data from a specific Database Table shall be translated into structs +// A Model is a struct with at least one (most likely more) field tagged with the 'ovs' tag +// The value of 'ovs' field must be a valid column name in the OVS Database +// A field associated with the "_uuid" column mandatory. 
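
And a sketch of turning that ClientDBModel plus a server schema into the DatabaseModel above; the schema argument is assumed to have been obtained elsewhere (for instance from a get_schema reply), and Bridge is still the hypothetical model from earlier.

    // Sketch only; assumed imports: fmt, model, ovsdb.
    func buildDatabaseModel(schema ovsdb.DatabaseSchema, cm model.ClientDBModel) (model.DatabaseModel, error) {
        dbModel, errs := model.NewDatabaseModel(schema, cm)
        if len(errs) > 0 {
            return model.DatabaseModel{}, fmt.Errorf("model/schema mismatch: %v", errs)
        }
        // Per-type metadata is now cached, so Info objects are cheap to build.
        if _, err := dbModel.NewModelInfo(&Bridge{}); err != nil {
            return model.DatabaseModel{}, err
        }
        return dbModel, nil
    }
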
The rest of the columns are optional +// The struct may also have non-tagged fields (which will be ignored by the API calls) +// The Model interface must be implemented by the pointer to such type +// Example: +//type MyLogicalRouter struct { +// UUID string `ovsdb:"_uuid"` +// Name string `ovsdb:"name"` +// ExternalIDs map[string]string `ovsdb:"external_ids"` +// LoadBalancers []string `ovsdb:"load_balancer"` +//} +type Model interface{} + +type CloneableModel interface { + CloneModel() Model + CloneModelInto(Model) +} + +type ComparableModel interface { + EqualsModel(Model) bool +} + +// Clone creates a deep copy of a model +func Clone(a Model) Model { + if cloner, ok := a.(CloneableModel); ok { + return cloner.CloneModel() + } + + val := reflect.Indirect(reflect.ValueOf(a)) + b := reflect.New(val.Type()).Interface() + aBytes, _ := json.Marshal(a) + _ = json.Unmarshal(aBytes, b) + return b +} + +// CloneInto deep copies a model into another one +func CloneInto(src, dst Model) { + if cloner, ok := src.(CloneableModel); ok { + cloner.CloneModelInto(dst) + return + } + + aBytes, _ := json.Marshal(src) + _ = json.Unmarshal(aBytes, dst) +} + +func Equal(l, r Model) bool { + if comparator, ok := l.(ComparableModel); ok { + return comparator.EqualsModel(r) + } + + return reflect.DeepEqual(l, r) +} + +func modelSetUUID(model Model, uuid string) error { + modelVal := reflect.ValueOf(model).Elem() + for i := 0; i < modelVal.NumField(); i++ { + if field := modelVal.Type().Field(i); field.Tag.Get("ovsdb") == "_uuid" && + field.Type.Kind() == reflect.String { + modelVal.Field(i).Set(reflect.ValueOf(uuid)) + return nil + } + } + return fmt.Errorf("model is expected to have a string field mapped to column _uuid") +} + +// Condition is a model-based representation of an OVSDB Condition +type Condition struct { + // Pointer to the field of the model where the operation applies + Field interface{} + // Condition function + Function ovsdb.ConditionFunction + // Value to use in the condition + Value interface{} +} + +// Mutation is a model-based representation of an OVSDB Mutation +type Mutation struct { + // Pointer to the field of the model that shall be mutated + Field interface{} + // String representing the mutator (as per RFC7047) + Mutator ovsdb.Mutator + // Value to use in the mutation + Value interface{} +} + +// CreateModel creates a new Model instance based on an OVSDB Row information +func CreateModel(dbModel DatabaseModel, tableName string, row *ovsdb.Row, uuid string) (Model, error) { + if !dbModel.Valid() { + return nil, fmt.Errorf("database model not valid") + } + + table := dbModel.Schema.Table(tableName) + if table == nil { + return nil, fmt.Errorf("table %s not found", tableName) + } + model, err := dbModel.NewModel(tableName) + if err != nil { + return nil, err + } + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + err = dbModel.Mapper.GetRowData(row, info) + if err != nil { + return nil, err + } + + if uuid != "" { + if err := info.SetField("_uuid", uuid); err != nil { + return nil, err + } + } + + return model, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go new file mode 100644 index 000000000..aebe2c2d0 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go @@ -0,0 +1,427 @@ +package ovsdb + +import ( + "fmt" + "reflect" +) + +var ( + intType = reflect.TypeOf(0) + realType = reflect.TypeOf(0.0) + boolType = reflect.TypeOf(true) + strType = 
reflect.TypeOf("") +) + +// ErrWrongType describes typing error +type ErrWrongType struct { + from string + expected string + got interface{} +} + +func (e *ErrWrongType) Error() string { + return fmt.Sprintf("Wrong Type (%s): expected %s but got %+v (%s)", + e.from, e.expected, e.got, reflect.TypeOf(e.got)) +} + +// NewErrWrongType creates a new ErrWrongType +func NewErrWrongType(from, expected string, got interface{}) error { + return &ErrWrongType{ + from: from, + expected: expected, + got: got, + } +} + +// NativeTypeFromAtomic returns the native type that can hold a value of an +// AtomicType +func NativeTypeFromAtomic(basicType string) reflect.Type { + switch basicType { + case TypeInteger: + return intType + case TypeReal: + return realType + case TypeBoolean: + return boolType + case TypeString: + return strType + case TypeUUID: + return strType + default: + panic("Unknown basic type %s basicType") + } +} + +// NativeType returns the reflect.Type that can hold the value of a column +// OVS Type to Native Type convertions: +// +// OVS sets -> go slices or a go native type depending on the key +// OVS uuid -> go strings +// OVS map -> go map +// OVS enum -> go native type depending on the type of the enum key +func NativeType(column *ColumnSchema) reflect.Type { + switch column.Type { + case TypeInteger, TypeReal, TypeBoolean, TypeUUID, TypeString: + return NativeTypeFromAtomic(column.Type) + case TypeEnum: + return NativeTypeFromAtomic(column.TypeObj.Key.Type) + case TypeMap: + keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type) + valueType := NativeTypeFromAtomic(column.TypeObj.Value.Type) + return reflect.MapOf(keyType, valueType) + case TypeSet: + keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type) + // optional type + if column.TypeObj.Min() == 0 && column.TypeObj.Max() == 1 { + return reflect.PtrTo(keyType) + } + // non-optional type with max 1 + if column.TypeObj.Min() == 1 && column.TypeObj.Max() == 1 { + return keyType + } + return reflect.SliceOf(keyType) + default: + panic(fmt.Errorf("unknown extended type %s", column.Type)) + } +} + +// OvsToNativeAtomic returns the native type of the basic ovs type +func OvsToNativeAtomic(basicType string, ovsElem interface{}) (interface{}, error) { + switch basicType { + case TypeReal, TypeString, TypeBoolean: + naType := NativeTypeFromAtomic(basicType) + if reflect.TypeOf(ovsElem) != naType { + return nil, NewErrWrongType("OvsToNativeAtomic", naType.String(), ovsElem) + } + return ovsElem, nil + case TypeInteger: + naType := NativeTypeFromAtomic(basicType) + // Default decoding of numbers is float64, convert them to int + if !reflect.TypeOf(ovsElem).ConvertibleTo(naType) { + return nil, NewErrWrongType("OvsToNativeAtomic", fmt.Sprintf("Convertible to %s", naType), ovsElem) + } + return reflect.ValueOf(ovsElem).Convert(naType).Interface(), nil + case TypeUUID: + uuid, ok := ovsElem.(UUID) + if !ok { + return nil, NewErrWrongType("OvsToNativeAtomic", "UUID", ovsElem) + } + return uuid.GoUUID, nil + default: + panic(fmt.Errorf("unknown atomic type %s", basicType)) + } +} + +func OvsToNativeSlice(baseType string, ovsElem interface{}) (interface{}, error) { + naType := NativeTypeFromAtomic(baseType) + var nativeSet reflect.Value + switch ovsSet := ovsElem.(type) { + case OvsSet: + nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, len(ovsSet.GoSet)) + for _, v := range ovsSet.GoSet { + nv, err := OvsToNativeAtomic(baseType, v) + if err != nil { + return nil, err + } + nativeSet = reflect.Append(nativeSet, 
reflect.ValueOf(nv)) + } + + default: + nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, 1) + nv, err := OvsToNativeAtomic(baseType, ovsElem) + if err != nil { + return nil, err + } + + nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv)) + } + return nativeSet.Interface(), nil +} + +// OvsToNative transforms an ovs type to native one based on the column type information +func OvsToNative(column *ColumnSchema, ovsElem interface{}) (interface{}, error) { + switch column.Type { + case TypeReal, TypeString, TypeBoolean, TypeInteger, TypeUUID: + return OvsToNativeAtomic(column.Type, ovsElem) + case TypeEnum: + return OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem) + case TypeSet: + naType := NativeType(column) + // The inner slice is []interface{} + // We need to convert it to the real type os slice + switch naType.Kind() { + case reflect.Ptr: + switch ovsSet := ovsElem.(type) { + case OvsSet: + if len(ovsSet.GoSet) > 1 { + return nil, fmt.Errorf("expected a slice of len =< 1, but got a slice with %d elements", len(ovsSet.GoSet)) + } + if len(ovsSet.GoSet) == 0 { + return reflect.Zero(naType).Interface(), nil + } + native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsSet.GoSet[0]) + if err != nil { + return nil, err + } + pv := reflect.New(naType.Elem()) + pv.Elem().Set(reflect.ValueOf(native)) + return pv.Interface(), nil + default: + native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem) + if err != nil { + return nil, err + } + pv := reflect.New(naType.Elem()) + pv.Elem().Set(reflect.ValueOf(native)) + return pv.Interface(), nil + } + case reflect.Slice: + return OvsToNativeSlice(column.TypeObj.Key.Type, ovsElem) + default: + return nil, fmt.Errorf("native type was not slice or pointer. got %d", naType.Kind()) + } + case TypeMap: + naType := NativeType(column) + ovsMap, ok := ovsElem.(OvsMap) + if !ok { + return nil, NewErrWrongType("OvsToNative", "OvsMap", ovsElem) + } + // The inner slice is map[interface]interface{} + // We need to convert it to the real type os slice + nativeMap := reflect.MakeMapWithSize(naType, len(ovsMap.GoMap)) + for k, v := range ovsMap.GoMap { + nk, err := OvsToNativeAtomic(column.TypeObj.Key.Type, k) + if err != nil { + return nil, err + } + nv, err := OvsToNativeAtomic(column.TypeObj.Value.Type, v) + if err != nil { + return nil, err + } + nativeMap.SetMapIndex(reflect.ValueOf(nk), reflect.ValueOf(nv)) + } + return nativeMap.Interface(), nil + default: + panic(fmt.Sprintf("Unknown Type: %v", column.Type)) + } +} + +// NativeToOvsAtomic returns the OVS type of the atomic native value +func NativeToOvsAtomic(basicType string, nativeElem interface{}) (interface{}, error) { + naType := NativeTypeFromAtomic(basicType) + if reflect.TypeOf(nativeElem) != naType { + return nil, NewErrWrongType("NativeToOvsAtomic", naType.String(), nativeElem) + } + switch basicType { + case TypeUUID: + return UUID{GoUUID: nativeElem.(string)}, nil + default: + return nativeElem, nil + } +} + +// NativeToOvs transforms an native type to a ovs type based on the column type information +func NativeToOvs(column *ColumnSchema, rawElem interface{}) (interface{}, error) { + naType := NativeType(column) + if t := reflect.TypeOf(rawElem); t != naType { + return nil, NewErrWrongType("NativeToOvs", naType.String(), rawElem) + } + + switch column.Type { + case TypeInteger, TypeReal, TypeString, TypeBoolean, TypeEnum: + return rawElem, nil + case TypeUUID: + return UUID{GoUUID: rawElem.(string)}, nil + case TypeSet: + var ovsSet OvsSet + if 
column.TypeObj.Key.Type == TypeUUID { + ovsSlice := []interface{}{} + if _, ok := rawElem.([]string); ok { + for _, v := range rawElem.([]string) { + uuid := UUID{GoUUID: v} + ovsSlice = append(ovsSlice, uuid) + } + } else if _, ok := rawElem.(*string); ok { + v := rawElem.(*string) + if v != nil { + uuid := UUID{GoUUID: *v} + ovsSlice = append(ovsSlice, uuid) + } + } else { + return nil, fmt.Errorf("uuid slice was neither []string or *string") + } + ovsSet = OvsSet{GoSet: ovsSlice} + + } else { + var err error + ovsSet, err = NewOvsSet(rawElem) + if err != nil { + return nil, err + } + } + return ovsSet, nil + case TypeMap: + nativeMapVal := reflect.ValueOf(rawElem) + ovsMap := make(map[interface{}]interface{}, nativeMapVal.Len()) + for _, key := range nativeMapVal.MapKeys() { + ovsKey, err := NativeToOvsAtomic(column.TypeObj.Key.Type, key.Interface()) + if err != nil { + return nil, err + } + ovsVal, err := NativeToOvsAtomic(column.TypeObj.Value.Type, nativeMapVal.MapIndex(key).Interface()) + if err != nil { + return nil, err + } + ovsMap[ovsKey] = ovsVal + } + return OvsMap{GoMap: ovsMap}, nil + + default: + panic(fmt.Sprintf("Unknown Type: %v", column.Type)) + } +} + +// IsDefaultValue checks if a provided native element corresponds to the default value of its +// designated column type +func IsDefaultValue(column *ColumnSchema, nativeElem interface{}) bool { + switch column.Type { + case TypeEnum: + return isDefaultBaseValue(nativeElem, column.TypeObj.Key.Type) + default: + return isDefaultBaseValue(nativeElem, column.Type) + } +} + +// ValidateMutationAtomic checks if the mutation is valid for a specific AtomicType +func validateMutationAtomic(atype string, mutator Mutator, value interface{}) error { + nType := NativeTypeFromAtomic(atype) + if reflect.TypeOf(value) != nType { + return NewErrWrongType(fmt.Sprintf("Mutation of atomic type %s", atype), nType.String(), value) + } + + switch atype { + case TypeUUID, TypeString, TypeBoolean: + return fmt.Errorf("atomictype %s does not support mutation", atype) + case TypeReal: + switch mutator { + case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide: + return nil + default: + return fmt.Errorf("wrong mutator for real type %s", mutator) + } + case TypeInteger: + switch mutator { + case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo: + return nil + default: + return fmt.Errorf("wrong mutator for integer type: %s", mutator) + } + default: + panic("Unsupported Atomic Type") + } +} + +// ValidateMutation checks if the mutation value and mutator string area appropriate +// for a given column based on the rules specified RFC7047 +func ValidateMutation(column *ColumnSchema, mutator Mutator, value interface{}) error { + if !column.Mutable() { + return fmt.Errorf("column is not mutable") + } + switch column.Type { + case TypeSet: + switch mutator { + case MutateOperationInsert, MutateOperationDelete: + // RFC7047 says a may be an with a single + // element. 
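
A small sketch of the conversion helpers above (NativeToOvs and OvsToNative); it assumes the parsed schema has a Bridge table whose external_ids column is a string-to-string map.

    // Sketch only; assumed import: ovsdb.
    func roundTripExternalIDs(schema ovsdb.DatabaseSchema) error {
        col := schema.Table("Bridge").Column("external_ids")
        ovsVal, err := ovsdb.NativeToOvs(col, map[string]string{"owner": "netobserv"})
        if err != nil {
            return err // a type mismatch surfaces as *ErrWrongType
        }
        native, err := ovsdb.OvsToNative(col, ovsVal) // back to map[string]string
        if err != nil {
            return err
        }
        _ = native
        return nil
    }
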
Check if we can store this value in our column + if reflect.TypeOf(value).Kind() != reflect.Slice { + if NativeType(column) != reflect.SliceOf(reflect.TypeOf(value)) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of single value in to column %s", mutator, column), + NativeType(column).String(), reflect.SliceOf(reflect.TypeOf(value)).String()) + } + return nil + } + if NativeType(column) != reflect.TypeOf(value) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + NativeType(column).String(), value) + } + return nil + default: + return validateMutationAtomic(column.TypeObj.Key.Type, mutator, value) + } + case TypeMap: + switch mutator { + case MutateOperationInsert: + // Value must be a map of the same kind + if reflect.TypeOf(value) != NativeType(column) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + NativeType(column).String(), value) + } + return nil + case MutateOperationDelete: + // Value must be a map of the same kind or a set of keys to delete + if reflect.TypeOf(value) != NativeType(column) && + reflect.TypeOf(value) != reflect.SliceOf(NativeTypeFromAtomic(column.TypeObj.Key.Type)) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + "compatible map type", value) + } + return nil + + default: + return fmt.Errorf("wrong mutator for map type: %s", mutator) + } + case TypeEnum: + // RFC does not clarify what to do with enums. + return fmt.Errorf("enums do not support mutation") + default: + return validateMutationAtomic(column.Type, mutator, value) + } +} + +func ValidateCondition(column *ColumnSchema, function ConditionFunction, nativeValue interface{}) error { + if NativeType(column) != reflect.TypeOf(nativeValue) { + return NewErrWrongType(fmt.Sprintf("Condition for column %s", column), + NativeType(column).String(), nativeValue) + } + + switch column.Type { + case TypeSet, TypeMap, TypeBoolean, TypeString, TypeUUID: + switch function { + case ConditionEqual, ConditionNotEqual, ConditionIncludes, ConditionExcludes: + return nil + default: + return fmt.Errorf("wrong condition function %s for type: %s", function, column.Type) + } + case TypeInteger, TypeReal: + // All functions are valid + return nil + default: + panic("Unsupported Type") + } +} + +func isDefaultBaseValue(elem interface{}, etype ExtendedType) bool { + value := reflect.ValueOf(elem) + if !value.IsValid() { + return true + } + if reflect.TypeOf(elem).Kind() == reflect.Ptr { + return reflect.ValueOf(elem).IsZero() + } + switch etype { + case TypeUUID: + return elem.(string) == "00000000-0000-0000-0000-000000000000" || elem.(string) == "" + case TypeMap, TypeSet: + if value.Kind() == reflect.Array { + return value.Len() == 0 + } + return value.IsNil() || value.Len() == 0 + case TypeString: + return elem.(string) == "" + case TypeInteger: + return elem.(int) == 0 + case TypeReal: + return elem.(float64) == 0 + default: + return false + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go new file mode 100644 index 000000000..783ac0f55 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go @@ -0,0 +1,223 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "reflect" +) + +type ConditionFunction string +type WaitCondition string + +const ( + // ConditionLessThan is the less than condition + ConditionLessThan ConditionFunction = "<" + // ConditionLessThanOrEqual is the less than or equal condition + 
ConditionLessThanOrEqual ConditionFunction = "<=" + // ConditionEqual is the equal condition + ConditionEqual ConditionFunction = "==" + // ConditionNotEqual is the not equal condition + ConditionNotEqual ConditionFunction = "!=" + // ConditionGreaterThan is the greater than condition + ConditionGreaterThan ConditionFunction = ">" + // ConditionGreaterThanOrEqual is the greater than or equal condition + ConditionGreaterThanOrEqual ConditionFunction = ">=" + // ConditionIncludes is the includes condition + ConditionIncludes ConditionFunction = "includes" + // ConditionExcludes is the excludes condition + ConditionExcludes ConditionFunction = "excludes" + + // WaitConditionEqual is the equal condition + WaitConditionEqual WaitCondition = "==" + // WaitConditionNotEqual is the not equal condition + WaitConditionNotEqual WaitCondition = "!=" +) + +// Condition is described in RFC 7047: 5.1 +type Condition struct { + Column string + Function ConditionFunction + Value interface{} +} + +func (c Condition) String() string { + return fmt.Sprintf("where column %s %s %v", c.Column, c.Function, c.Value) +} + +// NewCondition returns a new condition +func NewCondition(column string, function ConditionFunction, value interface{}) Condition { + return Condition{ + Column: column, + Function: function, + Value: value, + } +} + +// MarshalJSON marshals a condition to a 3 element JSON array +func (c Condition) MarshalJSON() ([]byte, error) { + v := []interface{}{c.Column, c.Function, c.Value} + return json.Marshal(v) +} + +// UnmarshalJSON converts a 3 element JSON array to a Condition +func (c *Condition) UnmarshalJSON(b []byte) error { + var v []interface{} + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + if len(v) != 3 { + return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v)) + } + c.Column = v[0].(string) + function := ConditionFunction(v[1].(string)) + switch function { + case ConditionEqual, + ConditionNotEqual, + ConditionIncludes, + ConditionExcludes, + ConditionGreaterThan, + ConditionGreaterThanOrEqual, + ConditionLessThan, + ConditionLessThanOrEqual: + c.Function = function + default: + return fmt.Errorf("%s is not a valid function", function) + } + vv, err := ovsSliceToGoNotation(v[2]) + if err != nil { + return err + } + c.Value = vv + return nil +} + +// Evaluate will evaluate the condition on the two provided values +// The conditions operately differently depending on the type of +// the provided values. 
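
For reference, the wire form these conditions take; a minimal sketch using only NewCondition and the MarshalJSON method defined above.

    // Sketch only; assumed imports: encoding/json, ovsdb.
    func marshalCondition() ([]byte, error) {
        c := ovsdb.NewCondition("name", ovsdb.ConditionEqual, "br-int")
        // Encodes as the RFC 7047 three-element array: ["name","==","br-int"]
        return json.Marshal(c)
    }
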
The behavior is as described in RFC7047 +func (c ConditionFunction) Evaluate(a interface{}, b interface{}) (bool, error) { + x := reflect.ValueOf(a) + y := reflect.ValueOf(b) + if x.Kind() != y.Kind() { + return false, fmt.Errorf("comparison between %s and %s not supported", x.Kind(), y.Kind()) + } + switch c { + case ConditionEqual: + return reflect.DeepEqual(a, b), nil + case ConditionNotEqual: + return !reflect.DeepEqual(a, b), nil + case ConditionIncludes: + switch x.Kind() { + case reflect.Slice: + return sliceContains(x, y), nil + case reflect.Map: + return mapContains(x, y), nil + case reflect.Int, reflect.Float64, reflect.Bool, reflect.String: + return reflect.DeepEqual(a, b), nil + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionExcludes: + switch x.Kind() { + case reflect.Slice: + return !sliceContains(x, y), nil + case reflect.Map: + return !mapContains(x, y), nil + case reflect.Int, reflect.Float64, reflect.Bool, reflect.String: + return !reflect.DeepEqual(a, b), nil + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionGreaterThan: + switch x.Kind() { + case reflect.Int: + return x.Int() > y.Int(), nil + case reflect.Float64: + return x.Float() > y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionGreaterThanOrEqual: + switch x.Kind() { + case reflect.Int: + return x.Int() >= y.Int(), nil + case reflect.Float64: + return x.Float() >= y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionLessThan: + switch x.Kind() { + case reflect.Int: + return x.Int() < y.Int(), nil + case reflect.Float64: + return x.Float() < y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionLessThanOrEqual: + switch x.Kind() { + case reflect.Int: + return x.Int() <= y.Int(), nil + case reflect.Float64: + return x.Float() <= y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + default: + return false, fmt.Errorf("unsupported condition function %s", c) + } + // we should never get here + return false, fmt.Errorf("unreachable condition") +} + +func sliceContains(x, y reflect.Value) bool { + for i := 0; i < y.Len(); i++ { + found := false + vy := y.Index(i) + for j := 0; j < x.Len(); j++ { + vx := x.Index(j) + if vy.Kind() == reflect.Interface { + if vy.Elem() == vx.Elem() { + found = true + break + } + } else { + if vy.Interface() == vx.Interface() { + found = true + break + } + } + } + if !found { + return false + } + } + return true +} + +func mapContains(x, y reflect.Value) bool { + iter := y.MapRange() + for iter.Next() { + k := iter.Key() + v := iter.Value() + vx := x.MapIndex(k) + if !vx.IsValid() { + return false + } + if v.Kind() != reflect.Interface { + if v.Interface() != vx.Interface() { + return false + } + } else { + if v.Elem() != vx.Elem() { + return false + } + } + } + return true +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go new file mode 100644 index 000000000..4a85c541c --- /dev/null +++ 
b/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go @@ -0,0 +1,373 @@ +package ovsdb + +import "fmt" + +const ( + referentialIntegrityViolation = "referential integrity violation" + constraintViolation = "constraint violation" + resourcesExhausted = "resources exhausted" + ioError = "I/O error" + duplicateUUIDName = "duplicate uuid name" + domainError = "domain error" + rangeError = "range error" + timedOut = "timed out" + notSupported = "not supported" + aborted = "aborted" + notOwner = "not owner" +) + +// errorFromResult returns an specific OVSDB error type from +// an OperationResult +func errorFromResult(op *Operation, r OperationResult) OperationError { + if r.Error == "" { + return nil + } + switch r.Error { + case referentialIntegrityViolation: + return &ReferentialIntegrityViolation{r.Details, op} + case constraintViolation: + return &ConstraintViolation{r.Details, op} + case resourcesExhausted: + return &ResourcesExhausted{r.Details, op} + case ioError: + return &IOError{r.Details, op} + case duplicateUUIDName: + return &DuplicateUUIDName{r.Details, op} + case domainError: + return &DomainError{r.Details, op} + case rangeError: + return &RangeError{r.Details, op} + case timedOut: + return &TimedOut{r.Details, op} + case notSupported: + return &NotSupported{r.Details, op} + case aborted: + return &Aborted{r.Details, op} + case notOwner: + return &NotOwner{r.Details, op} + default: + return &Error{r.Error, r.Details, op} + } +} + +func ResultFromError(err error) OperationResult { + if err == nil { + panic("Program error: passed nil error to resultFromError") + } + switch e := err.(type) { + case *ReferentialIntegrityViolation: + return OperationResult{Error: referentialIntegrityViolation, Details: e.details} + case *ConstraintViolation: + return OperationResult{Error: constraintViolation, Details: e.details} + case *ResourcesExhausted: + return OperationResult{Error: resourcesExhausted, Details: e.details} + case *IOError: + return OperationResult{Error: ioError, Details: e.details} + case *DuplicateUUIDName: + return OperationResult{Error: duplicateUUIDName, Details: e.details} + case *DomainError: + return OperationResult{Error: domainError, Details: e.details} + case *RangeError: + return OperationResult{Error: rangeError, Details: e.details} + case *TimedOut: + return OperationResult{Error: timedOut, Details: e.details} + case *NotSupported: + return OperationResult{Error: notSupported, Details: e.details} + case *Aborted: + return OperationResult{Error: aborted, Details: e.details} + case *NotOwner: + return OperationResult{Error: notOwner, Details: e.details} + default: + return OperationResult{Error: e.Error()} + } +} + +// CheckOperationResults checks whether the provided operation was a success +// If the operation was a success, it will return nil, nil +// If the operation failed, due to a error committing the transaction it will +// return nil, error. +// Finally, in the case where one or more of the operations in the transaction +// failed, we return []OperationErrors, error +// Within []OperationErrors, the OperationErrors.Index() corresponds to the same index in +// the original Operations struct. You may also perform type assertions against +// the error so the caller can decide how best to handle it +func CheckOperationResults(result []OperationResult, ops []Operation) ([]OperationError, error) { + // this shouldn't happen, but we'll cover the case to be certain + if len(result) < len(ops) { + return nil, fmt.Errorf("ovsdb transaction error. 
%d operations submitted but only %d results received", len(ops), len(result)) + } + var errs []OperationError + for i, op := range result { + // RFC 7047: if all of the operations succeed, but the results cannot + // be committed, then "result" will have one more element than "params", + // with the additional element being an . + if i >= len(ops) { + return errs, errorFromResult(nil, op) + } + if err := errorFromResult(&ops[i], op); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errs, fmt.Errorf("%d ovsdb operations failed", len(errs)) + } + return nil, nil +} + +// OperationError represents an error that occurred as part of an +// OVSDB Operation +type OperationError interface { + error + // Operation is a pointer to the operation which caused the error + Operation() *Operation +} + +// ReferentialIntegrityViolation is explained in RFC 7047 4.1.3 +type ReferentialIntegrityViolation struct { + details string + operation *Operation +} + +func NewReferentialIntegrityViolation(details string) *ReferentialIntegrityViolation { + return &ReferentialIntegrityViolation{details: details} +} + +// Error implements the error interface +func (e *ReferentialIntegrityViolation) Error() string { + msg := referentialIntegrityViolation + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ReferentialIntegrityViolation) Operation() *Operation { + return e.operation +} + +// ConstraintViolation is described in RFC 7047: 4.1.3 +type ConstraintViolation struct { + details string + operation *Operation +} + +func NewConstraintViolation(details string) *ConstraintViolation { + return &ConstraintViolation{details: details} +} + +// Error implements the error interface +func (e *ConstraintViolation) Error() string { + msg := constraintViolation + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ConstraintViolation) Operation() *Operation { + return e.operation +} + +// ResourcesExhausted is described in RFC 7047: 4.1.3 +type ResourcesExhausted struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *ResourcesExhausted) Error() string { + msg := resourcesExhausted + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ResourcesExhausted) Operation() *Operation { + return e.operation +} + +// IOError is described in RFC7047: 4.1.3 +type IOError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *IOError) Error() string { + msg := ioError + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *IOError) Operation() *Operation { + return e.operation +} + +// DuplicateUUIDName is described in RFC7047 5.2.1 +type DuplicateUUIDName struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *DuplicateUUIDName) Error() string { + msg := duplicateUUIDName + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *DuplicateUUIDName) Operation() *Operation { + return e.operation +} + +// DomainError is described in RFC 7047: 5.2.4 +type DomainError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e 
*DomainError) Error() string { + msg := domainError + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *DomainError) Operation() *Operation { + return e.operation +} + +// RangeError is described in RFC 7047: 5.2.4 +type RangeError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *RangeError) Error() string { + msg := rangeError + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *RangeError) Operation() *Operation { + return e.operation +} + +// TimedOut is described in RFC 7047: 5.2.6 +type TimedOut struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *TimedOut) Error() string { + msg := timedOut + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *TimedOut) Operation() *Operation { + return e.operation +} + +// NotSupported is described in RFC 7047: 5.2.7 +type NotSupported struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *NotSupported) Error() string { + msg := notSupported + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *NotSupported) Operation() *Operation { + return e.operation +} + +// Aborted is described in RFC 7047: 5.2.8 +type Aborted struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *Aborted) Error() string { + msg := aborted + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *Aborted) Operation() *Operation { + return e.operation +} + +// NotOwner is described in RFC 7047: 5.2.9 +type NotOwner struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *NotOwner) Error() string { + msg := notOwner + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *NotOwner) Operation() *Operation { + return e.operation +} + +// Error is a generic OVSDB Error type that implements the +// OperationError and error interfaces +type Error struct { + name string + details string + operation *Operation +} + +// Error implements the error interface +func (e *Error) Error() string { + msg := e.name + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *Error) Operation() *Operation { + return e.operation +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go new file mode 100644 index 000000000..893a9774f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go @@ -0,0 +1,92 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// OvsMap is the JSON map structure used for OVSDB +// RFC 7047 uses the following notation for map as JSON doesn't support non-string keys for maps. +// A 2-element JSON array that represents a database map value. The +// first element of the array must be the string "map", and the +// second element must be an array of zero or more s giving the +// values in the map. All of the s must have the same key and +// value types. 
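// Illustrative wire-format sketch (an editor's example derived from the
// MarshalJSON implementation below, not part of the upstream comment):
//
//	m, _ := NewOvsMap(map[string]string{"hwaddr": "aa:bb:cc:dd:ee:ff"})
//	b, _ := json.Marshal(m) // b == ["map",[["hwaddr","aa:bb:cc:dd:ee:ff"]]]
//	// an empty map marshals to ["map",[]]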
+type OvsMap struct { + GoMap map[interface{}]interface{} +} + +// MarshalJSON marshalls an OVSDB style Map to a byte array +func (o OvsMap) MarshalJSON() ([]byte, error) { + if len(o.GoMap) > 0 { + var ovsMap, innerMap []interface{} + ovsMap = append(ovsMap, "map") + for key, val := range o.GoMap { + var mapSeg []interface{} + mapSeg = append(mapSeg, key) + mapSeg = append(mapSeg, val) + innerMap = append(innerMap, mapSeg) + } + ovsMap = append(ovsMap, innerMap) + return json.Marshal(ovsMap) + } + return []byte("[\"map\",[]]"), nil +} + +// UnmarshalJSON unmarshals an OVSDB style Map from a byte array +func (o *OvsMap) UnmarshalJSON(b []byte) (err error) { + var oMap []interface{} + o.GoMap = make(map[interface{}]interface{}) + if err := json.Unmarshal(b, &oMap); err == nil && len(oMap) > 1 { + innerSlice := oMap[1].([]interface{}) + for _, val := range innerSlice { + f := val.([]interface{}) + var k interface{} + switch f[0].(type) { + case []interface{}: + vSet := f[0].([]interface{}) + if len(vSet) != 2 || vSet[0] == "map" { + return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)} + } + goSlice, err := ovsSliceToGoNotation(vSet) + if err != nil { + return err + } + k = goSlice + default: + k = f[0] + } + switch f[1].(type) { + case []interface{}: + vSet := f[1].([]interface{}) + if len(vSet) != 2 || vSet[0] == "map" { + return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)} + } + goSlice, err := ovsSliceToGoNotation(vSet) + if err != nil { + return err + } + o.GoMap[k] = goSlice + default: + o.GoMap[k] = f[1] + } + } + } + return err +} + +// NewOvsMap will return an OVSDB style map from a provided Golang Map +func NewOvsMap(goMap interface{}) (OvsMap, error) { + v := reflect.ValueOf(goMap) + if v.Kind() != reflect.Map { + return OvsMap{}, fmt.Errorf("ovsmap supports only go map types") + } + + genMap := make(map[interface{}]interface{}) + keys := v.MapKeys() + for _, key := range keys { + genMap[key.Interface()] = v.MapIndex(key).Interface() + } + return OvsMap{genMap}, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go new file mode 100644 index 000000000..b97e06285 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go @@ -0,0 +1,88 @@ +package ovsdb + +import "encoding/json" + +// MonitorSelect represents a monitor select according to RFC7047 +type MonitorSelect struct { + initial *bool + insert *bool + delete *bool + modify *bool +} + +// NewMonitorSelect returns a new MonitorSelect with the provided values +func NewMonitorSelect(initial, insert, delete, modify bool) *MonitorSelect { + return &MonitorSelect{ + initial: &initial, + insert: &insert, + delete: &delete, + modify: &modify, + } +} + +// NewDefaultMonitorSelect returns a new MonitorSelect with default values +func NewDefaultMonitorSelect() *MonitorSelect { + return NewMonitorSelect(true, true, true, true) +} + +// Initial returns whether or not an initial response will be sent +func (m MonitorSelect) Initial() bool { + if m.initial == nil { + return true + } + return *m.initial +} + +// Insert returns whether we will receive updates for inserts +func (m MonitorSelect) Insert() bool { + if m.insert == nil { + return true + } + return *m.insert +} + +// Delete returns whether we will receive updates for deletions +func (m MonitorSelect) Delete() bool { + if m.delete == nil { + return true + } + return *m.delete +} + +// Modify 
returns whether we will receive updates for modifications +func (m MonitorSelect) Modify() bool { + if m.modify == nil { + return true + } + return *m.modify +} + +type monitorSelect struct { + Initial *bool `json:"initial,omitempty"` + Insert *bool `json:"insert,omitempty"` + Delete *bool `json:"delete,omitempty"` + Modify *bool `json:"modify,omitempty"` +} + +func (m MonitorSelect) MarshalJSON() ([]byte, error) { + ms := monitorSelect{ + Initial: m.initial, + Insert: m.insert, + Delete: m.delete, + Modify: m.modify, + } + return json.Marshal(ms) +} + +func (m *MonitorSelect) UnmarshalJSON(data []byte) error { + var ms monitorSelect + err := json.Unmarshal(data, &ms) + if err != nil { + return err + } + m.initial = ms.Initial + m.insert = ms.Insert + m.delete = ms.Delete + m.modify = ms.Modify + return nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go new file mode 100644 index 000000000..dc8b0f6d4 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go @@ -0,0 +1,87 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" +) + +type Mutator string + +const ( + // MutateOperationDelete is the delete mutator + MutateOperationDelete Mutator = "delete" + // MutateOperationInsert is the insert mutator + MutateOperationInsert Mutator = "insert" + // MutateOperationAdd is the add mutator + MutateOperationAdd Mutator = "+=" + // MutateOperationSubtract is the subtract mutator + MutateOperationSubtract Mutator = "-=" + // MutateOperationMultiply is the multiply mutator + MutateOperationMultiply Mutator = "*=" + // MutateOperationDivide is the divide mutator + MutateOperationDivide Mutator = "/=" + // MutateOperationModulo is the modulo mutator + MutateOperationModulo Mutator = "%=" +) + +// Mutation is described in RFC 7047: 5.1 +type Mutation struct { + Column string + Mutator Mutator + Value interface{} +} + +// NewMutation returns a new mutation +func NewMutation(column string, mutator Mutator, value interface{}) *Mutation { + return &Mutation{ + Column: column, + Mutator: mutator, + Value: value, + } +} + +// MarshalJSON marshals a mutation to a 3 element JSON array +func (m Mutation) MarshalJSON() ([]byte, error) { + v := []interface{}{m.Column, m.Mutator, m.Value} + return json.Marshal(v) +} + +// UnmarshalJSON converts a 3 element JSON array to a Mutation +func (m *Mutation) UnmarshalJSON(b []byte) error { + var v []interface{} + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + if len(v) != 3 { + return fmt.Errorf("expected a 3 element json array. 
there are %d elements", len(v)) + } + ok := false + m.Column, ok = v[0].(string) + if !ok { + return fmt.Errorf("expected column name %v to be a valid string", v[0]) + } + mutatorString, ok := v[1].(string) + if !ok { + return fmt.Errorf("expected mutator %v to be a valid string", v[1]) + } + mutator := Mutator(mutatorString) + switch mutator { + case MutateOperationDelete, + MutateOperationInsert, + MutateOperationAdd, + MutateOperationSubtract, + MutateOperationMultiply, + MutateOperationDivide, + MutateOperationModulo: + m.Mutator = mutator + default: + return fmt.Errorf("%s is not a valid mutator", mutator) + } + vv, err := ovsSliceToGoNotation(v[2]) + if err != nil { + return err + } + m.Value = vv + return nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go new file mode 100644 index 000000000..29034ee9d --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go @@ -0,0 +1,165 @@ +package ovsdb + +import ( + "fmt" +) + +// ExpandNamedUUIDs replaces named UUIDs in columns that contain UUID types +// throughout the operation. The caller must ensure each input operation has +// a valid UUID, which may be replaced if a previous operation created a +// matching named UUID mapping. Returns the updated operations or an error. +func ExpandNamedUUIDs(ops []Operation, schema *DatabaseSchema) ([]Operation, error) { + uuidMap := make(map[string]string) + + // Pass 1: replace the named UUID with a real UUID for each operation and + // build the substitution map + for i := range ops { + op := &ops[i] + if op.Op != OperationInsert { + // Only Insert operations can specify a Named UUID + continue + } + + if err := ValidateUUID(op.UUID); err != nil { + return nil, fmt.Errorf("operation UUID %q invalid: %v", op.UUID, err) + } + + if op.UUIDName != "" { + if uuid, ok := uuidMap[op.UUIDName]; ok { + if op.UUID != "" && op.UUID != uuid { + return nil, fmt.Errorf("named UUID %q maps to UUID %q but found existing UUID %q", + op.UUIDName, uuid, op.UUID) + } + // If there's already a mapping for this named UUID use it + op.UUID = uuid + } else { + uuidMap[op.UUIDName] = op.UUID + } + op.UUIDName = "" + } + } + + // Pass 2: replace named UUIDs in operation fields with the real UUID + for i := range ops { + op := &ops[i] + tableSchema := schema.Table(op.Table) + if tableSchema == nil { + return nil, fmt.Errorf("table %q not found in schema %q", op.Table, schema.Name) + } + + for i, condition := range op.Where { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, condition.Column, condition.Value, uuidMap) + if err != nil { + return nil, err + } + op.Where[i].Value = newVal + } + for i, mutation := range op.Mutations { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, mutation.Column, mutation.Value, uuidMap) + if err != nil { + return nil, err + } + op.Mutations[i].Value = newVal + } + for _, row := range op.Rows { + for k, v := range row { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap) + if err != nil { + return nil, err + } + row[k] = newVal + } + } + for k, v := range op.Row { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap) + if err != nil { + return nil, err + } + op.Row[k] = newVal + } + } + + return ops, nil +} + +func expandColumnNamedUUIDs(tableSchema *TableSchema, tableName, columnName string, value interface{}, uuidMap map[string]string) (interface{}, error) { + column := tableSchema.Column(columnName) + if column == nil { 
+ return nil, fmt.Errorf("column %q not found in table %q", columnName, tableName) + } + return expandNamedUUID(column, value, uuidMap), nil +} + +func expandNamedUUID(column *ColumnSchema, value interface{}, namedUUIDs map[string]string) interface{} { + var keyType, valType ExtendedType + + switch column.Type { + case TypeUUID: + keyType = column.Type + case TypeSet: + keyType = column.TypeObj.Key.Type + case TypeMap: + keyType = column.TypeObj.Key.Type + valType = column.TypeObj.Value.Type + } + + if valType == TypeUUID { + if m, ok := value.(OvsMap); ok { + for k, v := range m.GoMap { + if newUUID, ok := expandNamedUUIDAtomic(keyType, k, namedUUIDs); ok { + m.GoMap[newUUID] = m.GoMap[k] + delete(m.GoMap, k) + k = newUUID + } + if newUUID, ok := expandNamedUUIDAtomic(valType, v, namedUUIDs); ok { + m.GoMap[k] = newUUID + } + } + } + } else if keyType == TypeUUID { + if ovsSet, ok := value.(OvsSet); ok { + for i, s := range ovsSet.GoSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + ovsSet.GoSet[i] = newUUID + } + } + return value + } else if strSet, ok := value.([]string); ok { + for i, s := range strSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + strSet[i] = newUUID.(string) + } + } + return value + } else if uuidSet, ok := value.([]UUID); ok { + for i, s := range uuidSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + uuidSet[i] = newUUID.(UUID) + } + } + return value + } + + if newUUID, ok := expandNamedUUIDAtomic(keyType, value, namedUUIDs); ok { + return newUUID + } + } + + // No expansion required; return original value + return value +} + +func expandNamedUUIDAtomic(valueType ExtendedType, value interface{}, namedUUIDs map[string]string) (interface{}, bool) { + if valueType == TypeUUID { + if uuid, ok := value.(UUID); ok { + if newUUID, ok := namedUUIDs[uuid.GoUUID]; ok { + return UUID{GoUUID: newUUID}, true + } + } else if uuid, ok := value.(string); ok { + if newUUID, ok := namedUUIDs[uuid]; ok { + return newUUID, true + } + } + } + return value, false +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go new file mode 100644 index 000000000..afad87cdc --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go @@ -0,0 +1,129 @@ +package ovsdb + +import ( + "encoding/json" +) + +const ( + // OperationInsert is an insert operation + OperationInsert = "insert" + // OperationSelect is a select operation + OperationSelect = "select" + // OperationUpdate is an update operation + OperationUpdate = "update" + // OperationMutate is a mutate operation + OperationMutate = "mutate" + // OperationDelete is a delete operation + OperationDelete = "delete" + // OperationWait is a wait operation + OperationWait = "wait" + // OperationCommit is a commit operation + OperationCommit = "commit" + // OperationAbort is an abort operation + OperationAbort = "abort" + // OperationComment is a comment operation + OperationComment = "comment" + // OperationAssert is an assert operation + OperationAssert = "assert" +) + +// Operation represents an operation according to RFC7047 section 5.2 +type Operation struct { + Op string `json:"op"` + Table string `json:"table,omitempty"` + Row Row `json:"row,omitempty"` + Rows []Row `json:"rows,omitempty"` + Columns []string `json:"columns,omitempty"` + Mutations []Mutation `json:"mutations,omitempty"` + Timeout *int `json:"timeout,omitempty"` + Where []Condition `json:"where,omitempty"` + Until 
string `json:"until,omitempty"` + Durable *bool `json:"durable,omitempty"` + Comment *string `json:"comment,omitempty"` + Lock *string `json:"lock,omitempty"` + UUID string `json:"uuid,omitempty"` + UUIDName string `json:"uuid-name,omitempty"` +} + +// MarshalJSON marshalls 'Operation' to a byte array +// For 'select' operations, we don't omit the 'Where' field +// to allow selecting all rows of a table +func (o Operation) MarshalJSON() ([]byte, error) { + type OpAlias Operation + switch o.Op { + case "select": + where := o.Where + if where == nil { + where = make([]Condition, 0) + } + return json.Marshal(&struct { + Where []Condition `json:"where"` + OpAlias + }{ + Where: where, + OpAlias: (OpAlias)(o), + }) + default: + return json.Marshal(&struct { + OpAlias + }{ + OpAlias: (OpAlias)(o), + }) + } +} + +// MonitorRequests represents a group of monitor requests according to RFC7047 +// We cannot use MonitorRequests by inlining the MonitorRequest Map structure till GoLang issue #6213 makes it. +// The only option is to go with raw map[string]interface{} option :-( that sucks ! +// Refer to client.go : MonitorAll() function for more details +type MonitorRequests struct { + Requests map[string]MonitorRequest `json:"requests"` +} + +// MonitorRequest represents a monitor request according to RFC7047 +type MonitorRequest struct { + Columns []string `json:"columns,omitempty"` + Where []Condition `json:"where,omitempty"` + Select *MonitorSelect `json:"select,omitempty"` +} + +// TransactResponse represents the response to a Transact Operation +type TransactResponse struct { + Result []OperationResult `json:"result"` + Error string `json:"error"` +} + +// OperationResult is the result of an Operation +type OperationResult struct { + Count int `json:"count,omitempty"` + Error string `json:"error,omitempty"` + Details string `json:"details,omitempty"` + UUID UUID `json:"uuid,omitempty"` + Rows []Row `json:"rows,omitempty"` +} + +func ovsSliceToGoNotation(val interface{}) (interface{}, error) { + switch sl := val.(type) { + case []interface{}: + bsliced, err := json.Marshal(sl) + if err != nil { + return nil, err + } + switch sl[0] { + case "uuid", "named-uuid": + var uuid UUID + err = json.Unmarshal(bsliced, &uuid) + return uuid, err + case "set": + var oSet OvsSet + err = json.Unmarshal(bsliced, &oSet) + return oSet, err + case "map": + var oMap OvsMap + err = json.Unmarshal(bsliced, &oMap) + return oMap, err + } + return val, nil + } + return val, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go new file mode 100644 index 000000000..9a253f74f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go @@ -0,0 +1,26 @@ +package ovsdb + +import "encoding/json" + +// Row is a table Row according to RFC7047 +type Row map[string]interface{} + +// UnmarshalJSON unmarshalls a byte array to an OVSDB Row +func (r *Row) UnmarshalJSON(b []byte) (err error) { + *r = make(map[string]interface{}) + var raw map[string]interface{} + err = json.Unmarshal(b, &raw) + for key, val := range raw { + val, err = ovsSliceToGoNotation(val) + if err != nil { + return err + } + (*r)[key] = val + } + return err +} + +// NewRow returns a new empty row +func NewRow() Row { + return Row(make(map[string]interface{})) +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go new file mode 100644 index 000000000..f1e598005 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go @@ -0,0 
+1,79 @@ +package ovsdb + +const ( + // MonitorRPC is the monitor RPC method + MonitorRPC = "monitor" + // ConditionalMonitorRPC is the monitor_cond + ConditionalMonitorRPC = "monitor_cond" + // ConditionalMonitorSinceRPC is the monitor_cond_since RPC method + ConditionalMonitorSinceRPC = "monitor_cond_since" +) + +// NewEchoArgs creates a new set of arguments for an echo RPC +func NewEchoArgs() []interface{} { + return []interface{}{"libovsdb echo"} +} + +// NewGetSchemaArgs creates a new set of arguments for a get_schemas RPC +func NewGetSchemaArgs(schema string) []interface{} { + return []interface{}{schema} +} + +// NewTransactArgs creates a new set of arguments for a transact RPC +func NewTransactArgs(database string, operations ...Operation) []interface{} { + dbSlice := make([]interface{}, 1) + dbSlice[0] = database + + opsSlice := make([]interface{}, len(operations)) + for i, d := range operations { + opsSlice[i] = d + } + + ops := append(dbSlice, opsSlice...) + return ops +} + +// NewCancelArgs creates a new set of arguments for a cancel RPC +func NewCancelArgs(id interface{}) []interface{} { + return []interface{}{id} +} + +// NewMonitorArgs creates a new set of arguments for a monitor RPC +func NewMonitorArgs(database string, value interface{}, requests map[string]MonitorRequest) []interface{} { + return []interface{}{database, value, requests} +} + +// NewMonitorCondSinceArgs creates a new set of arguments for a monitor_cond_since RPC +func NewMonitorCondSinceArgs(database string, value interface{}, requests map[string]MonitorRequest, lastTransactionID string) []interface{} { + return []interface{}{database, value, requests, lastTransactionID} +} + +// NewMonitorCancelArgs creates a new set of arguments for a monitor_cancel RPC +func NewMonitorCancelArgs(value interface{}) []interface{} { + return []interface{}{value} +} + +// NewLockArgs creates a new set of arguments for a lock, steal or unlock RPC +func NewLockArgs(id interface{}) []interface{} { + return []interface{}{id} +} + +// NotificationHandler is the interface that must be implemented to receive notifications +type NotificationHandler interface { + // RFC 7047 section 4.1.6 Update Notification + Update(context interface{}, tableUpdates TableUpdates) + + // ovsdb-server.7 update2 notifications + Update2(context interface{}, tableUpdates TableUpdates2) + + // RFC 7047 section 4.1.9 Locked Notification + Locked([]interface{}) + + // RFC 7047 section 4.1.10 Stolen Notification + Stolen([]interface{}) + + // RFC 7047 section 4.1.11 Echo Notification + Echo([]interface{}) + + Disconnected() +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go new file mode 100644 index 000000000..285d1e02a --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go @@ -0,0 +1,641 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "strings" +) + +// DatabaseSchema is a database schema according to RFC7047 +type DatabaseSchema struct { + Name string `json:"name"` + Version string `json:"version"` + Tables map[string]TableSchema `json:"tables"` + allTablesRoot *bool +} + +// UUIDColumn is a static column that represents the _uuid column, common to all tables +var UUIDColumn = ColumnSchema{ + Type: TypeUUID, +} + +// Table returns a TableSchema Schema for a given table and column name +func (schema DatabaseSchema) Table(tableName string) *TableSchema { + if table, ok := schema.Tables[tableName]; ok { + return &table + } + 
return nil +} + +// IsRoot whether a table is root or not +func (schema DatabaseSchema) IsRoot(tableName string) (bool, error) { + t := schema.Table(tableName) + if t == nil { + return false, fmt.Errorf("Table %s not in schame", tableName) + } + if t.IsRoot { + return true, nil + } + // As per RFC7047, for compatibility with schemas created before + // "isRoot" was introduced, if "isRoot" is omitted or false in every + // in a given , then every table is part + // of the root set. + if schema.allTablesRoot == nil { + allTablesRoot := true + for _, tSchema := range schema.Tables { + if tSchema.IsRoot { + allTablesRoot = false + break + } + } + schema.allTablesRoot = &allTablesRoot + } + return *schema.allTablesRoot, nil +} + +// Print will print the contents of the DatabaseSchema +func (schema DatabaseSchema) Print(w io.Writer) { + fmt.Fprintf(w, "%s, (%s)\n", schema.Name, schema.Version) + for table, tableSchema := range schema.Tables { + fmt.Fprintf(w, "\t %s", table) + if len(tableSchema.Indexes) > 0 { + fmt.Fprintf(w, "(%v)\n", tableSchema.Indexes) + } else { + fmt.Fprintf(w, "\n") + } + for column, columnSchema := range tableSchema.Columns { + fmt.Fprintf(w, "\t\t %s => %s\n", column, columnSchema) + } + } +} + +// SchemaFromFile returns a DatabaseSchema from a file +func SchemaFromFile(f *os.File) (DatabaseSchema, error) { + data, err := ioutil.ReadAll(f) + if err != nil { + return DatabaseSchema{}, err + } + var schema DatabaseSchema + err = json.Unmarshal(data, &schema) + if err != nil { + return DatabaseSchema{}, err + } + return schema, nil +} + +// ValidateOperations performs basic validation for operations against a DatabaseSchema +func (schema DatabaseSchema) ValidateOperations(operations ...Operation) bool { + for _, op := range operations { + switch op.Op { + case OperationAbort, OperationAssert, OperationComment, OperationCommit, OperationWait: + continue + case OperationInsert, OperationSelect, OperationUpdate, OperationMutate, OperationDelete: + table, ok := schema.Tables[op.Table] + if ok { + for column := range op.Row { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && column != "_version" { + return false + } + } + } + for _, row := range op.Rows { + for column := range row { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && column != "_version" { + return false + } + } + } + } + for _, column := range op.Columns { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && column != "_version" { + return false + } + } + } + } else { + return false + } + } + } + return true +} + +// TableSchema is a table schema according to RFC7047 +type TableSchema struct { + Columns map[string]*ColumnSchema `json:"columns"` + Indexes [][]string `json:"indexes,omitempty"` + IsRoot bool `json:"isRoot,omitempty"` +} + +// Column returns the Column object for a specific column name +func (t TableSchema) Column(columnName string) *ColumnSchema { + if columnName == "_uuid" { + return &UUIDColumn + } + if column, ok := t.Columns[columnName]; ok { + return column + } + return nil +} + +/*RFC7047 defines some atomic-types (e.g: integer, string, etc). However, the Column's type +can also hold other more complex types such as set, enum and map. The way to determine the type +depends on internal, not directly marshallable fields. Therefore, in order to simplify the usage +of this library, we define an ExtendedType that includes all possible column types (including +atomic fields). 
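For example (an illustrative reading of ColumnSchema.UnmarshalJSON further below): a column
declared as {"key": "string", "min": 0, "max": "unlimited"} is reported with Type == TypeSet,
a key carrying an "enum" member is reported as TypeEnum, a type with a "value" member is
reported as TypeMap, and a plain atomic declaration keeps its atomic type.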
+*/ + +// ExtendedType includes atomic types as defined in the RFC plus Enum, Map and Set +type ExtendedType = string + +// RefType is used to define the possible RefTypes +type RefType = string + +// unlimited is not constant as we can't take the address of int constants +var ( + // Unlimited is used to express unlimited "Max" + Unlimited = -1 +) + +const ( + unlimitedString = "unlimited" + //Strong RefType + Strong RefType = "strong" + //Weak RefType + Weak RefType = "weak" + + //ExtendedType associated with Atomic Types + + //TypeInteger is equivalent to 'int' + TypeInteger ExtendedType = "integer" + //TypeReal is equivalent to 'float64' + TypeReal ExtendedType = "real" + //TypeBoolean is equivalent to 'bool' + TypeBoolean ExtendedType = "boolean" + //TypeString is equivalent to 'string' + TypeString ExtendedType = "string" + //TypeUUID is equivalent to 'libovsdb.UUID' + TypeUUID ExtendedType = "uuid" + + //Extended Types used to summarize the internal type of the field. + + //TypeEnum is an enumerator of type defined by Key.Type + TypeEnum ExtendedType = "enum" + //TypeMap is a map whose type depend on Key.Type and Value.Type + TypeMap ExtendedType = "map" + //TypeSet is a set whose type depend on Key.Type + TypeSet ExtendedType = "set" +) + +// BaseType is a base-type structure as per RFC7047 +type BaseType struct { + Type string + Enum []interface{} + minReal *float64 + maxReal *float64 + minInteger *int + maxInteger *int + minLength *int + maxLength *int + refTable *string + refType *RefType +} + +func (b *BaseType) simpleAtomic() bool { + return isAtomicType(b.Type) && b.Enum == nil && b.minReal == nil && b.maxReal == nil && b.minInteger == nil && b.maxInteger == nil && b.minLength == nil && b.maxLength == nil && b.refTable == nil && b.refType == nil +} + +// MinReal returns the minimum real value +// RFC7047 does not define a default, but we assume this to be +// the smallest non zero value a float64 could hold +func (b *BaseType) MinReal() (float64, error) { + if b.Type != TypeReal { + return 0, fmt.Errorf("%s is not a real", b.Type) + } + if b.minReal != nil { + return *b.minReal, nil + } + return math.SmallestNonzeroFloat64, nil +} + +// MaxReal returns the maximum real value +// RFC7047 does not define a default, but this would be the maximum +// value held by a float64 +func (b *BaseType) MaxReal() (float64, error) { + if b.Type != TypeReal { + return 0, fmt.Errorf("%s is not a real", b.Type) + } + if b.maxReal != nil { + return *b.maxReal, nil + } + return math.MaxFloat64, nil +} + +// MinInteger returns the minimum integer value +// RFC7047 specifies the minimum to be -2^63 +func (b *BaseType) MinInteger() (int, error) { + if b.Type != TypeInteger { + return 0, fmt.Errorf("%s is not an integer", b.Type) + } + if b.minInteger != nil { + return *b.minInteger, nil + } + return int(math.Pow(-2, 63)), nil +} + +// MaxInteger returns the minimum integer value +// RFC7047 specifies the minimum to be 2^63-1 +func (b *BaseType) MaxInteger() (int, error) { + if b.Type != TypeInteger { + return 0, fmt.Errorf("%s is not an integer", b.Type) + } + if b.maxInteger != nil { + return *b.maxInteger, nil + } + return int(math.Pow(2, 63)) - 1, nil +} + +// MinLength returns the minimum string length +// RFC7047 doesn't specify a default, but we assume +// that it must be >= 0 +func (b *BaseType) MinLength() (int, error) { + if b.Type != TypeString { + return 0, fmt.Errorf("%s is not an string", b.Type) + } + if b.minLength != nil { + return *b.minLength, nil + } + return 0, nil +} + +// 
MaxLength returns the maximum string length +// RFC7047 doesn't specify a default, but we assume +// that it must 2^63-1 +func (b *BaseType) MaxLength() (int, error) { + if b.Type != TypeString { + return 0, fmt.Errorf("%s is not an string", b.Type) + } + if b.maxLength != nil { + return *b.maxLength, nil + } + return int(math.Pow(2, 63)) - 1, nil +} + +// RefTable returns the table to which a UUID type refers +// It will return an empty string if not set +func (b *BaseType) RefTable() (string, error) { + if b.Type != TypeUUID { + return "", fmt.Errorf("%s is not a uuid", b.Type) + } + if b.refTable != nil { + return *b.refTable, nil + } + return "", nil +} + +// RefType returns the reference type for a UUID field +// RFC7047 infers the RefType is strong if omitted +func (b *BaseType) RefType() (RefType, error) { + if b.Type != TypeUUID { + return "", fmt.Errorf("%s is not a uuid", b.Type) + } + if b.refType != nil { + return *b.refType, nil + } + return Strong, nil +} + +// UnmarshalJSON unmarshals a json-formatted base type +func (b *BaseType) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err == nil { + if isAtomicType(s) { + b.Type = s + } else { + return fmt.Errorf("non atomic type %s in ", s) + } + return nil + } + // temporary type to avoid recursive call to unmarshal + var bt struct { + Type string `json:"type"` + Enum interface{} `json:"enum,omitempty"` + MinReal *float64 `json:"minReal,omitempty"` + MaxReal *float64 `json:"maxReal,omitempty"` + MinInteger *int `json:"minInteger,omitempty"` + MaxInteger *int `json:"maxInteger,omitempty"` + MinLength *int `json:"minLength,omitempty"` + MaxLength *int `json:"maxLength,omitempty"` + RefTable *string `json:"refTable,omitempty"` + RefType *RefType `json:"refType,omitempty"` + } + err := json.Unmarshal(data, &bt) + if err != nil { + return err + } + + if bt.Enum != nil { + // 'enum' is a list or a single element representing a list of exactly one element + switch bt.Enum.(type) { + case []interface{}: + // it's an OvsSet + oSet := bt.Enum.([]interface{}) + innerSet := oSet[1].([]interface{}) + b.Enum = make([]interface{}, len(innerSet)) + copy(b.Enum, innerSet) + default: + b.Enum = []interface{}{bt.Enum} + } + } + b.Type = bt.Type + b.minReal = bt.MinReal + b.maxReal = bt.MaxReal + b.minInteger = bt.MinInteger + b.maxInteger = bt.MaxInteger + b.minLength = bt.MaxLength + b.maxLength = bt.MaxLength + b.refTable = bt.RefTable + b.refType = bt.RefType + return nil +} + +// MarshalJSON marshals a base type to JSON +func (b BaseType) MarshalJSON() ([]byte, error) { + j := struct { + Type string `json:"type,omitempty"` + Enum *OvsSet `json:"enum,omitempty"` + MinReal *float64 `json:"minReal,omitempty"` + MaxReal *float64 `json:"maxReal,omitempty"` + MinInteger *int `json:"minInteger,omitempty"` + MaxInteger *int `json:"maxInteger,omitempty"` + MinLength *int `json:"minLength,omitempty"` + MaxLength *int `json:"maxLength,omitempty"` + RefTable *string `json:"refTable,omitempty"` + RefType *RefType `json:"refType,omitempty"` + }{ + Type: b.Type, + MinReal: b.minReal, + MaxReal: b.maxReal, + MinInteger: b.minInteger, + MaxInteger: b.maxInteger, + MinLength: b.maxLength, + MaxLength: b.maxLength, + RefTable: b.refTable, + RefType: b.refType, + } + if len(b.Enum) > 0 { + set, err := NewOvsSet(b.Enum) + if err != nil { + return nil, err + } + j.Enum = &set + } + return json.Marshal(j) +} + +// ColumnType is a type object as per RFC7047 +// "key": required +// "value": optional +// "min": optional (default: 
1) +// "max": or "unlimited" optional (default: 1) +type ColumnType struct { + Key *BaseType + Value *BaseType + min *int + max *int +} + +// Max returns the maximum value of a ColumnType. -1 is Unlimited +func (c *ColumnType) Max() int { + if c.max == nil { + return 1 + } + return *c.max +} + +// Min returns the minimum value of a ColumnType +func (c *ColumnType) Min() int { + if c.min == nil { + return 1 + } + return *c.min +} + +// UnmarshalJSON unmarshals a json-formatted column type +func (c *ColumnType) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err == nil { + if isAtomicType(s) { + c.Key = &BaseType{Type: s} + } else { + return fmt.Errorf("non atomic type %s in ", s) + } + return nil + } + var colType struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value"` + Min *int `json:"min"` + Max interface{} `json:"max"` + } + err := json.Unmarshal(data, &colType) + if err != nil { + return err + } + c.Key = colType.Key + c.Value = colType.Value + c.min = colType.Min + switch v := colType.Max.(type) { + case string: + if v == unlimitedString { + c.max = &Unlimited + } else { + return fmt.Errorf("unexpected string value in max field") + } + case float64: + i := int(v) + c.max = &i + default: + c.max = nil + } + return nil +} + +// MarshalJSON marshalls a column type to JSON +func (c ColumnType) MarshalJSON() ([]byte, error) { + if c.Value == nil && c.max == nil && c.min == nil && c.Key.simpleAtomic() { + return json.Marshal(c.Key.Type) + } + if c.Max() == Unlimited { + colType := struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value,omitempty"` + Min *int `json:"min,omitempty"` + Max string `json:"max,omitempty"` + }{ + Key: c.Key, + Value: c.Value, + Min: c.min, + Max: unlimitedString, + } + return json.Marshal(&colType) + } + colType := struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value,omitempty"` + Min *int `json:"min,omitempty"` + Max *int `json:"max,omitempty"` + }{ + Key: c.Key, + Value: c.Value, + Min: c.min, + Max: c.max, + } + return json.Marshal(&colType) +} + +// ColumnSchema is a column schema according to RFC7047 +type ColumnSchema struct { + // According to RFC7047, "type" field can be, either an + // Or a ColumnType defined below. To try to simplify the usage, the + // json message will be parsed manually and Type will indicate the "extended" + // type. Depending on its value, more information may be available in TypeObj. 
+ // E.g: If Type == TypeEnum, TypeObj.Key.Enum contains the possible values + Type ExtendedType + TypeObj *ColumnType + ephemeral *bool + mutable *bool +} + +// Mutable returns whether a column is mutable +func (c *ColumnSchema) Mutable() bool { + if c.mutable != nil { + return *c.mutable + } + // default true + return true +} + +// Ephemeral returns whether a column is ephemeral +func (c *ColumnSchema) Ephemeral() bool { + if c.ephemeral != nil { + return *c.ephemeral + } + // default false + return false +} + +// UnmarshalJSON unmarshals a json-formatted column +func (c *ColumnSchema) UnmarshalJSON(data []byte) error { + // ColumnJSON represents the known json values for a Column + var colJSON struct { + Type *ColumnType `json:"type"` + Ephemeral *bool `json:"ephemeral,omitempty"` + Mutable *bool `json:"mutable,omitempty"` + } + + // Unmarshal known keys + if err := json.Unmarshal(data, &colJSON); err != nil { + return fmt.Errorf("cannot parse column object %s", err) + } + + c.ephemeral = colJSON.Ephemeral + c.mutable = colJSON.Mutable + c.TypeObj = colJSON.Type + + // Infer the ExtendedType from the TypeObj + if c.TypeObj.Value != nil { + c.Type = TypeMap + } else if c.TypeObj.Min() != 1 || c.TypeObj.Max() != 1 { + c.Type = TypeSet + } else if len(c.TypeObj.Key.Enum) > 0 { + c.Type = TypeEnum + } else { + c.Type = c.TypeObj.Key.Type + } + return nil +} + +// MarshalJSON marshalls a column schema to JSON +func (c ColumnSchema) MarshalJSON() ([]byte, error) { + type colJSON struct { + Type *ColumnType `json:"type"` + Ephemeral *bool `json:"ephemeral,omitempty"` + Mutable *bool `json:"mutable,omitempty"` + } + column := colJSON{ + Type: c.TypeObj, + Ephemeral: c.ephemeral, + Mutable: c.mutable, + } + return json.Marshal(column) +} + +// String returns a string representation of the (native) column type +func (c *ColumnSchema) String() string { + var flags []string + var flagStr string + var typeStr string + if c.Ephemeral() { + flags = append(flags, "E") + } + if c.Mutable() { + flags = append(flags, "M") + } + if len(flags) > 0 { + flagStr = fmt.Sprintf("[%s]", strings.Join(flags, ",")) + } + + switch c.Type { + case TypeInteger, TypeReal, TypeBoolean, TypeString: + typeStr = string(c.Type) + case TypeUUID: + if c.TypeObj != nil && c.TypeObj.Key != nil { + // ignore err as we've already asserted this is a uuid + reftable, _ := c.TypeObj.Key.RefTable() + reftype := "" + if s, err := c.TypeObj.Key.RefType(); err != nil { + reftype = s + } + typeStr = fmt.Sprintf("uuid [%s (%s)]", reftable, reftype) + } else { + typeStr = "uuid" + } + + case TypeEnum: + typeStr = fmt.Sprintf("enum (type: %s): %v", c.TypeObj.Key.Type, c.TypeObj.Key.Enum) + case TypeMap: + typeStr = fmt.Sprintf("[%s]%s", c.TypeObj.Key.Type, c.TypeObj.Value.Type) + case TypeSet: + var keyStr string + if c.TypeObj.Key.Type == TypeUUID { + // ignore err as we've already asserted this is a uuid + reftable, _ := c.TypeObj.Key.RefTable() + reftype, _ := c.TypeObj.Key.RefType() + keyStr = fmt.Sprintf(" [%s (%s)]", reftable, reftype) + } else { + keyStr = string(c.TypeObj.Key.Type) + } + typeStr = fmt.Sprintf("[]%s (min: %d, max: %d)", keyStr, c.TypeObj.Min(), c.TypeObj.Max()) + default: + panic(fmt.Sprintf("Unsupported type %s", c.Type)) + } + + return strings.Join([]string{typeStr, flagStr}, " ") +} + +func isAtomicType(atype string) bool { + switch atype { + case TypeInteger, TypeReal, TypeBoolean, TypeString, TypeUUID: + return true + default: + return false + } +} diff --git 
a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore new file mode 100644 index 000000000..33f8bff56 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema \ No newline at end of file diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go new file mode 100644 index 000000000..274a7164f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go @@ -0,0 +1,182 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package serverdb + +import "github.com/ovn-org/libovsdb/model" + +const DatabaseTable = "Database" + +type ( + DatabaseModel = string +) + +var ( + DatabaseModelStandalone DatabaseModel = "standalone" + DatabaseModelClustered DatabaseModel = "clustered" + DatabaseModelRelay DatabaseModel = "relay" +) + +// Database defines an object in Database table +type Database struct { + UUID string `ovsdb:"_uuid"` + Cid *string `ovsdb:"cid"` + Connected bool `ovsdb:"connected"` + Index *int `ovsdb:"index"` + Leader bool `ovsdb:"leader"` + Model DatabaseModel `ovsdb:"model"` + Name string `ovsdb:"name"` + Schema *string `ovsdb:"schema"` + Sid *string `ovsdb:"sid"` +} + +func (a *Database) GetUUID() string { + return a.UUID +} + +func (a *Database) GetCid() *string { + return a.Cid +} + +func copyDatabaseCid(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseCid(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) GetConnected() bool { + return a.Connected +} + +func (a *Database) GetIndex() *int { + return a.Index +} + +func copyDatabaseIndex(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseIndex(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) GetLeader() bool { + return a.Leader +} + +func (a *Database) GetModel() DatabaseModel { + return a.Model +} + +func (a *Database) GetName() string { + return a.Name +} + +func (a *Database) GetSchema() *string { + return a.Schema +} + +func copyDatabaseSchema(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseSchema(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) GetSid() *string { + return a.Sid +} + +func copyDatabaseSid(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseSid(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) DeepCopyInto(b *Database) { + *b = *a + b.Cid = copyDatabaseCid(a.Cid) + b.Index = copyDatabaseIndex(a.Index) + b.Schema = copyDatabaseSchema(a.Schema) + b.Sid = copyDatabaseSid(a.Sid) +} + +func (a *Database) DeepCopy() *Database { + b := new(Database) + a.DeepCopyInto(b) + return b +} + +func (a *Database) CloneModelInto(b model.Model) { + c := b.(*Database) + a.DeepCopyInto(c) +} + +func (a *Database) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Database) Equals(b *Database) bool { + return a.UUID == b.UUID && + equalDatabaseCid(a.Cid, b.Cid) && + a.Connected == b.Connected && + 
equalDatabaseIndex(a.Index, b.Index) && + a.Leader == b.Leader && + a.Model == b.Model && + a.Name == b.Name && + equalDatabaseSchema(a.Schema, b.Schema) && + equalDatabaseSid(a.Sid, b.Sid) +} + +func (a *Database) EqualsModel(b model.Model) bool { + c := b.(*Database) + return a.Equals(c) +} + +var _ model.CloneableModel = &Database{} +var _ model.ComparableModel = &Database{} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go new file mode 100644 index 000000000..5923af60a --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go @@ -0,0 +1,6 @@ +package serverdb + +// server_model is a database model for the special _Server database that all +// ovsdb instances export. It reports back status of the server process itself. + +//go:generate ../../bin/modelgen --extended -p serverdb -o . _server.ovsschema diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go new file mode 100644 index 000000000..3c117faa2 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go @@ -0,0 +1,99 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package serverdb + +import ( + "encoding/json" + + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb +func FullDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("_Server", map[string]model.Model{ + "Database": &Database{}, + }) +} + +var schema = `{ + "name": "_Server", + "version": "1.2.0", + "tables": { + "Database": { + "columns": { + "cid": { + "type": { + "key": { + "type": "uuid" + }, + "min": 0, + "max": 1 + } + }, + "connected": { + "type": "boolean" + }, + "index": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "leader": { + "type": "boolean" + }, + "model": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "standalone", + "clustered", + "relay" + ] + ] + } + } + }, + "name": { + "type": "string" + }, + "schema": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "sid": { + "type": { + "key": { + "type": "uuid" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + } + } +}` + +func Schema() ovsdb.DatabaseSchema { + var s ovsdb.DatabaseSchema + err := json.Unmarshal([]byte(schema), &s) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go new file mode 100644 index 000000000..ae1ec59ae --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go @@ -0,0 +1,109 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// OvsSet is an OVSDB style set +// RFC 7047 has a weird (but understandable) notation for set as described as : +// Either an , representing a set with exactly one element, or +// a 2-element JSON array that represents a database set value. The +// first element of the array must be the string "set", and the +// second element must be an array of zero or more s giving the +// values in the set. All of the s must have the same type. 
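// Illustrative wire-format sketch (an editor's example derived from the
// MarshalJSON implementation below):
//
//	one, _ := NewOvsSet([]string{"eth0"})
//	b1, _ := json.Marshal(one) // a one-element set marshals to the bare value: "eth0"
//	two, _ := NewOvsSet([]string{"eth0", "eth1"})
//	b2, _ := json.Marshal(two) // ["set",["eth0","eth1"]]; an empty set marshals to ["set",[]]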
+type OvsSet struct { + GoSet []interface{} +} + +// NewOvsSet creates a new OVSDB style set from a Go interface (object) +func NewOvsSet(obj interface{}) (OvsSet, error) { + ovsSet := make([]interface{}, 0) + var v reflect.Value + if reflect.TypeOf(obj).Kind() == reflect.Ptr { + v = reflect.ValueOf(obj).Elem() + if v.Kind() == reflect.Invalid { + // must be a nil pointer, so just return an empty set + return OvsSet{ovsSet}, nil + } + } else { + v = reflect.ValueOf(obj) + } + + switch v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + ovsSet = append(ovsSet, v.Index(i).Interface()) + } + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.Bool: + ovsSet = append(ovsSet, v.Interface()) + case reflect.Struct: + if v.Type() == reflect.TypeOf(UUID{}) { + ovsSet = append(ovsSet, v.Interface()) + } else { + return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types") + } + default: + return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types") + } + return OvsSet{ovsSet}, nil +} + +// MarshalJSON wil marshal an OVSDB style Set in to a JSON byte array +func (o OvsSet) MarshalJSON() ([]byte, error) { + switch l := len(o.GoSet); { + case l == 1: + return json.Marshal(o.GoSet[0]) + case l > 0: + var oSet []interface{} + oSet = append(oSet, "set") + oSet = append(oSet, o.GoSet) + return json.Marshal(oSet) + } + return []byte("[\"set\",[]]"), nil +} + +// UnmarshalJSON will unmarshal a JSON byte array to an OVSDB style Set +func (o *OvsSet) UnmarshalJSON(b []byte) (err error) { + o.GoSet = make([]interface{}, 0) + addToSet := func(o *OvsSet, v interface{}) error { + goVal, err := ovsSliceToGoNotation(v) + if err == nil { + o.GoSet = append(o.GoSet, goVal) + } + return err + } + + var inter interface{} + if err = json.Unmarshal(b, &inter); err != nil { + return err + } + switch inter.(type) { + case []interface{}: + var oSet []interface{} + oSet = inter.([]interface{}) + // it's a single uuid object + if len(oSet) == 2 && (oSet[0] == "uuid" || oSet[0] == "named-uuid") { + return addToSet(o, UUID{GoUUID: oSet[1].(string)}) + } + if oSet[0] != "set" { + // it is a slice, but is not a set + return &json.UnmarshalTypeError{Value: reflect.ValueOf(inter).String(), Type: reflect.TypeOf(*o)} + } + innerSet := oSet[1].([]interface{}) + for _, val := range innerSet { + err := addToSet(o, val) + if err != nil { + return err + } + } + return err + default: + // it is a single object + return addToSet(o, inter) + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go new file mode 100644 index 000000000..a24ce64ad --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go @@ -0,0 +1,51 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" +) + +type MonitorCondSinceReply struct { + Found bool + LastTransactionID string + Updates TableUpdates2 +} + +func (m MonitorCondSinceReply) MarshalJSON() ([]byte, error) { + v := []interface{}{m.Found, m.LastTransactionID, m.Updates} + return json.Marshal(v) +} + +func (m *MonitorCondSinceReply) UnmarshalJSON(b []byte) error { + var v []json.RawMessage + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + if len(v) != 3 { + return fmt.Errorf("expected a 3 element json array. 
there are %d elements", len(v)) + } + + var found bool + err = json.Unmarshal(v[0], &found) + if err != nil { + return err + } + + var lastTransactionID string + err = json.Unmarshal(v[1], &lastTransactionID) + if err != nil { + return err + } + + var updates TableUpdates2 + err = json.Unmarshal(v[2], &updates) + if err != nil { + return err + } + + m.Found = found + m.LastTransactionID = lastTransactionID + m.Updates = updates + return nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go new file mode 100644 index 000000000..5a47d0c44 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go @@ -0,0 +1,35 @@ +package ovsdb + +// TableUpdates is an object that maps from a table name to a +// TableUpdate +type TableUpdates map[string]TableUpdate + +// TableUpdate is an object that maps from the row's UUID to a +// RowUpdate +type TableUpdate map[string]*RowUpdate + +// RowUpdate represents a row update according to RFC7047 +type RowUpdate struct { + New *Row `json:"new,omitempty"` + Old *Row `json:"old,omitempty"` +} + +// Insert returns true if this is an update for an insert operation +func (r RowUpdate) Insert() bool { + return r.New != nil && r.Old == nil +} + +// Modify returns true if this is an update for a modify operation +func (r RowUpdate) Modify() bool { + return r.New != nil && r.Old != nil +} + +// Delete returns true if this is an update for a delete operation +func (r RowUpdate) Delete() bool { + return r.New == nil && r.Old != nil +} + +func (r *RowUpdate) FromRowUpdate2(ru2 RowUpdate2) { + r.Old = ru2.Old + r.New = ru2.New +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go new file mode 100644 index 000000000..a040894c9 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go @@ -0,0 +1,19 @@ +package ovsdb + +// TableUpdates2 is an object that maps from a table name to a +// TableUpdate2 +type TableUpdates2 map[string]TableUpdate2 + +// TableUpdate2 is an object that maps from the row's UUID to a +// RowUpdate2 +type TableUpdate2 map[string]*RowUpdate2 + +// RowUpdate2 represents a row update according to ovsdb-server.7 +type RowUpdate2 struct { + Initial *Row `json:"initial,omitempty"` + Insert *Row `json:"insert,omitempty"` + Modify *Row `json:"modify,omitempty"` + Delete *Row `json:"delete,omitempty"` + Old *Row `json:"-"` + New *Row `json:"-"` +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go new file mode 100644 index 000000000..6bc463653 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go @@ -0,0 +1,59 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "regexp" +) + +var validUUID = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`) + +// UUID is a UUID according to RFC7047 +type UUID struct { + GoUUID string `json:"uuid"` +} + +// MarshalJSON will marshal an OVSDB style UUID to a JSON encoded byte array +func (u UUID) MarshalJSON() ([]byte, error) { + var uuidSlice []string + err := ValidateUUID(u.GoUUID) + if err == nil { + uuidSlice = []string{"uuid", u.GoUUID} + } else { + uuidSlice = []string{"named-uuid", u.GoUUID} + } + + return json.Marshal(uuidSlice) +} + +// UnmarshalJSON will unmarshal a JSON encoded byte array to a OVSDB style UUID +func (u *UUID) UnmarshalJSON(b []byte) (err error) { + var ovsUUID []string + if err := json.Unmarshal(b, &ovsUUID); err == nil { 
+ u.GoUUID = ovsUUID[1] + } + return err +} + +func ValidateUUID(uuid string) error { + if len(uuid) != 36 { + return fmt.Errorf("uuid exceeds 36 characters") + } + + if !validUUID.MatchString(uuid) { + return fmt.Errorf("uuid does not match regexp") + } + + return nil +} + +func IsNamedUUID(uuid string) bool { + return len(uuid) > 0 && !validUUID.MatchString(uuid) +} + +func IsValidUUID(uuid string) bool { + if err := ValidateUUID(uuid); err != nil { + return false + } + return true +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/difference.go b/vendor/github.com/ovn-org/libovsdb/updates/difference.go new file mode 100644 index 000000000..7ebfe8bb5 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/difference.go @@ -0,0 +1,209 @@ +package updates + +import "reflect" + +// difference between value 'a' and value 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the +// difference is 'b' in which case 'b' is returned unmodified. Also returns a +// boolean indicating if there is an actual difference. +func difference(a, b interface{}) (interface{}, bool) { + return mergeDifference(nil, a, b) +} + +// applyDifference returns the result of applying difference 'd' to value 'v' +// along with a boolean indicating if 'v' was changed. +func applyDifference(v, d interface{}) (interface{}, bool) { + if d == nil { + return v, false + } + // difference can be applied with the same algorithm used to calculate it + // f(x,f(x,y))=y + result, changed := difference(v, d) + dv := reflect.ValueOf(d) + switch dv.Kind() { + case reflect.Slice: + fallthrough + case reflect.Map: + // but we need to tweak the interpretation of change for map and slices: + // when there is no difference between the value and non-empty delta, it + // actually means the value needs to be emptied so there is actually a + // change + if !changed && dv.Len() > 0 { + return result, true + } + // there are no changes when delta is empty + return result, changed && dv.Len() > 0 + } + return result, changed +} + +// mergeDifference, given an original value 'o' and two differences 'a' and 'b', +// returns a new equivalent difference that when applied on 'o' it would have +// the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the result is +// 'b' in which case 'b' is returned unmodified. Also returns a boolean +// indicating if there is an actual difference. +func mergeDifference(o, a, b interface{}) (interface{}, bool) { + kind := reflect.ValueOf(b).Kind() + if kind == reflect.Invalid { + kind = reflect.ValueOf(a).Kind() + } + switch kind { + case reflect.Invalid: + return nil, false + case reflect.Slice: + // set differences are transitive + return setDifference(a, b) + case reflect.Map: + return mergeMapDifference(o, a, b) + case reflect.Array: + panic("Not implemented") + default: + return mergeAtomicDifference(o, a, b) + } +} + +// setDifference calculates the difference between set 'a' and set 'b'. 
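+// For example (illustrative): the difference between {1,2,3} and {2,3,4} is
+// {1,4}, i.e. the elements that belong to exactly one of the two sets.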
+// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the difference +// is 'b' in which case 'b' is returned unmodified. Also returns a boolean +// indicating if there is an actual difference. +func setDifference(a, b interface{}) (interface{}, bool) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + if !av.IsValid() && !bv.IsValid() { + return nil, false + } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() { + return b, bv.Len() != 0 + } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() { + return a, av.Len() != 0 + } + + // From https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + // The difference between two sets are all elements that only belong to one + // of the sets. + difference := make(map[interface{}]struct{}, bv.Len()) + for i := 0; i < bv.Len(); i++ { + // supossedly we are working with comparable atomic types with no + // pointers so we can use the values as map key + difference[bv.Index(i).Interface()] = struct{}{} + } + j := av.Len() + for i := 0; i < j; { + vv := av.Index(i) + vi := vv.Interface() + if _, ok := difference[vi]; ok { + // this value of 'a' is in 'b', so remove it from 'a'; to do that, + // overwrite it with the last value and re-evaluate + vv.Set(av.Index(j - 1)) + // decrease where the last 'a' value is at + j-- + // remove from 'b' values + delete(difference, vi) + } else { + // this value of 'a' is not in 'b', evaluate the next value + i++ + } + } + // trim the slice to the actual values held + av = av.Slice(0, j) + for item := range difference { + // this value of 'b' is not in 'a', so add it + av = reflect.Append(av, reflect.ValueOf(item)) + } + + if av.Len() == 0 { + return reflect.Zero(av.Type()).Interface(), false + } + + return av.Interface(), true +} + +// mergeMapDifference, given an original map 'o' and two differences 'a' and +// 'b', returns a new equivalent difference that when applied on 'o' it would +// have the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the result is +// 'b' in which case 'b' is returned unmodified. +// Returns a boolean indicating if there is an actual difference. +func mergeMapDifference(o, a, b interface{}) (interface{}, bool) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + if !av.IsValid() && !bv.IsValid() { + return nil, false + } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() { + return b, bv.Len() != 0 + } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() { + return a, av.Len() != 0 + } + + ov := reflect.ValueOf(o) + if !ov.IsValid() { + ov = reflect.Zero(av.Type()) + } + + // From + // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + // The difference between two maps are all key-value pairs whose keys + // appears in only one of the maps, plus the key-value pairs whose keys + // appear in both maps but with different values. For the latter elements, + // includes the value from the new column. + + // We can assume that difference is a transitive operation so we calculate + // the difference between 'a' and 'b' but we need to handle exceptions when + // the same key is present in all values. 
+ for i := bv.MapRange(); i.Next(); { + kv := i.Key() + bvv := i.Value() + avv := av.MapIndex(kv) + ovv := ov.MapIndex(kv) + // supossedly we are working with comparable types with no pointers so + // we can compare directly here + switch { + case ovv.IsValid() && avv.IsValid() && ovv.Interface() == bvv.Interface(): + // key is present in the three values + // final result would restore key to the original value, delete from 'a' + av.SetMapIndex(kv, reflect.Value{}) + case ovv.IsValid() && avv.IsValid() && avv.Interface() == bvv.Interface(): + // key is present in the three values + // final result would remove key, set in 'a' with 'o' value + av.SetMapIndex(kv, ovv) + case avv.IsValid() && avv.Interface() == bvv.Interface(): + // key/value is in 'a' and 'b', delete from 'a' + av.SetMapIndex(kv, reflect.Value{}) + default: + // key/value in 'b' is not in 'a', set in 'a' with 'b' value + av.SetMapIndex(kv, bvv) + } + } + + if av.Len() == 0 { + return reflect.Zero(av.Type()).Interface(), false + } + + return av.Interface(), true +} + +// mergeAtomicDifference, given an original atomic value 'o' and two differences +// 'a' and 'b', returns a new equivalent difference that when applied on 'o' it +// would have the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// Returns a boolean indicating if there is an actual difference. +func mergeAtomicDifference(o, a, b interface{}) (interface{}, bool) { + if o != nil { + return b, !reflect.DeepEqual(o, b) + } + return b, !reflect.DeepEqual(a, b) +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/doc.go b/vendor/github.com/ovn-org/libovsdb/updates/doc.go new file mode 100644 index 000000000..3e6fe18a0 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/doc.go @@ -0,0 +1,15 @@ +/* +Package updates provides an utility to perform and aggregate model updates. + +As input, it supports OVSDB Operations, RowUpdate or RowUpdate2 notations via +the corresponding Add methods. + +As output, it supports both OVSDB RowUpdate2 as well as model notation via the +corresponding ForEach iterative methods. + +Several updates can be added and will be merged with any previous updates even +if they are for the same model. If several updates for the same model are +aggregated, the user is responsible that the provided model to be updated +matches the updated model of the previous update. 
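+
+A minimal usage sketch (illustrative only; the DatabaseModel, the current
+model, the row uuid and the incoming RowUpdate2 for a hypothetical "Bridge"
+table are assumed to already exist, and error handling is omitted):
+
+  var updates ModelUpdates
+  _ = updates.AddRowUpdate2(dbModel, "Bridge", uuid, current, ru2)
+  for _, table := range updates.GetUpdatedTables() {
+    _ = updates.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error {
+      // old == nil means an insert, new == nil means a delete
+      return nil
+    })
+  }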
+*/ +package updates diff --git a/vendor/github.com/ovn-org/libovsdb/updates/merge.go b/vendor/github.com/ovn-org/libovsdb/updates/merge.go new file mode 100644 index 000000000..562f22623 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/merge.go @@ -0,0 +1,160 @@ +package updates + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/ovsdb" +) + +func merge(ts *ovsdb.TableSchema, a, b modelUpdate) (modelUpdate, error) { + // handle model update + switch { + case b.old == nil && b.new == nil: + // noop + case a.old == nil && a.new == nil: + // first op + a.old = b.old + a.new = b.new + case a.new != nil && b.old != nil && b.new != nil: + // update after an insert or an update + a.new = b.new + case b.old != nil && b.new == nil: + // a final delete + a.new = nil + default: + return modelUpdate{}, fmt.Errorf("sequence of updates not supported") + } + + // handle row update + ru2, err := mergeRowUpdate(ts, a.rowUpdate2, b.rowUpdate2) + if err != nil { + return modelUpdate{}, err + } + if ru2 == nil { + return modelUpdate{}, nil + } + a.rowUpdate2 = ru2 + + return a, nil +} + +func mergeRowUpdate(ts *ovsdb.TableSchema, a, b *rowUpdate2) (*rowUpdate2, error) { + switch { + case b == nil: + // noop + case a == nil: + // first op + a = b + case a.Insert != nil && b.Modify != nil: + // update after an insert + a.New = b.New + a.Insert = b.New + case a.Modify != nil && b.Modify != nil: + // update after update + a.New = b.New + a.Modify = mergeModifyRow(ts, a.Old, a.Modify, b.Modify) + if a.Modify == nil { + // we merged two modifications that brought back the row to its + // original value which is a no op + a = nil + } + case a.Insert != nil && b.Delete != nil: + // delete after insert + a = nil + case b.Delete != nil: + // a final delete + a.Initial = nil + a.Insert = nil + a.Modify = nil + a.New = nil + a.Delete = b.Delete + default: + return &rowUpdate2{}, fmt.Errorf("sequence of updates not supported") + } + return a, nil +} + +// mergeModifyRow merges two modification rows 'a' and 'b' with respect an +// original row 'o'. Two modifications that restore the original value cancel +// each other and won't be included in the result. Returns nil if there are no +// resulting modifications. +func mergeModifyRow(ts *ovsdb.TableSchema, o, a, b *ovsdb.Row) *ovsdb.Row { + original := *o + aMod := *a + bMod := *b + for k, v := range bMod { + if _, ok := aMod[k]; !ok { + aMod[k] = v + continue + } + + var result interface{} + var changed bool + + // handle maps or sets first + switch v.(type) { + // difference only supports set or map values that are comparable with + // no pointers. This should be currently fine because the set or map + // values should only be non pointer atomic types or the UUID struct. 
+ case ovsdb.OvsSet: + aSet := aMod[k].(ovsdb.OvsSet) + bSet := v.(ovsdb.OvsSet) + // handle sets of multiple values, single value sets are handled as + // atomic values + if ts.Column(k).TypeObj.Max() != 1 { + // set difference is a fully transitive operation so we dont + // need to do anything special to merge two differences + result, changed = setDifference(aSet.GoSet, bSet.GoSet) + result = ovsdb.OvsSet{GoSet: result.([]interface{})} + } + case ovsdb.OvsMap: + aMap := aMod[k].(ovsdb.OvsMap) + bMap := v.(ovsdb.OvsMap) + var originalMap ovsdb.OvsMap + if v, ok := original[k]; ok { + originalMap = v.(ovsdb.OvsMap) + } + // map difference is not transitive with respect to the original + // value so we have to take the original value into account when + // merging + result, changed = mergeMapDifference(originalMap.GoMap, aMap.GoMap, bMap.GoMap) + result = ovsdb.OvsMap{GoMap: result.(map[interface{}]interface{})} + } + + // was neither a map nor a set + if result == nil { + // atomic difference is not transitive with respect to the original + // value so we have to take the original value into account when + // merging + o := original[k] + if o == nil { + // assume zero value if original does not have the column + o = reflect.Zero(reflect.TypeOf(v)).Interface() + } + if set, ok := o.(ovsdb.OvsSet); ok { + // atomic optional values are cleared out with an empty set + // if the original value was also cleared out, use an empty set + // instead of a nil set so that mergeAtomicDifference notices + // that we are returning to the original value + if set.GoSet == nil { + set.GoSet = []interface{}{} + } + o = set + } + result, changed = mergeAtomicDifference(o, aMod[k], v) + } + + if !changed { + delete(aMod, k) + continue + } + aMod[k] = result + } + + if len(aMod) == 0 { + return nil + } + + return a +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/mutate.go b/vendor/github.com/ovn-org/libovsdb/updates/mutate.go new file mode 100644 index 000000000..1d87737fc --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/mutate.go @@ -0,0 +1,297 @@ +package updates + +import ( + "reflect" + + "github.com/ovn-org/libovsdb/ovsdb" +) + +func removeFromSlice(a, b reflect.Value) (reflect.Value, bool) { + for i := 0; i < a.Len(); i++ { + if a.Index(i).Interface() == b.Interface() { + v := reflect.AppendSlice(a.Slice(0, i), a.Slice(i+1, a.Len())) + return v, true + } + } + return a, false +} + +func insertToSlice(a, b reflect.Value) (reflect.Value, bool) { + for i := 0; i < a.Len(); i++ { + if a.Index(i).Interface() == b.Interface() { + return a, false + } + } + return reflect.Append(a, b), true +} + +func mutate(current interface{}, mutator ovsdb.Mutator, value interface{}) (interface{}, interface{}) { + switch current.(type) { + case bool, string: + return current, value + } + switch mutator { + case ovsdb.MutateOperationInsert: + // for insert, the delta will be the new value added + return mutateInsert(current, value) + case ovsdb.MutateOperationDelete: + return mutateDelete(current, value) + case ovsdb.MutateOperationAdd: + // for add, the delta is the new value + new := mutateAdd(current, value) + return new, new + case ovsdb.MutateOperationSubtract: + // for subtract, the delta is the new value + new := mutateSubtract(current, value) + return new, new + case ovsdb.MutateOperationMultiply: + new := mutateMultiply(current, value) + return new, new + case ovsdb.MutateOperationDivide: + new := mutateDivide(current, value) + return new, new + case ovsdb.MutateOperationModulo: + new := 
mutateModulo(current, value) + return new, new + } + return current, value +} + +func mutateInsert(current, value interface{}) (interface{}, interface{}) { + switch current.(type) { + case int, float64: + return current, current + } + vc := reflect.ValueOf(current) + vv := reflect.ValueOf(value) + if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) { + v, ok := insertToSlice(vc, vv) + var diff interface{} + if ok { + diff = value + } + return v.Interface(), diff + } + if !vc.IsValid() { + if vv.IsValid() { + return vv.Interface(), vv.Interface() + } + return nil, nil + } + if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice { + v := vc + diff := reflect.Indirect(reflect.New(vv.Type())) + for i := 0; i < vv.Len(); i++ { + var ok bool + v, ok = insertToSlice(v, vv.Index(i)) + if ok { + diff = reflect.Append(diff, vv.Index(i)) + } + } + if diff.Len() > 0 { + return v.Interface(), diff.Interface() + } + return v.Interface(), nil + } + if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map { + if vc.IsNil() && vv.Len() > 0 { + return value, value + } + diff := reflect.MakeMap(vc.Type()) + iter := vv.MapRange() + for iter.Next() { + k := iter.Key() + if !vc.MapIndex(k).IsValid() { + vc.SetMapIndex(k, iter.Value()) + diff.SetMapIndex(k, iter.Value()) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + return current, nil +} + +func mutateDelete(current, value interface{}) (interface{}, interface{}) { + switch current.(type) { + case int, float64: + return current, nil + } + vc := reflect.ValueOf(current) + vv := reflect.ValueOf(value) + if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) { + v, ok := removeFromSlice(vc, vv) + diff := value + if !ok { + diff = nil + } + return v.Interface(), diff + } + if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice { + v := vc + diff := reflect.Indirect(reflect.New(vv.Type())) + for i := 0; i < vv.Len(); i++ { + var ok bool + v, ok = removeFromSlice(v, vv.Index(i)) + if ok { + diff = reflect.Append(diff, vv.Index(i)) + } + } + if diff.Len() > 0 { + return v.Interface(), diff.Interface() + } + return v.Interface(), nil + } + if vc.Kind() == reflect.Map && vv.Type() == reflect.SliceOf(vc.Type().Key()) { + diff := reflect.MakeMap(vc.Type()) + for i := 0; i < vv.Len(); i++ { + if vc.MapIndex(vv.Index(i)).IsValid() { + diff.SetMapIndex(vv.Index(i), vc.MapIndex(vv.Index(i))) + vc.SetMapIndex(vv.Index(i), reflect.Value{}) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map { + diff := reflect.MakeMap(vc.Type()) + iter := vv.MapRange() + for iter.Next() { + vvk := iter.Key() + vvv := iter.Value() + vcv := vc.MapIndex(vvk) + if vcv.IsValid() && reflect.DeepEqual(vcv.Interface(), vvv.Interface()) { + diff.SetMapIndex(vvk, vcv) + vc.SetMapIndex(vvk, reflect.Value{}) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + return current, nil +} + +func mutateAdd(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i + v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i + v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j + v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j + v + } + return is + } + return current +} + +func 
mutateSubtract(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i - v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i - v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j - v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j - v + } + return is + } + return current +} + +func mutateMultiply(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i * v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i * v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j * v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j * v + } + return is + } + return current +} + +func mutateDivide(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i / v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i / v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j / v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j / v + } + return is + } + return current +} + +func mutateModulo(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i % v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j % v + } + return is + } + return current +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/references.go b/vendor/github.com/ovn-org/libovsdb/updates/references.go new file mode 100644 index 000000000..938d02aae --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/references.go @@ -0,0 +1,797 @@ +package updates + +import ( + "fmt" + + "github.com/ovn-org/libovsdb/database" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// ReferenceProvider should be implemented by a database that tracks references +type ReferenceProvider interface { + // GetReferences provides the references to the provided row + GetReferences(database, table, uuid string) (database.References, error) + // Get provides the corresponding model + Get(database, table string, uuid string) (model.Model, error) +} + +// DatabaseUpdate bundles updates together with the updated +// reference information +type DatabaseUpdate struct { + ModelUpdates + referenceUpdates database.References +} + +func (u DatabaseUpdate) ForReferenceUpdates(do func(references database.References) error) error { + refsCopy := database.References{} + // since refsCopy is empty, this will just copy everything + applyReferenceModifications(refsCopy, u.referenceUpdates) + return do(refsCopy) +} + +func NewDatabaseUpdate(updates ModelUpdates, references database.References) DatabaseUpdate { + return DatabaseUpdate{ + ModelUpdates: updates, + referenceUpdates: references, + } +} + +// ProcessReferences tracks referential integrity for the provided set of +// updates. It returns an updated set of updates which includes additional +// updates and updated references as a result of the reference garbage +// collection described in RFC7047. These additional updates resulting from the +// reference garbage collection are also returned separately. 
Any constraint or +// referential integrity violation is returned as an error. +func ProcessReferences(dbModel model.DatabaseModel, provider ReferenceProvider, updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) { + referenceTracker := newReferenceTracker(dbModel, provider) + return referenceTracker.processReferences(updates) +} + +type referenceTracker struct { + dbModel model.DatabaseModel + provider ReferenceProvider + + // updates that are being processed + updates ModelUpdates + + // references are the updated references by the set of updates processed + references database.References + + // helper maps to track the rows that we are processing and their tables + tracked map[string]string + added map[string]string + deleted map[string]string +} + +func newReferenceTracker(dbModel model.DatabaseModel, provider ReferenceProvider) *referenceTracker { + return &referenceTracker{ + dbModel: dbModel, + provider: provider, + } +} + +func (rt *referenceTracker) processReferences(updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) { + rt.updates = updates + rt.tracked = make(map[string]string) + rt.added = make(map[string]string) + rt.deleted = make(map[string]string) + rt.references = make(database.References) + + referenceUpdates, err := rt.processReferencesLoop(updates) + if err != nil { + return ModelUpdates{}, ModelUpdates{}, nil, err + } + + // merge the updates generated from reference tracking into the main updates + err = updates.Merge(rt.dbModel, referenceUpdates) + if err != nil { + return ModelUpdates{}, ModelUpdates{}, nil, err + } + + return updates, referenceUpdates, rt.references, nil +} + +func (rt *referenceTracker) processReferencesLoop(updates ModelUpdates) (ModelUpdates, error) { + referenceUpdates := ModelUpdates{} + + // references can be transitive and deleting them can lead to further + // references having to be removed so loop until there are no updates to be + // made + for len(updates.updates) > 0 { + // update the references from the updates + err := rt.processModelUpdates(updates) + if err != nil { + return ModelUpdates{}, err + } + + // process strong reference integrity + updates, err = rt.processStrongReferences() + if err != nil { + return ModelUpdates{}, err + } + + // process weak reference integrity + weakUpdates, err := rt.processWeakReferences() + if err != nil { + return ModelUpdates{}, err + } + + // merge strong and weak reference updates + err = updates.Merge(rt.dbModel, weakUpdates) + if err != nil { + return ModelUpdates{}, err + } + + // merge updates from this iteration to the overall reference updates + err = referenceUpdates.Merge(rt.dbModel, updates) + if err != nil { + return ModelUpdates{}, err + } + } + + return referenceUpdates, nil +} + +// processModelUpdates keeps track of the updated references by a set of updates +func (rt *referenceTracker) processModelUpdates(updates ModelUpdates) error { + tables := updates.GetUpdatedTables() + for _, table := range tables { + err := updates.ForEachRowUpdate(table, func(uuid string, row ovsdb.RowUpdate2) error { + return rt.processRowUpdate(table, uuid, &row) + }) + if err != nil { + return err + } + } + return nil +} + +// processRowUpdate keeps track of the updated references by a given row update +func (rt *referenceTracker) processRowUpdate(table, uuid string, row *ovsdb.RowUpdate2) error { + + // getReferencesFromRowModify extracts updated references from the + // modifications. 
Following the same strategy as the modify field of Update2 + // notification, it will extract a difference, that is, both old removed + // references and new added references are extracted. This difference will + // then be applied to currently tracked references to come up with the + // updated references. + + // For more info on the modify field of Update2 notification and the + // strategy used to apply differences, check + // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + + var updateRefs database.References + switch { + case row.Delete != nil: + rt.deleted[uuid] = table + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Old, row.Old) + case row.Modify != nil: + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Modify, row.Old) + case row.Insert != nil: + if !isRoot(&rt.dbModel, table) { + // track rows added that are not part of the root set, we might need + // to delete those later + rt.added[uuid] = table + rt.tracked[uuid] = table + } + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Insert, nil) + } + + // (lazy) initialize existing references to the same rows from the database + for spec, refs := range updateRefs { + for to := range refs { + err := rt.initReferences(spec.ToTable, to) + if err != nil { + return err + } + } + } + + // apply the reference modifications to the initialized references + applyReferenceModifications(rt.references, updateRefs) + + return nil +} + +// processStrongReferences adds delete operations for rows that are not part of +// the root set and are no longer strongly referenced. Returns a referential +// integrity violation if a nonexistent row is strongly referenced or a strongly +// referenced row has been deleted. 
+func (rt *referenceTracker) processStrongReferences() (ModelUpdates, error) { + // make sure that we are tracking the references to the deleted rows + err := rt.initReferencesOfDeletedRows() + if err != nil { + return ModelUpdates{}, err + } + + // track if rows are referenced or not + isReferenced := map[string]bool{} + + // go over the updated references + for spec, refs := range rt.references { + + // we only care about strong references + if !isStrong(&rt.dbModel, spec) { + continue + } + + for to, from := range refs { + // check if the referenced row exists + exists, err := rt.rowExists(spec.ToTable, to) + if err != nil { + return ModelUpdates{}, err + } + if !exists { + for _, uuid := range from { + // strong reference to a row that does not exist + return ModelUpdates{}, ovsdb.NewReferentialIntegrityViolation(fmt.Sprintf( + "Table %s column %s row %s references nonexistent or deleted row %s in table %s", + spec.FromTable, spec.FromColumn, uuid, to, spec.ToTable)) + } + // we deleted the row ourselves on a previous loop + continue + } + + // track if this row is referenced from this location spec + isReferenced[to] = isReferenced[to] || len(from) > 0 + } + } + + // inserted rows that are unreferenced and not part of the root set will + // silently be dropped from the updates + for uuid := range rt.added { + if isReferenced[uuid] { + continue + } + isReferenced[uuid] = false + } + + // delete rows that are not referenced + updates := ModelUpdates{} + for uuid, isReferenced := range isReferenced { + if isReferenced { + // row is still referenced, ignore + continue + } + + if rt.deleted[uuid] != "" { + // already deleted, ignore + continue + } + + table := rt.tracked[uuid] + if isRoot(&rt.dbModel, table) { + // table is part of the root set, ignore + continue + } + + // delete row that is not part of the root set and is no longer + // referenced + update, err := rt.deleteRow(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + err = updates.Merge(rt.dbModel, update) + if err != nil { + return ModelUpdates{}, err + } + } + + return updates, nil +} + +// processWeakReferences deletes weak references to rows that were deleted. 
+// Returns a constraint violation if this results in invalid values +func (rt *referenceTracker) processWeakReferences() (ModelUpdates, error) { + // make sure that we are tracking the references to rows that might have + // been deleted as a result of strong reference garbage collection + err := rt.initReferencesOfDeletedRows() + if err != nil { + return ModelUpdates{}, err + } + + tables := map[string]string{} + originalRows := map[string]ovsdb.Row{} + updatedRows := map[string]ovsdb.Row{} + + for spec, refs := range rt.references { + // fetch some reference information from the schema + extendedType, minLenAllowed, refType, _ := refInfo(&rt.dbModel, spec.FromTable, spec.FromColumn, spec.FromValue) + isEmptyAllowed := minLenAllowed == 0 + + if refType != ovsdb.Weak { + // we only care about weak references + continue + } + + for to, from := range refs { + if len(from) == 0 { + // not referenced from anywhere, ignore + continue + } + + // check if the referenced row exists + exists, err := rt.rowExists(spec.ToTable, to) + if err != nil { + return ModelUpdates{}, err + } + if exists { + // we only care about rows that have been deleted or otherwise + // don't exist + continue + } + + // generate the updates to remove the references to deleted rows + for _, uuid := range from { + if _, ok := updatedRows[uuid]; !ok { + updatedRows[uuid] = ovsdb.NewRow() + } + + if rt.deleted[uuid] != "" { + // already deleted, ignore + continue + } + + // fetch the original rows + if originalRows[uuid] == nil { + originalRow, err := rt.getRow(spec.FromTable, uuid) + if err != nil { + return ModelUpdates{}, err + } + if originalRow == nil { + return ModelUpdates{}, fmt.Errorf("reference from non-existent model with uuid %s", uuid) + } + originalRows[uuid] = *originalRow + } + + var becomesLen int + switch extendedType { + case ovsdb.TypeMap: + // a map referencing the row + // generate the mutation to remove the entry form the map + originalMap := originalRows[uuid][spec.FromColumn].(ovsdb.OvsMap).GoMap + var mutationMap map[interface{}]interface{} + value, ok := updatedRows[uuid][spec.FromColumn] + if !ok { + mutationMap = map[interface{}]interface{}{} + } else { + mutationMap = value.(ovsdb.OvsMap).GoMap + } + // copy the map entries referencing the row from the original map + mutationMap = copyMapKeyValues(originalMap, mutationMap, !spec.FromValue, ovsdb.UUID{GoUUID: to}) + + // track the new length of the map + if !isEmptyAllowed { + becomesLen = len(originalMap) - len(mutationMap) + } + + updatedRows[uuid][spec.FromColumn] = ovsdb.OvsMap{GoMap: mutationMap} + + case ovsdb.TypeSet: + // a set referencing the row + // generate the mutation to remove the entry form the set + var mutationSet []interface{} + value, ok := updatedRows[uuid][spec.FromColumn] + if !ok { + mutationSet = []interface{}{} + } else { + mutationSet = value.(ovsdb.OvsSet).GoSet + } + mutationSet = append(mutationSet, ovsdb.UUID{GoUUID: to}) + + // track the new length of the set + if !isEmptyAllowed { + originalSet := originalRows[uuid][spec.FromColumn].(ovsdb.OvsSet).GoSet + becomesLen = len(originalSet) - len(mutationSet) + } + + updatedRows[uuid][spec.FromColumn] = ovsdb.OvsSet{GoSet: mutationSet} + + case ovsdb.TypeUUID: + // this is an atomic UUID value that needs to be cleared + updatedRows[uuid][spec.FromColumn] = nil + becomesLen = 0 + } + + if becomesLen < minLenAllowed { + return ModelUpdates{}, ovsdb.NewConstraintViolation(fmt.Sprintf( + "Deletion of a weak reference to a deleted (or never-existing) row from column %s in 
table %s "+ + "row %s caused this column to have an invalid length.", + spec.FromColumn, spec.FromTable, uuid)) + } + + // track the table of the row we are going to update + tables[uuid] = spec.FromTable + } + } + } + + // process the updates + updates := ModelUpdates{} + for uuid, rowUpdate := range updatedRows { + update, err := rt.updateRow(tables[uuid], uuid, rowUpdate) + if err != nil { + return ModelUpdates{}, err + } + err = updates.Merge(rt.dbModel, update) + if err != nil { + return ModelUpdates{}, err + } + } + + return updates, nil +} + +func copyMapKeyValues(from, to map[interface{}]interface{}, isKey bool, keyValue ovsdb.UUID) map[interface{}]interface{} { + if isKey { + to[keyValue] = from[keyValue] + return to + } + for key, value := range from { + if value.(ovsdb.UUID) == keyValue { + to[key] = from[key] + } + } + return to +} + +// initReferences initializes the references to the provided row from the +// database +func (rt *referenceTracker) initReferences(table, uuid string) error { + if _, ok := rt.tracked[uuid]; ok { + // already initialized + return nil + } + existingRefs, err := rt.provider.GetReferences(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return err + } + rt.references.UpdateReferences(existingRefs) + rt.tracked[uuid] = table + return nil +} + +func (rt *referenceTracker) initReferencesOfDeletedRows() error { + for uuid, table := range rt.deleted { + err := rt.initReferences(table, uuid) + if err != nil { + return err + } + } + return nil +} + +// deleteRow adds an update to delete the provided row. +func (rt *referenceTracker) deleteRow(table, uuid string) (ModelUpdates, error) { + model, err := rt.getModel(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + row, err := rt.getRow(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + + updates := ModelUpdates{} + update := ovsdb.RowUpdate2{Delete: &ovsdb.Row{}, Old: row} + err = updates.AddRowUpdate2(rt.dbModel, table, uuid, model, update) + + rt.deleted[uuid] = table + + return updates, err +} + +// updateRow generates updates for the provided row +func (rt *referenceTracker) updateRow(table, uuid string, row ovsdb.Row) (ModelUpdates, error) { + model, err := rt.getModel(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + + // In agreement with processWeakReferences, columns with values are assumed + // to be values of sets or maps that need to be mutated for deletion. + // Columns with no values are assumed to be atomic optional values that need + // to be cleared with an update. 
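+ // For illustration (hypothetical column names): a set or map column such
+ // as "external_ids" arriving here with a non-nil value produces a "delete"
+ // mutation for that value, while a column arriving with a nil value, e.g.
+ // an optional atomic reference "managed_by", is cleared by an update that
+ // writes the empty set.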
+ + mutations := make([]ovsdb.Mutation, 0, len(row)) + update := ovsdb.Row{} + for column, value := range row { + if value != nil { + mutations = append(mutations, *ovsdb.NewMutation(column, ovsdb.MutateOperationDelete, value)) + continue + } + update[column] = ovsdb.OvsSet{GoSet: []interface{}{}} + } + + updates := ModelUpdates{} + + if len(mutations) > 0 { + err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{ + Op: ovsdb.OperationMutate, + Table: table, + Mutations: mutations, + Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})}, + }) + if err != nil { + return ModelUpdates{}, err + } + } + + if len(update) > 0 { + err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{ + Op: ovsdb.OperationUpdate, + Table: table, + Row: update, + Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})}, + }) + if err != nil { + return ModelUpdates{}, err + } + } + + return updates, nil +} + +// getModel gets the model from the updates or the database +func (rt *referenceTracker) getModel(table, uuid string) (model.Model, error) { + if _, deleted := rt.deleted[uuid]; deleted { + // model has been deleted + return nil, nil + } + // look for the model in the updates + model := rt.updates.GetModel(table, uuid) + if model != nil { + return model, nil + } + // look for the model in the database + model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return nil, err + } + return model, nil +} + +// getRow gets the row from the updates or the database +func (rt *referenceTracker) getRow(table, uuid string) (*ovsdb.Row, error) { + if _, deleted := rt.deleted[uuid]; deleted { + // row has been deleted + return nil, nil + } + // look for the row in the updates + row := rt.updates.GetRow(table, uuid) + if row != nil { + return row, nil + } + // look for the model in the database and build the row + model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return nil, err + } + info, err := rt.dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + newRow, err := rt.dbModel.Mapper.NewRow(info) + if err != nil { + return nil, err + } + return &newRow, nil +} + +// rowExists returns whether the row exists either in the updates or the database +func (rt *referenceTracker) rowExists(table, uuid string) (bool, error) { + model, err := rt.getModel(table, uuid) + return model != nil, err +} + +func getReferenceModificationsFromRow(dbModel *model.DatabaseModel, table, uuid string, modify, old *ovsdb.Row) database.References { + refs := database.References{} + for column, value := range *modify { + var oldValue interface{} + if old != nil { + oldValue = (*old)[column] + } + crefs := getReferenceModificationsFromColumn(dbModel, table, uuid, column, value, oldValue) + refs.UpdateReferences(crefs) + } + return refs +} + +func getReferenceModificationsFromColumn(dbModel *model.DatabaseModel, table, uuid, column string, modify, old interface{}) database.References { + switch v := modify.(type) { + case ovsdb.UUID: + var oldUUID ovsdb.UUID + if old != nil { + oldUUID = old.(ovsdb.UUID) + } + return getReferenceModificationsFromAtom(dbModel, table, uuid, column, v, oldUUID) + case ovsdb.OvsSet: + var oldSet ovsdb.OvsSet + if old != nil { + oldSet = old.(ovsdb.OvsSet) + } + return getReferenceModificationsFromSet(dbModel, table, uuid, column, v, oldSet) + case ovsdb.OvsMap: + return 
getReferenceModificationsFromMap(dbModel, table, uuid, column, v) + } + return nil +} + +func getReferenceModificationsFromMap(dbModel *model.DatabaseModel, table, uuid, column string, value ovsdb.OvsMap) database.References { + if len(value.GoMap) == 0 { + return nil + } + + // get the referenced table + keyRefTable := refTable(dbModel, table, column, false) + valueRefTable := refTable(dbModel, table, column, true) + if keyRefTable == "" && valueRefTable == "" { + return nil + } + + from := uuid + keySpec := database.ReferenceSpec{ToTable: keyRefTable, FromTable: table, FromColumn: column, FromValue: false} + valueSpec := database.ReferenceSpec{ToTable: valueRefTable, FromTable: table, FromColumn: column, FromValue: true} + + refs := database.References{} + for k, v := range value.GoMap { + if keyRefTable != "" { + switch to := k.(type) { + case ovsdb.UUID: + if _, ok := refs[keySpec]; !ok { + refs[keySpec] = database.Reference{to.GoUUID: []string{from}} + } else if _, ok := refs[keySpec][to.GoUUID]; !ok { + refs[keySpec][to.GoUUID] = append(refs[keySpec][to.GoUUID], from) + } + } + } + if valueRefTable != "" { + switch to := v.(type) { + case ovsdb.UUID: + if _, ok := refs[valueSpec]; !ok { + refs[valueSpec] = database.Reference{to.GoUUID: []string{from}} + } else if _, ok := refs[valueSpec][to.GoUUID]; !ok { + refs[valueSpec][to.GoUUID] = append(refs[valueSpec][to.GoUUID], from) + } + } + } + } + + return refs +} + +func getReferenceModificationsFromSet(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.OvsSet) database.References { + // if the modify set is empty, it means the op is clearing an atomic value + // so pick the old value instead + value := modify + if len(modify.GoSet) == 0 { + value = old + } + + if len(value.GoSet) == 0 { + return nil + } + + // get the referenced table + refTable := refTable(dbModel, table, column, false) + if refTable == "" { + return nil + } + + spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column} + from := uuid + refs := database.References{spec: database.Reference{}} + for _, v := range value.GoSet { + switch to := v.(type) { + case ovsdb.UUID: + refs[spec][to.GoUUID] = append(refs[spec][to.GoUUID], from) + } + } + return refs +} + +func getReferenceModificationsFromAtom(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.UUID) database.References { + // get the referenced table + refTable := refTable(dbModel, table, column, false) + if refTable == "" { + return nil + } + spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column} + from := uuid + to := modify.GoUUID + refs := database.References{spec: {to: {from}}} + if old.GoUUID != "" { + // extract the old value as well + refs[spec][old.GoUUID] = []string{from} + } + return refs +} + +// applyReferenceModifications updates references in 'a' from those in 'b' +func applyReferenceModifications(a, b database.References) { + for spec, bv := range b { + for to, bfrom := range bv { + if av, ok := a[spec]; ok { + if afrom, ok := av[to]; ok { + r, _ := applyDifference(afrom, bfrom) + av[to] = r.([]string) + } else { + // this reference is not in 'a', so add it + av[to] = bfrom + } + } else { + // this reference is not in 'a', so add it + a[spec] = database.Reference{to: bfrom} + } + } + } +} + +func refInfo(dbModel *model.DatabaseModel, table, column string, mapValue bool) (ovsdb.ExtendedType, int, ovsdb.RefType, string) { + tSchema := dbModel.Schema.Table(table) + if tSchema == nil { + 
panic(fmt.Sprintf("unexpected schema error: no schema for table %s", table)) + } + + cSchema := tSchema.Column(column) + if cSchema == nil { + panic(fmt.Sprintf("unexpected schema error: no schema for column %s", column)) + } + + cType := cSchema.TypeObj + if cType == nil { + // this is not a reference + return "", 0, "", "" + } + + var bType *ovsdb.BaseType + switch { + case !mapValue && cType.Key != nil: + bType = cType.Key + case mapValue && cType.Value != nil: + bType = cType.Value + default: + panic(fmt.Sprintf("unexpected schema error: no schema for map value on column %s", column)) + } + if bType.Type != ovsdb.TypeUUID { + // this is not a reference + return "", 0, "", "" + } + + // treat optional values represented with sets as atomic UUIDs + extendedType := cSchema.Type + if extendedType == ovsdb.TypeSet && cType.Min() == 0 && cType.Max() == 1 { + extendedType = ovsdb.TypeUUID + } + + rType, err := bType.RefType() + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + + rTable, err := bType.RefTable() + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + + return extendedType, cType.Min(), rType, rTable +} + +func refTable(dbModel *model.DatabaseModel, table, column string, mapValue bool) ovsdb.RefType { + _, _, _, refTable := refInfo(dbModel, table, column, mapValue) + return refTable +} + +func isRoot(dbModel *model.DatabaseModel, table string) bool { + isRoot, err := dbModel.Schema.IsRoot(table) + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + return isRoot +} + +func isStrong(dbModel *model.DatabaseModel, spec database.ReferenceSpec) bool { + _, _, refType, _ := refInfo(dbModel, spec.FromTable, spec.FromColumn, spec.FromValue) + return refType == ovsdb.Strong +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/updates.go b/vendor/github.com/ovn-org/libovsdb/updates/updates.go new file mode 100644 index 000000000..4ff2363a0 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/updates.go @@ -0,0 +1,528 @@ +package updates + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +type rowUpdate2 = ovsdb.RowUpdate2 + +// modelUpdate contains an update in model and OVSDB RowUpdate2 notation +type modelUpdate struct { + rowUpdate2 *rowUpdate2 + old model.Model + new model.Model +} + +// isEmpty returns whether this update is empty +func (mu modelUpdate) isEmpty() bool { + return mu == modelUpdate{} +} + +// ModelUpdates contains updates indexed by table and uuid +type ModelUpdates struct { + updates map[string]map[string]modelUpdate +} + +// GetUpdatedTables returns the tables that have updates +func (u ModelUpdates) GetUpdatedTables() []string { + tables := make([]string, 0, len(u.updates)) + for table, updates := range u.updates { + if len(updates) > 0 { + tables = append(tables, table) + } + } + return tables +} + +// ForEachModelUpdate processes each row update of a given table in model +// notation +func (u ModelUpdates) ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error { + models := u.updates[table] + for uuid, model := range models { + err := do(uuid, model.old, model.new) + if err != nil { + return err + } + } + return nil +} + +// ForEachRowUpdate processes each row update of a given table in OVSDB +// RowUpdate2 notation +func (u ModelUpdates) ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error { + rows 
:= u.updates[table] + for uuid, row := range rows { + err := do(uuid, *row.rowUpdate2) + if err != nil { + return err + } + } + return nil +} + +// GetModel returns the last known state of the requested model. If the model is +// unknown or has been deleted, returns nil. +func (u ModelUpdates) GetModel(table, uuid string) model.Model { + if u.updates == nil { + return nil + } + if t, found := u.updates[table]; found { + if update, found := t[uuid]; found { + return update.new + } + } + return nil +} + +// GetRow returns the last known state of the requested row. If the row is +// unknown or has been deleted, returns nil. +func (u ModelUpdates) GetRow(table, uuid string) *ovsdb.Row { + if u.updates == nil { + return nil + } + if t, found := u.updates[table]; found { + if update, found := t[uuid]; found { + return update.rowUpdate2.New + } + } + return nil +} + +// Merge a set of updates with an earlier set of updates +func (u *ModelUpdates) Merge(dbModel model.DatabaseModel, new ModelUpdates) error { + for table, models := range new.updates { + for uuid, update := range models { + err := u.addUpdate(dbModel, table, uuid, update) + if err != nil { + return err + } + } + } + return nil +} + +// AddOperation adds an update for a model from a OVSDB Operation. If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. +func (u *ModelUpdates) AddOperation(dbModel model.DatabaseModel, table, uuid string, current model.Model, op *ovsdb.Operation) error { + switch op.Op { + case ovsdb.OperationInsert: + return u.addInsertOperation(dbModel, table, uuid, op) + case ovsdb.OperationUpdate: + return u.addUpdateOperation(dbModel, table, uuid, current, op) + case ovsdb.OperationMutate: + return u.addMutateOperation(dbModel, table, uuid, current, op) + case ovsdb.OperationDelete: + return u.addDeleteOperation(dbModel, table, uuid, current, op) + default: + return fmt.Errorf("database update from operation %#v not supported", op.Op) + } +} + +// AddRowUpdate adds an update for a model from a OVSDB RowUpdate. If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. +func (u *ModelUpdates) AddRowUpdate(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru ovsdb.RowUpdate) error { + switch { + case ru.Old == nil && ru.New != nil: + new, err := model.CreateModel(dbModel, table, ru.New, uuid) + if err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &rowUpdate2{New: ru.New}}) + if err != nil { + return err + } + case ru.Old != nil && ru.New != nil: + old := current + new := model.Clone(current) + info, err := dbModel.NewModelInfo(new) + if err != nil { + return err + } + changed, err := updateModel(dbModel, table, info, ru.New, nil) + if !changed || err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &rowUpdate2{Old: ru.Old, New: ru.New}}) + if err != nil { + return err + } + case ru.New == nil: + old := current + err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &rowUpdate2{Old: ru.Old}}) + if err != nil { + return err + } + } + return nil +} + +// AddRowUpdate2 adds an update for a model from a OVSDB RowUpdate2. 
If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. +func (u *ModelUpdates) AddRowUpdate2(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru2 ovsdb.RowUpdate2) error { + switch { + case ru2.Initial != nil: + ru2.Insert = ru2.Initial + fallthrough + case ru2.Insert != nil: + new, err := model.CreateModel(dbModel, table, ru2.Insert, uuid) + if err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &ru2}) + if err != nil { + return err + } + case ru2.Modify != nil: + old := current + new := model.Clone(current) + info, err := dbModel.NewModelInfo(new) + if err != nil { + return err + } + changed, err := modifyModel(dbModel, table, info, ru2.Modify) + if !changed || err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &ru2}) + if err != nil { + return err + } + default: + old := current + err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &ru2}) + if err != nil { + return err + } + } + return nil +} + +func (u *ModelUpdates) addUpdate(dbModel model.DatabaseModel, table, uuid string, update modelUpdate) error { + if u.updates == nil { + u.updates = map[string]map[string]modelUpdate{} + } + if _, ok := u.updates[table]; !ok { + u.updates[table] = make(map[string]modelUpdate) + } + + ts := dbModel.Schema.Table(table) + update, err := merge(ts, u.updates[table][uuid], update) + if err != nil { + return err + } + + if !update.isEmpty() { + u.updates[table][uuid] = update + return nil + } + + // If after the merge this amounts to no update, remove it from the list and + // clean up + delete(u.updates[table], uuid) + if len(u.updates[table]) == 0 { + delete(u.updates, table) + } + if len(u.updates) == 0 { + u.updates = nil + } + + return nil +} + +func (u *ModelUpdates) addInsertOperation(dbModel model.DatabaseModel, table, uuid string, op *ovsdb.Operation) error { + m := dbModel.Mapper + + model, err := dbModel.NewModel(table) + if err != nil { + return err + } + + mapperInfo, err := dbModel.NewModelInfo(model) + if err != nil { + return err + } + + err = m.GetRowData(&op.Row, mapperInfo) + if err != nil { + return err + } + + err = mapperInfo.SetField("_uuid", uuid) + if err != nil { + return err + } + + resultRow, err := m.NewRow(mapperInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: nil, + new: model, + rowUpdate2: &rowUpdate2{ + Insert: &resultRow, + New: &resultRow, + Old: nil, + }, + }, + ) + + return err +} + +func (u *ModelUpdates) addUpdateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { + m := dbModel.Mapper + + oldInfo, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(oldInfo) + if err != nil { + return err + } + + new := model.Clone(old) + newInfo, err := dbModel.NewModelInfo(new) + if err != nil { + return err + } + + delta := ovsdb.NewRow() + changed, err := updateModel(dbModel, table, newInfo, &op.Row, &delta) + if err != nil { + return err + } + if !changed { + return nil + } + + newRow, err := m.NewRow(newInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: new, + rowUpdate2: &rowUpdate2{ + Modify: &delta, + Old: &oldRow, + New: &newRow, + }, + }, + ) + + return err +} + +func (u 
*ModelUpdates) addMutateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { + m := dbModel.Mapper + schema := dbModel.Schema.Table(table) + + oldInfo, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(oldInfo) + if err != nil { + return err + } + + new := model.Clone(old) + newInfo, err := dbModel.NewModelInfo(new) + if err != nil { + return err + } + + differences := make(map[string]interface{}) + for _, mutation := range op.Mutations { + column := schema.Column(mutation.Column) + if column == nil { + continue + } + + var nativeValue interface{} + // Usually a mutation value is of the same type of the value being mutated + // except for delete mutation of maps where it can also be a list of same type of + // keys (rfc7047 5.1). Handle this special case here. + if mutation.Mutator == "delete" && column.Type == ovsdb.TypeMap && reflect.TypeOf(mutation.Value) != reflect.TypeOf(ovsdb.OvsMap{}) { + nativeValue, err = ovsdb.OvsToNativeSlice(column.TypeObj.Key.Type, mutation.Value) + if err != nil { + return err + } + } else { + nativeValue, err = ovsdb.OvsToNative(column, mutation.Value) + if err != nil { + return err + } + } + + if err := ovsdb.ValidateMutation(column, mutation.Mutator, nativeValue); err != nil { + return err + } + + current, err := newInfo.FieldByColumn(mutation.Column) + if err != nil { + return err + } + + newValue, diff := mutate(current, mutation.Mutator, nativeValue) + if err := newInfo.SetField(mutation.Column, newValue); err != nil { + return err + } + + old, err := oldInfo.FieldByColumn(mutation.Column) + if err != nil { + return err + } + diff, changed := mergeDifference(old, differences[mutation.Column], diff) + if changed { + differences[mutation.Column] = diff + } else { + delete(differences, mutation.Column) + } + } + + if len(differences) == 0 { + return nil + } + + delta := ovsdb.NewRow() + for column, diff := range differences { + colSchema := schema.Column(column) + diffOvs, err := ovsdb.NativeToOvs(colSchema, diff) + if err != nil { + return err + } + delta[column] = diffOvs + } + + newRow, err := m.NewRow(newInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: new, + rowUpdate2: &rowUpdate2{ + Modify: &delta, + Old: &oldRow, + New: &newRow, + }, + }, + ) + + return err +} + +func (u *ModelUpdates) addDeleteOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { + m := dbModel.Mapper + + info, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(info) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: nil, + rowUpdate2: &rowUpdate2{ + Delete: &ovsdb.Row{}, + Old: &oldRow, + }, + }, + ) + + return err +} + +func updateModel(dbModel model.DatabaseModel, table string, info *mapper.Info, update, modify *ovsdb.Row) (bool, error) { + return updateOrModifyModel(dbModel, table, info, update, modify, false) +} + +func modifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, modify *ovsdb.Row) (bool, error) { + return updateOrModifyModel(dbModel, table, info, modify, nil, true) +} + +// updateOrModifyModel updates info about a model with a given row containing +// the change. The change row itself can be interpreted as an update or a +// modify. 
If the change is an update and a modify row is provided, it will be +// filled with the modify data. +func updateOrModifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, changeRow, modifyRow *ovsdb.Row, isModify bool) (bool, error) { + schema := dbModel.Schema.Table(table) + var changed bool + + for column, updateOvs := range *changeRow { + colSchema := schema.Column(column) + if colSchema == nil { + // ignore columns we don't know about in our schema + continue + } + + currentNative, err := info.FieldByColumn(column) + if err != nil { + return false, err + } + + updateNative, err := ovsdb.OvsToNative(colSchema, updateOvs) + if err != nil { + return false, err + } + + if isModify { + differenceNative, isDifferent := applyDifference(currentNative, updateNative) + if isDifferent && !colSchema.Mutable() { + return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table)) + } + changed = changed || isDifferent + err = info.SetField(column, differenceNative) + if err != nil { + return false, err + } + } else { + differenceNative, isDifferent := difference(currentNative, updateNative) + if isDifferent && !colSchema.Mutable() { + return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table)) + } + changed = changed || isDifferent + if isDifferent && modifyRow != nil { + deltaOvs, err := ovsdb.NativeToOvs(colSchema, differenceNative) + if err != nil { + return false, err + } + (*modifyRow)[column] = deltaOvs + } + err = info.SetField(column, updateNative) + if err != nil { + return false, err + } + } + } + + return changed, nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore new file mode 100644 index 000000000..734ba1eff --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go new file mode 100644 index 000000000..d0135c488 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go @@ -0,0 +1,570 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
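+// Editor's note (added for review of this patch; not part of the generated output):
+// the models in this file are produced by modelgen from vswitch.ovsschema -- see
+// the //go:generate directive in gen.go later in this patch -- and are registered
+// with the libovsdb client through ObservDatabaseModel in observ_model.go.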
+ +package ovsdb + +import "github.com/ovn-org/libovsdb/model" + +const BridgeTable = "Bridge" + +type ( + BridgeFailMode = string + BridgeProtocols = string +) + +var ( + BridgeFailModeStandalone BridgeFailMode = "standalone" + BridgeFailModeSecure BridgeFailMode = "secure" + BridgeProtocolsOpenflow10 BridgeProtocols = "OpenFlow10" + BridgeProtocolsOpenflow11 BridgeProtocols = "OpenFlow11" + BridgeProtocolsOpenflow12 BridgeProtocols = "OpenFlow12" + BridgeProtocolsOpenflow13 BridgeProtocols = "OpenFlow13" + BridgeProtocolsOpenflow14 BridgeProtocols = "OpenFlow14" + BridgeProtocolsOpenflow15 BridgeProtocols = "OpenFlow15" +) + +// Bridge defines an object in Bridge table +type Bridge struct { + UUID string `ovsdb:"_uuid"` + AutoAttach *string `ovsdb:"auto_attach"` + Controller []string `ovsdb:"controller"` + DatapathID *string `ovsdb:"datapath_id"` + DatapathType string `ovsdb:"datapath_type"` + DatapathVersion string `ovsdb:"datapath_version"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + FailMode *BridgeFailMode `ovsdb:"fail_mode"` + FloodVLANs []int `ovsdb:"flood_vlans"` + FlowTables map[int]string `ovsdb:"flow_tables"` + IPFIX *string `ovsdb:"ipfix"` + McastSnoopingEnable bool `ovsdb:"mcast_snooping_enable"` + Mirrors []string `ovsdb:"mirrors"` + Name string `ovsdb:"name"` + Netflow *string `ovsdb:"netflow"` + OtherConfig map[string]string `ovsdb:"other_config"` + Ports []string `ovsdb:"ports"` + Protocols []BridgeProtocols `ovsdb:"protocols"` + RSTPEnable bool `ovsdb:"rstp_enable"` + RSTPStatus map[string]string `ovsdb:"rstp_status"` + Sflow *string `ovsdb:"sflow"` + Status map[string]string `ovsdb:"status"` + STPEnable bool `ovsdb:"stp_enable"` +} + +func (a *Bridge) GetUUID() string { + return a.UUID +} + +func (a *Bridge) GetAutoAttach() *string { + return a.AutoAttach +} + +func copyBridgeAutoAttach(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeAutoAttach(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetController() []string { + return a.Controller +} + +func copyBridgeController(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgeController(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetDatapathID() *string { + return a.DatapathID +} + +func copyBridgeDatapathID(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeDatapathID(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetDatapathType() string { + return a.DatapathType +} + +func (a *Bridge) GetDatapathVersion() string { + return a.DatapathVersion +} + +func (a *Bridge) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyBridgeExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + 
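+// Editor's note (illustrative sketch, not part of the generated upstream file):
+// the per-column copy*/equal* helpers in this file are the building blocks for
+// DeepCopyInto and Equals defined further below. A minimal usage sketch, assuming
+// a populated *Bridge value named br:
+//
+//	snapshot := br.DeepCopy()          // maps and slices are cloned, not aliased
+//	br.ExternalIDs["owner"] = "demo"   // mutating the original...
+//	fmt.Println(br.Equals(snapshot))   // ...no longer equals the snapshot: false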
+func (a *Bridge) GetFailMode() *BridgeFailMode { + return a.FailMode +} + +func copyBridgeFailMode(a *BridgeFailMode) *BridgeFailMode { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeFailMode(a, b *BridgeFailMode) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetFloodVLANs() []int { + return a.FloodVLANs +} + +func copyBridgeFloodVLANs(a []int) []int { + if a == nil { + return nil + } + b := make([]int, len(a)) + copy(b, a) + return b +} + +func equalBridgeFloodVLANs(a, b []int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetFlowTables() map[int]string { + return a.FlowTables +} + +func copyBridgeFlowTables(a map[int]string) map[int]string { + if a == nil { + return nil + } + b := make(map[int]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeFlowTables(a, b map[int]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetIPFIX() *string { + return a.IPFIX +} + +func copyBridgeIPFIX(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeIPFIX(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetMcastSnoopingEnable() bool { + return a.McastSnoopingEnable +} + +func (a *Bridge) GetMirrors() []string { + return a.Mirrors +} + +func copyBridgeMirrors(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgeMirrors(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetName() string { + return a.Name +} + +func (a *Bridge) GetNetflow() *string { + return a.Netflow +} + +func copyBridgeNetflow(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeNetflow(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyBridgeOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetPorts() []string { + return a.Ports +} + +func copyBridgePorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgePorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetProtocols() []BridgeProtocols { + return a.Protocols +} + +func 
copyBridgeProtocols(a []BridgeProtocols) []BridgeProtocols { + if a == nil { + return nil + } + b := make([]BridgeProtocols, len(a)) + copy(b, a) + return b +} + +func equalBridgeProtocols(a, b []BridgeProtocols) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetRSTPEnable() bool { + return a.RSTPEnable +} + +func (a *Bridge) GetRSTPStatus() map[string]string { + return a.RSTPStatus +} + +func copyBridgeRSTPStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeRSTPStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetSflow() *string { + return a.Sflow +} + +func copyBridgeSflow(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeSflow(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetStatus() map[string]string { + return a.Status +} + +func copyBridgeStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetSTPEnable() bool { + return a.STPEnable +} + +func (a *Bridge) DeepCopyInto(b *Bridge) { + *b = *a + b.AutoAttach = copyBridgeAutoAttach(a.AutoAttach) + b.Controller = copyBridgeController(a.Controller) + b.DatapathID = copyBridgeDatapathID(a.DatapathID) + b.ExternalIDs = copyBridgeExternalIDs(a.ExternalIDs) + b.FailMode = copyBridgeFailMode(a.FailMode) + b.FloodVLANs = copyBridgeFloodVLANs(a.FloodVLANs) + b.FlowTables = copyBridgeFlowTables(a.FlowTables) + b.IPFIX = copyBridgeIPFIX(a.IPFIX) + b.Mirrors = copyBridgeMirrors(a.Mirrors) + b.Netflow = copyBridgeNetflow(a.Netflow) + b.OtherConfig = copyBridgeOtherConfig(a.OtherConfig) + b.Ports = copyBridgePorts(a.Ports) + b.Protocols = copyBridgeProtocols(a.Protocols) + b.RSTPStatus = copyBridgeRSTPStatus(a.RSTPStatus) + b.Sflow = copyBridgeSflow(a.Sflow) + b.Status = copyBridgeStatus(a.Status) +} + +func (a *Bridge) DeepCopy() *Bridge { + b := new(Bridge) + a.DeepCopyInto(b) + return b +} + +func (a *Bridge) CloneModelInto(b model.Model) { + c := b.(*Bridge) + a.DeepCopyInto(c) +} + +func (a *Bridge) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Bridge) Equals(b *Bridge) bool { + return a.UUID == b.UUID && + equalBridgeAutoAttach(a.AutoAttach, b.AutoAttach) && + equalBridgeController(a.Controller, b.Controller) && + equalBridgeDatapathID(a.DatapathID, b.DatapathID) && + a.DatapathType == b.DatapathType && + a.DatapathVersion == b.DatapathVersion && + equalBridgeExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalBridgeFailMode(a.FailMode, b.FailMode) && + equalBridgeFloodVLANs(a.FloodVLANs, b.FloodVLANs) && + equalBridgeFlowTables(a.FlowTables, b.FlowTables) && + equalBridgeIPFIX(a.IPFIX, b.IPFIX) && + 
a.McastSnoopingEnable == b.McastSnoopingEnable && + equalBridgeMirrors(a.Mirrors, b.Mirrors) && + a.Name == b.Name && + equalBridgeNetflow(a.Netflow, b.Netflow) && + equalBridgeOtherConfig(a.OtherConfig, b.OtherConfig) && + equalBridgePorts(a.Ports, b.Ports) && + equalBridgeProtocols(a.Protocols, b.Protocols) && + a.RSTPEnable == b.RSTPEnable && + equalBridgeRSTPStatus(a.RSTPStatus, b.RSTPStatus) && + equalBridgeSflow(a.Sflow, b.Sflow) && + equalBridgeStatus(a.Status, b.Status) && + a.STPEnable == b.STPEnable +} + +func (a *Bridge) EqualsModel(b model.Model) bool { + c := b.(*Bridge) + return a.Equals(c) +} + +var _ model.CloneableModel = &Bridge{} +var _ model.ComparableModel = &Bridge{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go new file mode 100644 index 000000000..57a26e805 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go @@ -0,0 +1,143 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package ovsdb + +import "github.com/ovn-org/libovsdb/model" + +const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set" + +// FlowSampleCollectorSet defines an object in Flow_Sample_Collector_Set table +type FlowSampleCollectorSet struct { + UUID string `ovsdb:"_uuid"` + Bridge string `ovsdb:"bridge"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + IPFIX *string `ovsdb:"ipfix"` + LocalGroupID *int `ovsdb:"local_group_id"` +} + +func (a *FlowSampleCollectorSet) GetUUID() string { + return a.UUID +} + +func (a *FlowSampleCollectorSet) GetBridge() string { + return a.Bridge +} + +func (a *FlowSampleCollectorSet) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyFlowSampleCollectorSetExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalFlowSampleCollectorSetExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *FlowSampleCollectorSet) GetID() int { + return a.ID +} + +func (a *FlowSampleCollectorSet) GetIPFIX() *string { + return a.IPFIX +} + +func copyFlowSampleCollectorSetIPFIX(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowSampleCollectorSetIPFIX(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowSampleCollectorSet) GetLocalGroupID() *int { + return a.LocalGroupID +} + +func copyFlowSampleCollectorSetLocalGroupID(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowSampleCollectorSetLocalGroupID(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowSampleCollectorSet) DeepCopyInto(b *FlowSampleCollectorSet) { + *b = *a + b.ExternalIDs = copyFlowSampleCollectorSetExternalIDs(a.ExternalIDs) + b.IPFIX = copyFlowSampleCollectorSetIPFIX(a.IPFIX) + b.LocalGroupID = copyFlowSampleCollectorSetLocalGroupID(a.LocalGroupID) +} + +func (a *FlowSampleCollectorSet) DeepCopy() *FlowSampleCollectorSet 
{ + b := new(FlowSampleCollectorSet) + a.DeepCopyInto(b) + return b +} + +func (a *FlowSampleCollectorSet) CloneModelInto(b model.Model) { + c := b.(*FlowSampleCollectorSet) + a.DeepCopyInto(c) +} + +func (a *FlowSampleCollectorSet) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *FlowSampleCollectorSet) Equals(b *FlowSampleCollectorSet) bool { + return a.UUID == b.UUID && + a.Bridge == b.Bridge && + equalFlowSampleCollectorSetExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + equalFlowSampleCollectorSetIPFIX(a.IPFIX, b.IPFIX) && + equalFlowSampleCollectorSetLocalGroupID(a.LocalGroupID, b.LocalGroupID) +} + +func (a *FlowSampleCollectorSet) EqualsModel(b model.Model) bool { + c := b.(*FlowSampleCollectorSet) + return a.Equals(c) +} + +var _ model.CloneableModel = &FlowSampleCollectorSet{} +var _ model.ComparableModel = &FlowSampleCollectorSet{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go new file mode 100644 index 000000000..c5aabca46 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go @@ -0,0 +1,3 @@ +package ovsdb + +//go:generate modelgen --extended -p ovsdb -o . vswitch.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go new file mode 100644 index 000000000..7ba2329e3 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go @@ -0,0 +1,11 @@ +package ovsdb + +import "github.com/ovn-org/libovsdb/model" + +// ObservDatabaseModel returns the DatabaseModel object to be used by observability library. +func ObservDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ + "Bridge": &Bridge{}, + "Flow_Sample_Collector_Set": &FlowSampleCollectorSet{}, + }) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go new file mode 100644 index 000000000..5ff1587a6 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go @@ -0,0 +1,118 @@ +package sampledecoder + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "k8s.io/klog/v2/textlogger" +) + +const OVSDBTimeout = 10 * time.Second + +func NewNBClientWithConfig(ctx context.Context, cfg dbConfig) (client.Client, error) { + dbModel, err := nbdb.FullDatabaseModel() + if err != nil { + return nil, err + } + + // define client indexes for ACLs to quickly find them by sample_new or sample_est column. 
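+	// Editor's note (illustrative, not upstream code): the two single-column client
+	// indexes registered just below are what the comment above refers to; they let
+	// the libovsdb cache resolve equality lookups on sample_new or sample_est via
+	// an index rather than a scan of the whole ACL table. findACLBySample in
+	// sample_decoder.go relies on this when it issues a lookup like the following
+	// (sampleUUID is a hypothetical value):
+	//
+	//	acls := []*nbdb.ACL{}
+	//	err := nbClient.Where(&nbdb.ACL{SampleNew: &sampleUUID}).List(ctx, &acls)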
+ dbModel.SetIndexes(map[string][]model.ClientIndex{ + nbdb.ACLTable: { + {Columns: []model.ColumnKey{{Column: "sample_new"}}}, + {Columns: []model.ColumnKey{{Column: "sample_est"}}}, + }, + }) + + c, err := newClient(cfg, dbModel) + if err != nil { + return nil, err + } + + _, err = c.Monitor(ctx, + c.NewMonitor( + client.WithTable(&nbdb.ACL{}), + client.WithTable(&nbdb.Sample{}), + ), + ) + + if err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +func NewOVSDBClientWithConfig(ctx context.Context, cfg dbConfig) (client.Client, error) { + dbModel, err := ovsdb.ObservDatabaseModel() + if err != nil { + return nil, err + } + + c, err := newClient(cfg, dbModel) + if err != nil { + return nil, err + } + + _, err = c.Monitor(ctx, + c.NewMonitor( + client.WithTable(&ovsdb.FlowSampleCollectorSet{}), + client.WithTable(&ovsdb.Bridge{}), + ), + ) + if err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// newClient creates a new client object given the provided config +// the stopCh is required to ensure the goroutine for ssl cert +// update is not leaked +func newClient(cfg dbConfig, dbModel model.ClientDBModel) (client.Client, error) { + const connectTimeout = OVSDBTimeout * 2 + const inactivityTimeout = OVSDBTimeout * 18 + // Don't log anything from the libovsdb client by default + config := textlogger.NewConfig(textlogger.Verbosity(0)) + logger := textlogger.NewLogger(config) + + options := []client.Option{ + // Reading and parsing the DB after reconnect at scale can (unsurprisingly) + // take longer than a normal ovsdb operation. Give it a bit more time, so + // we don't time out and enter a reconnect loop. In addition, it also enables + // inactivity check on the ovsdb connection. + client.WithInactivityCheck(inactivityTimeout, connectTimeout, &backoff.ZeroBackOff{}), + client.WithLeaderOnly(true), + client.WithLogger(&logger), + } + + for _, endpoint := range strings.Split(cfg.address, ",") { + options = append(options, client.WithEndpoint(endpoint)) + } + if cfg.scheme != "unix" { + return nil, fmt.Errorf("only unix scheme is supported for now") + } + + client, err := client.NewOVSDBClient(dbModel, options...) 
+ if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), connectTimeout) + defer cancel() + err = client.Connect(ctx) + if err != nil { + return nil, err + } + + return client, nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go new file mode 100644 index 000000000..0bf419bd5 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go @@ -0,0 +1,290 @@ +package sampledecoder + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" +) + +type SampleDecoder struct { + nbClient client.Client + ovsdbClient client.Client + cleanupCollectors []int +} + +type dbConfig struct { + address string + scheme string +} + +type Cookie struct { + ObsDomainID uint32 + ObsPointID uint32 +} + +const CookieSize = 8 +const bridgeName = "br-int" + +var SampleEndian = getEndian() + +func getEndian() binary.ByteOrder { + // Use network bite order + return binary.BigEndian +} + +// getLocalNBClient only supports connecting to nbdb via unix socket. +// address is the path to the unix socket, e.g. "/var/run/ovn/ovnnb_db.sock" +func getLocalNBClient(ctx context.Context, address string) (client.Client, error) { + config := dbConfig{ + address: "unix:" + address, + scheme: "unix", + } + libovsdbOvnNBClient, err := NewNBClientWithConfig(ctx, config) + if err != nil { + return nil, fmt.Errorf("error creating libovsdb client: %w ", err) + } + return libovsdbOvnNBClient, nil +} + +func getLocalOVSDBClient(ctx context.Context) (client.Client, error) { + config := dbConfig{ + address: "unix:/var/run/openvswitch/db.sock", + scheme: "unix", + } + return NewOVSDBClientWithConfig(ctx, config) +} + +// NewSampleDecoderWithDefaultCollector creates a new SampleDecoder, initializes the OVSDB client and adds the default collector. +// It allows to set the groupID and ownerName for the created default collector. +// If the default collector already exists with a different owner or different groupID an error will be returned. +// Shutdown should be called to clean up the collector from the OVSDB. +func NewSampleDecoderWithDefaultCollector(ctx context.Context, nbdbSocketPath string, ownerName string, groupID int) (*SampleDecoder, error) { + nbClient, err := getLocalNBClient(ctx, nbdbSocketPath) + if err != nil { + return nil, err + } + ovsdbClient, err := getLocalOVSDBClient(ctx) + if err != nil { + return nil, err + } + decoder := &SampleDecoder{ + nbClient: nbClient, + ovsdbClient: ovsdbClient, + } + err = decoder.AddCollector(observability.DefaultObservabilityCollectorSetID, groupID, ownerName) + if err != nil { + return nil, err + } + decoder.cleanupCollectors = append(decoder.cleanupCollectors, observability.DefaultObservabilityCollectorSetID) + return decoder, nil +} + +// NewSampleDecoder creates a new SampleDecoder and initializes the OVSDB client. 
+func NewSampleDecoder(ctx context.Context, nbdbSocketPath string) (*SampleDecoder, error) { + nbClient, err := getLocalNBClient(ctx, nbdbSocketPath) + if err != nil { + return nil, err + } + return &SampleDecoder{ + nbClient: nbClient, + }, nil +} + +func (d *SampleDecoder) Shutdown() { + for _, collectorID := range d.cleanupCollectors { + err := d.DeleteCollector(collectorID) + if err != nil { + fmt.Printf("Error deleting collector with ID=%d: %v", collectorID, err) + } + } +} + +func getObservAppID(obsDomainID uint32) uint8 { + return uint8(obsDomainID >> 24) +} + +// findACLBySample relies on the client index based on sample_new and sample_est column. +func findACLBySample(nbClient client.Client, acl *nbdb.ACL) ([]*nbdb.ACL, error) { + found := []*nbdb.ACL{} + err := nbClient.Where(acl).List(context.Background(), &found) + return found, err +} + +func (d *SampleDecoder) DecodeCookieIDs(obsDomainID, obsPointID uint32) (string, error) { + // Find sample using obsPointID + sample, err := libovsdbops.FindSample(d.nbClient, int(obsPointID)) + if err != nil || sample == nil { + return "", fmt.Errorf("find sample failed: %w", err) + } + // find db object using observ application ID + // Since ACL is indexed both by sample_new and sample_est, when searching by one of them, + // we need to make sure the other one will not match. + // nil is a valid index value, therefore we have to use non-existing UUID. + wrongUUID := "wrongUUID" + var dbObj interface{} + switch getObservAppID(obsDomainID) { + case observability.ACLNewTrafficSamplingID: + acls, err := findACLBySample(d.nbClient, &nbdb.ACL{SampleNew: &sample.UUID, SampleEst: &wrongUUID}) + if err != nil { + return "", fmt.Errorf("find acl for sample failed: %w", err) + } + if len(acls) != 1 { + return "", fmt.Errorf("expected 1 ACL, got %d", len(acls)) + } + dbObj = acls[0] + case observability.ACLEstTrafficSamplingID: + acls, err := findACLBySample(d.nbClient, &nbdb.ACL{SampleNew: &wrongUUID, SampleEst: &sample.UUID}) + if err != nil { + return "", fmt.Errorf("find acl for sample failed: %w", err) + } + if len(acls) != 1 { + return "", fmt.Errorf("expected 1 ACL, got %d", len(acls)) + } + dbObj = acls[0] + default: + return "", fmt.Errorf("unknown app ID: %d", getObservAppID(obsDomainID)) + } + msg := getMessage(dbObj) + if msg == "" { + return "", fmt.Errorf("failed to get message for db object %v", dbObj) + } + return msg, nil +} + +func getMessage(dbObj interface{}) string { + switch o := dbObj.(type) { + case *nbdb.ACL: + var action string + switch o.Action { + case nbdb.ACLActionAllow, nbdb.ACLActionAllowRelated, nbdb.ACLActionAllowStateless: + action = "Allowed" + case nbdb.ACLActionDrop: + action = "Dropped" + case nbdb.ACLActionPass: + action = "Delegated to network policy" + default: + action = "Action " + o.Action + } + actor := o.ExternalIDs[libovsdbops.OwnerTypeKey.String()] + var msg string + switch actor { + case libovsdbops.AdminNetworkPolicyOwnerType: + msg = fmt.Sprintf("admin network policy %s, direction %s", o.ExternalIDs[libovsdbops.ObjectNameKey.String()], o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]) + case libovsdbops.BaselineAdminNetworkPolicyOwnerType: + msg = fmt.Sprintf("baseline admin network policy %s, direction %s", o.ExternalIDs[libovsdbops.ObjectNameKey.String()], o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]) + case libovsdbops.MulticastNamespaceOwnerType: + msg = fmt.Sprintf("multicast in namespace %s, direction %s", o.ExternalIDs[libovsdbops.ObjectNameKey.String()], 
o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]) + case libovsdbops.MulticastClusterOwnerType: + msg = fmt.Sprintf("cluster multicast policy, direction %s", o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]) + case libovsdbops.NetpolNodeOwnerType: + msg = "default allow from local node policy, direction ingress" + case libovsdbops.NetworkPolicyOwnerType: + msg = fmt.Sprintf("network policy %s, direction %s", o.ExternalIDs[libovsdbops.ObjectNameKey.String()], o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]) + case libovsdbops.NetpolNamespaceOwnerType: + msg = fmt.Sprintf("network policies isolation in namespace %s, direction %s", o.ExternalIDs[libovsdbops.ObjectNameKey.String()], o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]) + case libovsdbops.EgressFirewallOwnerType: + msg = fmt.Sprintf("egress firewall in namespace %s", o.ExternalIDs[libovsdbops.ObjectNameKey.String()]) + case libovsdbops.UDNIsolationOwnerType: + msg = fmt.Sprintf("UDN isolation of type %s", o.ExternalIDs[libovsdbops.ObjectNameKey.String()]) + } + return fmt.Sprintf("%s by %s", action, msg) + default: + return "" + } +} + +func (d *SampleDecoder) DecodeCookieBytes(cookie []byte) (string, error) { + if uint64(len(cookie)) != CookieSize { + return "", fmt.Errorf("invalid cookie size: %d", len(cookie)) + } + c := Cookie{} + err := binary.Read(bytes.NewReader(cookie), SampleEndian, &c) + if err != nil { + return "", err + } + return d.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID) +} + +func (d *SampleDecoder) DecodeCookie8Bytes(cookie [8]byte) (string, error) { + c := Cookie{} + err := binary.Read(bytes.NewReader(cookie[:]), SampleEndian, &c) + if err != nil { + return "", err + } + return d.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID) +} + +func getGroupID(groupID *int) string { + if groupID == nil { + return "unset" + } + return fmt.Sprintf("%d", *groupID) +} + +func (d *SampleDecoder) AddCollector(collectorID, groupID int, ownerName string) error { + if d.ovsdbClient == nil { + return fmt.Errorf("OVSDB client is not initialized") + } + // find existing collector with the same ID + collectors := []*ovsdb.FlowSampleCollectorSet{} + err := d.ovsdbClient.WhereCache(func(item *ovsdb.FlowSampleCollectorSet) bool { + return item.ID == collectorID + }).List(context.Background(), &collectors) + if err != nil { + return fmt.Errorf("failed finding existing collector: %w", err) + } + if len(collectors) > 0 && (collectors[0].ExternalIDs["owner"] != ownerName || + collectors[0].LocalGroupID == nil || *collectors[0].LocalGroupID != groupID) { + return fmt.Errorf("requested collector with id=%v already exists "+ + "with the external_ids=%+v, local_group_id=%v", collectorID, collectors[0].ExternalIDs["owner"], getGroupID(collectors[0].LocalGroupID)) + } + + // find br-int UUID to attach collector + bridges := []*ovsdb.Bridge{} + err = d.ovsdbClient.WhereCache(func(item *ovsdb.Bridge) bool { + return item.Name == bridgeName + }).List(context.Background(), &bridges) + if err != nil || len(bridges) != 1 { + return fmt.Errorf("failed finding br-int: %w", err) + } + + ops, err := d.ovsdbClient.Create(&ovsdb.FlowSampleCollectorSet{ + ID: collectorID, + Bridge: bridges[0].UUID, + LocalGroupID: &groupID, + ExternalIDs: map[string]string{"owner": ownerName}, + }) + if err != nil { + return fmt.Errorf("failed creating collector: %w", err) + } + _, err = d.ovsdbClient.Transact(context.Background(), ops...) 
+ return err +} + +func (d *SampleDecoder) DeleteCollector(collectorID int) error { + collectors := []*ovsdb.FlowSampleCollectorSet{} + err := d.ovsdbClient.WhereCache(func(item *ovsdb.FlowSampleCollectorSet) bool { + return item.ID == collectorID + }).List(context.Background(), &collectors) + if err != nil { + return fmt.Errorf("failed finding exisiting collector: %w", err) + } + if len(collectors) != 1 { + return fmt.Errorf("expected only 1 collector with given id") + } + + ops, err := d.ovsdbClient.Where(collectors[0]).Delete() + if err != nil { + return fmt.Errorf("failed creating collector: %w", err) + } + res, err := d.ovsdbClient.Transact(context.Background(), ops...) + fmt.Println("res: ", res) + return err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go new file mode 100644 index 000000000..442b1a6c7 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go @@ -0,0 +1,89 @@ +package types + +import ( + "net" + + "github.com/containernetworking/cni/pkg/types" +) + +// NetConf is CNI NetConf with DeviceID +type NetConf struct { + types.NetConf + // Role is valid only on L3 / L2 topologies. Not on localnet. + // It allows for using this network to be either secondary or + // primary user defined network for the pod. + // primary user defined networks are used in order to achieve + // native network isolation. + // In order to ensure backwards compatibility, if empty the + // network is considered secondary + Role string `json:"role,omitempty"` + // specifies the OVN topology for this network configuration + // when not specified, by default it is Layer3AttachDefTopoType + Topology string `json:"topology,omitempty"` + // captures net-attach-def name in the form of namespace/name + NADName string `json:"netAttachDefName,omitempty"` + // Network MTU + MTU int `json:"mtu,omitempty"` + // comma-seperated subnet cidr + // for secondary layer3 network, eg. 10.128.0.0/14/23 + // for layer2 and localnet network, eg. 10.1.130.0/24 + Subnets string `json:"subnets,omitempty"` + // comma-seperated list of IPs, expressed in the form of subnets, to be excluded from being allocated for Pod + // valid for layer2 and localnet network topology + // eg. "10.1.130.0/27, 10.1.130.122/32" + ExcludeSubnets string `json:"excludeSubnets,omitempty"` + // join subnet cidr is required for supporting + // services and ingress for user defined networks + // in case of dualstack cluster, please do a comma-seperated list + // expected format: + // 1) V4 single stack: "v4CIDR" (eg: "100.65.0.0/16") + // 2) V6 single stack: "v6CIDR" (eg: "fd99::/64") + // 3) dualstack: "v4CIDR,v6CIDR" (eg: "100.65.0.0/16,fd99::/64") + // valid for UDN layer3/layer2 network topology + // default value: 100.65.0.0/16,fd99::/64 if not provided + JoinSubnet string `json:"joinSubnet,omitempty"` + // VLANID, valid in localnet topology network only + VLANID int `json:"vlanID,omitempty"` + // AllowPersistentIPs is valid on both localnet / layer topologies. + // It allows for having IP allocations that outlive the pod for which + // they are originally created - e.g. a KubeVirt VM's migration, or + // restart. 
+ AllowPersistentIPs bool `json:"allowPersistentIPs,omitempty"` + + // PciAddrs in case of using sriov or Auxiliry device name in case of SF + DeviceID string `json:"deviceID,omitempty"` + // LogFile to log all the messages from cni shim binary to + LogFile string `json:"logFile,omitempty"` + // Level is the logging verbosity level + LogLevel string `json:"logLevel,omitempty"` + // LogFileMaxSize is the maximum size in bytes of the logfile + // before it gets rolled. + LogFileMaxSize int `json:"logfile-maxsize"` + // LogFileMaxBackups represents the maximum number of + // old log files to retain + LogFileMaxBackups int `json:"logfile-maxbackups"` + // LogFileMaxAge represents the maximum number + // of days to retain old log files + LogFileMaxAge int `json:"logfile-maxage"` + // Runtime arguments passed by the NPWG implementation (e.g. multus) + RuntimeConfig struct { + // see https://github.com/k8snetworkplumbingwg/device-info-spec + CNIDeviceInfoFile string `json:"CNIDeviceInfoFile,omitempty"` + } `json:"runtimeConfig,omitempty"` +} + +// NetworkSelectionElement represents one element of the JSON format +// Network Attachment Selection Annotation as described in section 4.1.2 +// of the CRD specification. +type NetworkSelectionElement struct { + // Name contains the name of the Network object this element selects + Name string `json:"name"` + // Namespace contains the optional namespace that the network referenced + // by Name exists in + Namespace string `json:"namespace,omitempty"` + // MacRequest contains an optional requested MAC address for this + // network attachment + MacRequest string `json:"mac,omitempty"` + // GatewayRequest contains default route IP address for the pod + GatewayRequest []net.IP `json:"default-route,omitempty"` +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go new file mode 100644 index 000000000..3d935c5c6 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go @@ -0,0 +1,173 @@ +package config + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/containernetworking/cni/libcni" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/version" + + ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" + ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +var ErrorAttachDefNotOvnManaged = errors.New("net-attach-def not managed by OVN") +var ErrorChainingNotSupported = errors.New("CNI plugin chaining is not supported") + +// WriteCNIConfig writes a CNI JSON config file to directory given by global config +// if the file doesn't already exist, or is different than the content that would +// be written. 
+func WriteCNIConfig() error { + netConf := &ovncnitypes.NetConf{ + NetConf: types.NetConf{ + CNIVersion: "0.4.0", + Name: "ovn-kubernetes", + Type: CNI.Plugin, + }, + LogFile: Logging.CNIFile, + LogLevel: fmt.Sprintf("%d", Logging.Level), + LogFileMaxSize: Logging.LogFileMaxSize, + LogFileMaxBackups: Logging.LogFileMaxBackups, + LogFileMaxAge: Logging.LogFileMaxAge, + } + + newBytes, err := json.Marshal(netConf) + if err != nil { + return fmt.Errorf("failed to marshal CNI config JSON: %v", err) + } + + confFile := filepath.Join(CNI.ConfDir, CNIConfFileName) + if existingBytes, err := os.ReadFile(confFile); err == nil { + if bytes.Equal(newBytes, existingBytes) { + // No changes; do nothing + return nil + } + } + + // Install the CNI config file after all initialization is done + // MkdirAll() returns no error if the path already exists + if err := os.MkdirAll(CNI.ConfDir, os.ModeDir); err != nil { + return err + } + + var f *os.File + f, err = os.CreateTemp(CNI.ConfDir, "ovnkube-") + if err != nil { + return err + } + + if _, err := f.Write(newBytes); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + + return os.Rename(f.Name(), confFile) +} + +// ParseNetConf parses config in NAD spec +func ParseNetConf(bytes []byte) (*ovncnitypes.NetConf, error) { + var netconf *ovncnitypes.NetConf + + confList, err := libcni.ConfListFromBytes(bytes) + if err == nil { + netconf, err = parseNetConfList(confList) + if err == nil { + if _, singleErr := parseNetConfSingle(bytes); singleErr == nil { + return nil, fmt.Errorf("CNI config cannot have both a plugin list and a single config") + } + } + } else { + netconf, err = parseNetConfSingle(bytes) + } + + if err != nil { + return nil, err + } + + if netconf.Topology == "" { + // NAD of default network + netconf.Name = ovntypes.DefaultNetworkName + } + + return netconf, nil +} + +func parseNetConfSingle(bytes []byte) (*ovncnitypes.NetConf, error) { + netconf := &ovncnitypes.NetConf{MTU: Default.MTU} + err := json.Unmarshal(bytes, &netconf) + if err != nil { + return nil, err + } + + // skip non-OVN NAD + if netconf.Type != "ovn-k8s-cni-overlay" { + return nil, ErrorAttachDefNotOvnManaged + } + + err = ValidateNetConfNameFields(netconf) + if err != nil { + return nil, err + } + + return netconf, nil +} + +func parseNetConfList(confList *libcni.NetworkConfigList) (*ovncnitypes.NetConf, error) { + if len(confList.Plugins) > 1 { + return nil, ErrorChainingNotSupported + } + + netconf := &ovncnitypes.NetConf{MTU: Default.MTU} + if err := json.Unmarshal(confList.Plugins[0].Bytes, netconf); err != nil { + return nil, err + } + + // skip non-OVN NAD + if netconf.Type != "ovn-k8s-cni-overlay" { + return nil, ErrorAttachDefNotOvnManaged + } + + netconf.Name = confList.Name + netconf.CNIVersion = confList.CNIVersion + + if err := ValidateNetConfNameFields(netconf); err != nil { + return nil, err + } + + return netconf, nil +} + +func ValidateNetConfNameFields(netconf *ovncnitypes.NetConf) error { + if netconf.Topology != "" { + if netconf.NADName == "" { + return fmt.Errorf("missing NADName in secondary network netconf %s", netconf.Name) + } + // "ovn-kubernetes" network name is reserved for later + if netconf.Name == "" || netconf.Name == ovntypes.DefaultNetworkName || netconf.Name == "ovn-kubernetes" { + return fmt.Errorf("invalid name in in secondary network netconf (%s)", netconf.Name) + } + } + + return nil +} + +// ReadCNIConfig unmarshals a CNI JSON config into an NetConf structure +func ReadCNIConfig(bytes []byte) 
(*ovncnitypes.NetConf, error) { + conf, err := ParseNetConf(bytes) + if err != nil { + return nil, err + } + if conf.RawPrevResult != nil { + if err := version.ParsePrevResult(&conf.NetConf); err != nil { + return nil, err + } + } + return conf, nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go new file mode 100644 index 000000000..6a54509af --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go @@ -0,0 +1,2662 @@ +package config + +import ( + "flag" + "fmt" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/urfave/cli/v2" + gcfg "gopkg.in/gcfg.v1" + lumberjack "gopkg.in/natefinch/lumberjack.v2" + "k8s.io/klog/v2" + + kexec "k8s.io/utils/exec" + utilnet "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +// DefaultEncapPort number used if not supplied +const DefaultEncapPort = 6081 + +const DefaultAPIServer = "http://localhost:8443" + +// Default IANA-assigned UDP port number for VXLAN +const DefaultVXLANPort = 4789 + +const DefaultDBTxnTimeout = time.Second * 100 + +// The following are global config parameters that other modules may access directly +var ( + // Build information. Populated at build-time. + // commit ID used to build ovn-kubernetes + Commit = "" + // branch used to build ovn-kubernetes + Branch = "" + // ovn-kubernetes build user + BuildUser = "" + // ovn-kubernetes build date + BuildDate = "" + // ovn-kubernetes version, to be changed with every release + Version = "1.0.0" + // version of the go runtime used to compile ovn-kubernetes + GoVersion = runtime.Version() + // os and architecture used to build ovn-kubernetes + OSArch = fmt.Sprintf("%s %s", runtime.GOOS, runtime.GOARCH) + + // ovn-kubernetes cni config file name + CNIConfFileName = "10-ovn-kubernetes.conf" + + // Default holds parsed config file parameters and command-line overrides + Default = DefaultConfig{ + MTU: 1400, + ConntrackZone: 64000, + EncapType: "geneve", + EncapIP: "", + EncapPort: DefaultEncapPort, + InactivityProbe: 100000, // in Milliseconds + OpenFlowProbe: 180, // in Seconds + OfctrlWaitBeforeClear: 0, // in Milliseconds + MonitorAll: true, + OVSDBTxnTimeout: DefaultDBTxnTimeout, + LFlowCacheEnable: true, + RawClusterSubnets: "10.128.0.0/14/23", + Zone: types.OvnDefaultZone, + } + + // Logging holds logging-related parsed config file parameters and command-line overrides + Logging = LoggingConfig{ + File: "", // do not log to a file by default + CNIFile: "", + LibovsdbFile: "", + Level: 4, + LogFileMaxSize: 100, // Size in Megabytes + LogFileMaxBackups: 5, + LogFileMaxAge: 5, //days + ACLLoggingRateLimit: 20, + } + + // Monitoring holds monitoring-related parsed config file parameters and command-line overrides + Monitoring = MonitoringConfig{ + RawNetFlowTargets: "", + RawSFlowTargets: "", + RawIPFIXTargets: "", + } + + // IPFIX holds IPFIX-related performance configuration options. It requires that the + // IPFIXTargets value of the Monitoring section contains at least one endpoint. 
+ IPFIX = IPFIXConfig{ + Sampling: 400, + CacheActiveTimeout: 60, + CacheMaxFlows: 0, + } + + // CNI holds CNI-related parsed config file parameters and command-line overrides + CNI = CNIConfig{ + ConfDir: "/etc/cni/net.d", + Plugin: "ovn-k8s-cni-overlay", + } + + // Kubernetes holds Kubernetes-related parsed config file parameters and command-line overrides + Kubernetes = KubernetesConfig{ + APIServer: DefaultAPIServer, + RawServiceCIDRs: "172.16.1.0/24", + OVNConfigNamespace: "ovn-kubernetes", + HostNetworkNamespace: "", + PlatformType: "", + DNSServiceNamespace: "kube-system", + DNSServiceName: "kube-dns", + // By default, use a short lifetime length for certificates to ensure that the automatic rotation works well, + // might revisit in the future to use a more sensible value + CertDuration: 10 * time.Minute, + } + + // Metrics holds Prometheus metrics-related parameters. + Metrics MetricsConfig + + // OVNKubernetesFeatureConfig holds OVN-Kubernetes feature enhancement config file parameters and command-line overrides + OVNKubernetesFeature = OVNKubernetesFeatureConfig{ + EgressIPReachabiltyTotalTimeout: 1, + } + + // OvnNorth holds northbound OVN database client and server authentication and location details + OvnNorth OvnAuthConfig + + // OvnSouth holds southbound OVN database client and server authentication and location details + OvnSouth OvnAuthConfig + + // Gateway holds node gateway-related parsed config file parameters and command-line overrides + Gateway = GatewayConfig{ + V4JoinSubnet: "100.64.0.0/16", + V6JoinSubnet: "fd98::/64", + V4MasqueradeSubnet: "169.254.169.0/29", + V6MasqueradeSubnet: "fd69::/125", + MasqueradeIPs: MasqueradeIPsConfig{ + V4OVNMasqueradeIP: net.ParseIP("169.254.169.1"), + V6OVNMasqueradeIP: net.ParseIP("fd69::1"), + V4HostMasqueradeIP: net.ParseIP("169.254.169.2"), + V6HostMasqueradeIP: net.ParseIP("fd69::2"), + V4HostETPLocalMasqueradeIP: net.ParseIP("169.254.169.3"), + V6HostETPLocalMasqueradeIP: net.ParseIP("fd69::3"), + V4DummyNextHopMasqueradeIP: net.ParseIP("169.254.169.4"), + V6DummyNextHopMasqueradeIP: net.ParseIP("fd69::4"), + V4OVNServiceHairpinMasqueradeIP: net.ParseIP("169.254.169.5"), + V6OVNServiceHairpinMasqueradeIP: net.ParseIP("fd69::5"), + }, + } + + // Set Leaderelection config values based on + // https://github.com/openshift/enhancements/blame/84e894ead7b188a1013556e0ba6973b8463995f1/CONVENTIONS.md#L183 + + // MasterHA holds master HA related config options. + MasterHA = HAConfig{ + ElectionRetryPeriod: 26, + ElectionRenewDeadline: 107, + ElectionLeaseDuration: 137, + } + + // ClusterMgrHA holds cluster manager HA related config options. + ClusterMgrHA = HAConfig{ + ElectionRetryPeriod: 26, + ElectionRenewDeadline: 107, + ElectionLeaseDuration: 137, + } + + // HybridOverlay holds hybrid overlay feature config options. + HybridOverlay = HybridOverlayConfig{ + VXLANPort: DefaultVXLANPort, + } + + // UnprivilegedMode allows ovnkube-node to run without SYS_ADMIN capability, by performing interface setup in the CNI plugin + UnprivilegedMode bool + + // EnableMulticast enables multicast support between the pods within the same namespace + EnableMulticast bool + + // IPv4Mode captures whether we are using IPv4 for OVN logical topology. (ie, single-stack IPv4 or dual-stack) + IPv4Mode bool + + // IPv6Mode captures whether we are using IPv6 for OVN logical topology. 
(ie, single-stack IPv6 or dual-stack) + IPv6Mode bool + + // OvnKubeNode holds ovnkube-node parsed config file parameters and command-line overrides + OvnKubeNode = OvnKubeNodeConfig{ + Mode: types.NodeModeFull, + } + + ClusterManager = ClusterManagerConfig{ + V4TransitSwitchSubnet: "100.88.0.0/16", + V6TransitSwitchSubnet: "fd97::/64", + } +) + +const ( + kubeServiceAccountPath string = "/var/run/secrets/kubernetes.io/serviceaccount/" + kubeServiceAccountFileToken string = "token" + kubeServiceAccountFileCACert string = "ca.crt" +) + +// DefaultConfig holds parsed config file parameters and command-line overrides +type DefaultConfig struct { + // MTU value used for the overlay networks. + MTU int `gcfg:"mtu"` + // RoutableMTU is the maximum routable MTU between nodes, used to facilitate + // an MTU migration procedure where different nodes might be using different + // MTU values + RoutableMTU int `gcfg:"routable-mtu"` + // ConntrackZone affects only the gateway nodes, This value is used to track connections + // that are initiated from the pods so that the reverse connections go back to the pods. + // This represents the conntrack zone used for the conntrack flow rules. + ConntrackZone int `gcfg:"conntrack-zone"` + // HostMasqConntrackZone is an unexposed config with the value of ConntrackZone+1 + HostMasqConntrackZone int + // OVNMasqConntrackZone is an unexposed config with the value of ConntrackZone+2 + OVNMasqConntrackZone int + // HostNodePortCTZone is an unexposed config with the value of ConntrackZone+3 + HostNodePortConntrackZone int + // ReassemblyConntrackZone is an unexposed config with the value of ConntrackZone+4 + ReassemblyConntrackZone int + // EncapType value defines the encapsulation protocol to use to transmit packets between + // hypervisors. By default the value is 'geneve' + EncapType string `gcfg:"encap-type"` + // The IP address of the encapsulation endpoint. If not specified, the IP address the + // NodeName resolves to will be used + EncapIP string `gcfg:"encap-ip"` + // The UDP Port of the encapsulation endpoint. If not specified, the IP default port + // of 6081 will be used + EncapPort uint `gcfg:"encap-port"` + // Maximum number of milliseconds of idle time on connection that + // ovn-controller waits before it will send a connection health probe. + InactivityProbe int `gcfg:"inactivity-probe"` + // Maximum number of seconds of idle time on the OpenFlow connection + // that ovn-controller will wait before it sends a connection health probe + OpenFlowProbe int `gcfg:"openflow-probe"` + // Maximum number of milliseconds that ovn-controller waits before clearing existing flows + // during start up, to make sure the initial flow compute is complete and avoid data plane + // interruptions. + OfctrlWaitBeforeClear int `gcfg:"ofctrl-wait-before-clear"` + // The boolean flag indicates if ovn-controller should monitor all data in SB DB + // instead of conditionally monitoring the data relevant to this node only. + // By default monitor-all is enabled. + MonitorAll bool `gcfg:"monitor-all"` + // OVSDBTxnTimeout is the timeout for db transaction, may be useful to increase for high-scale clusters. + // default value is 100 seconds. + OVSDBTxnTimeout time.Duration `gcfg:"db-txn-timeout"` + // The boolean flag indicates if ovn-controller should + // enable/disable the logical flow in-memory cache it uses + // when processing Southbound database logical flow changes. + // By default caching is enabled. 
+ LFlowCacheEnable bool `gcfg:"enable-lflow-cache"`
+ // Maximum number of logical flow cache entries ovn-controller
+ // may create when the logical flow cache is enabled. By
+ // default the size of the cache is unlimited.
+ LFlowCacheLimit uint `gcfg:"lflow-cache-limit"`
+ // Maximum size in KB of the logical flow cache that ovn-controller
+ // may create when the logical flow cache is enabled. By
+ // default the size of the cache is unlimited.
+ LFlowCacheLimitKb uint `gcfg:"lflow-cache-limit-kb"`
+ // RawClusterSubnets holds the unparsed cluster subnets. Should only be
+ // used inside config module.
+ RawClusterSubnets string `gcfg:"cluster-subnets"`
+ // ClusterSubnets holds parsed cluster subnet entries and may be used
+ // outside the config module.
+ ClusterSubnets []CIDRNetworkEntry
+ // EnableUDPAggregation is true if ovn-kubernetes should use UDP Generic Receive
+ // Offload forwarding to improve the performance of containers that transmit lots
+ // of small UDP packets by allowing them to be aggregated before passing through
+ // the kernel network stack. This requires a new-enough kernel (5.15 or RHEL 8.5).
+ EnableUDPAggregation bool `gcfg:"enable-udp-aggregation"`
+
+ // Zone name to which ovnkube-node/ovnkube-controller belongs to
+ Zone string `gcfg:"zone"`
+}
+
+// LoggingConfig holds logging-related parsed config file parameters and command-line overrides
+type LoggingConfig struct {
+ // File is the path of the file to log to
+ File string `gcfg:"logfile"`
+ // CNIFile is the path of the file for the CNI shim to log to
+ CNIFile string `gcfg:"cnilogfile"`
+ // LibovsdbFile is the path of the file for the libovsdb client to log to
+ LibovsdbFile string `gcfg:"libovsdblogfile"`
+ // Level is the logging verbosity level
+ Level int `gcfg:"loglevel"`
+ // LogFileMaxSize is the maximum size in megabytes of the logfile
+ // before it gets rolled.
+ LogFileMaxSize int `gcfg:"logfile-maxsize"`
+ // LogFileMaxBackups represents the maximum number of old log files to retain
+ LogFileMaxBackups int `gcfg:"logfile-maxbackups"`
+ // LogFileMaxAge represents the maximum number of days to retain old log files
+ LogFileMaxAge int `gcfg:"logfile-maxage"`
+ // Logging rate-limiting meter
+ ACLLoggingRateLimit int `gcfg:"acl-logging-rate-limit"`
+}
+
+// MonitoringConfig holds monitoring-related parsed config file parameters and command-line overrides
+type MonitoringConfig struct {
+ // RawNetFlowTargets holds the unparsed NetFlow targets. Should only be used inside the config module.
+ RawNetFlowTargets string `gcfg:"netflow-targets"`
+ // RawSFlowTargets holds the unparsed SFlow targets. Should only be used inside the config module.
+ RawSFlowTargets string `gcfg:"sflow-targets"`
+ // RawIPFIXTargets holds the unparsed IPFIX targets. Should only be used inside the config module.
+ RawIPFIXTargets string `gcfg:"ipfix-targets"`
+ // NetFlowTargets holds the parsed NetFlow targets and may be used outside the config module.
+ NetFlowTargets []HostPort
+ // SFlowTargets holds the parsed SFlow targets and may be used outside the config module.
+ SFlowTargets []HostPort
+ // IPFIXTargets holds the parsed IPFIX targets and may be used outside the config module.
+ IPFIXTargets []HostPort
+}
+
+// IPFIXConfig holds IPFIX-related performance configuration options. It requires that the ipfix-targets
+// value of the [monitoring] section contains at least one endpoint.
+type IPFIXConfig struct {
+ // Sampling is an optional integer in range 1 to 4,294,967,295.
It holds the rate at which + // packets should be sampled and sent to each target collector. If not specified, defaults to + // 400, which means one out of 400 packets, on average, will be sent to each target collector. + Sampling uint `gcfg:"sampling"` + // CacheActiveTimeout is an optional integer in range 0 to 4,200. It holds the maximum period in + // seconds for which an IPFIX flow record is cached and aggregated before being sent. If not + // specified, defaults to 60. If 0, caching is disabled. + CacheActiveTimeout uint `gcfg:"cache-active-timeout"` + // CacheMaxFlows is an optional integer in range 0 to 4,294,967,295. It holds the maximum number + // of IPFIX flow records that can be cached at a time. If not specified in OVS, defaults to 0 + // (however, this controller defaults it to 60). If 0, caching is disabled. + CacheMaxFlows uint `gcfg:"cache-max-flows"` +} + +// CNIConfig holds CNI-related parsed config file parameters and command-line overrides +type CNIConfig struct { + // ConfDir specifies the CNI config directory in which to write the overlay CNI config file + ConfDir string `gcfg:"conf-dir"` + // Plugin specifies the name of the CNI plugin + Plugin string `gcfg:"plugin"` +} + +// KubernetesConfig holds Kubernetes-related parsed config file parameters and command-line overrides +type KubernetesConfig struct { + BootstrapKubeconfig string `gcfg:"bootstrap-kubeconfig"` + CertDir string `gcfg:"cert-dir"` + CertDuration time.Duration `gcfg:"cert-duration"` + Kubeconfig string `gcfg:"kubeconfig"` + CACert string `gcfg:"cacert"` + CAData []byte + APIServer string `gcfg:"apiserver"` + Token string `gcfg:"token"` + TokenFile string `gcfg:"tokenFile"` + CompatServiceCIDR string `gcfg:"service-cidr"` + RawServiceCIDRs string `gcfg:"service-cidrs"` + ServiceCIDRs []*net.IPNet + OVNConfigNamespace string `gcfg:"ovn-config-namespace"` + OVNEmptyLbEvents bool `gcfg:"ovn-empty-lb-events"` + PodIP string `gcfg:"pod-ip"` // UNUSED + RawNoHostSubnetNodes string `gcfg:"no-hostsubnet-nodes"` + NoHostSubnetNodes labels.Selector + HostNetworkNamespace string `gcfg:"host-network-namespace"` + PlatformType string `gcfg:"platform-type"` + HealthzBindAddress string `gcfg:"healthz-bind-address"` + + // CompatMetricsBindAddress is overridden by the corresponding option in MetricsConfig + CompatMetricsBindAddress string `gcfg:"metrics-bind-address"` + // CompatOVNMetricsBindAddress is overridden by the corresponding option in MetricsConfig + CompatOVNMetricsBindAddress string `gcfg:"ovn-metrics-bind-address"` + // CompatMetricsEnablePprof is overridden by the corresponding option in MetricsConfig + CompatMetricsEnablePprof bool `gcfg:"metrics-enable-pprof"` + + DNSServiceNamespace string `gcfg:"dns-service-namespace"` + DNSServiceName string `gcfg:"dns-service-name"` +} + +// MetricsConfig holds Prometheus metrics-related parameters. 
+type MetricsConfig struct {
+ BindAddress string `gcfg:"bind-address"`
+ OVNMetricsBindAddress string `gcfg:"ovn-metrics-bind-address"`
+ ExportOVSMetrics bool `gcfg:"export-ovs-metrics"`
+ EnablePprof bool `gcfg:"enable-pprof"`
+ NodeServerPrivKey string `gcfg:"node-server-privkey"`
+ NodeServerCert string `gcfg:"node-server-cert"`
+ // EnableConfigDuration holds the boolean flag to enable OVN-Kubernetes master to monitor OVN-Kubernetes master
+ // configuration duration and optionally, its application to all nodes
+ EnableConfigDuration bool `gcfg:"enable-config-duration"`
+ EnableScaleMetrics bool `gcfg:"enable-scale-metrics"`
+}
+
+// OVNKubernetesFeatureConfig holds OVN-Kubernetes feature enhancement config file parameters and command-line overrides
+type OVNKubernetesFeatureConfig struct {
+ // Admin Network Policy feature is enabled
+ EnableAdminNetworkPolicy bool `gcfg:"enable-admin-network-policy"`
+ // EgressIP feature is enabled
+ EnableEgressIP bool `gcfg:"enable-egress-ip"`
+ // EgressIP node reachability total timeout in seconds
+ EgressIPReachabiltyTotalTimeout int `gcfg:"egressip-reachability-total-timeout"`
+ EnableEgressFirewall bool `gcfg:"enable-egress-firewall"`
+ EnableEgressQoS bool `gcfg:"enable-egress-qos"`
+ EnableEgressService bool `gcfg:"enable-egress-service"`
+ EgressIPNodeHealthCheckPort int `gcfg:"egressip-node-healthcheck-port"`
+ EnableMultiNetwork bool `gcfg:"enable-multi-network"`
+ EnableNetworkSegmentation bool `gcfg:"enable-network-segmentation"`
+ EnableMultiNetworkPolicy bool `gcfg:"enable-multi-networkpolicy"`
+ EnableStatelessNetPol bool `gcfg:"enable-stateless-netpol"`
+ EnableInterconnect bool `gcfg:"enable-interconnect"`
+ EnableMultiExternalGateway bool `gcfg:"enable-multi-external-gateway"`
+ EnablePersistentIPs bool `gcfg:"enable-persistent-ips"`
+ EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"`
+ EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"`
+ EnableObservability bool `gcfg:"enable-observability"`
+}
+
+// GatewayMode holds the node gateway mode
+type GatewayMode string
+
+const (
+ // GatewayModeDisabled indicates the node gateway mode is disabled
+ GatewayModeDisabled GatewayMode = ""
+ // GatewayModeShared indicates OVN shares a gateway interface with the node
+ GatewayModeShared GatewayMode = "shared"
+ // GatewayModeLocal indicates OVN creates a local NAT-ed interface for the gateway
+ GatewayModeLocal GatewayMode = "local"
+)
+
+// GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides
+type GatewayConfig struct {
+ // Mode is the gateway mode; it may be either empty (disabled), "shared", or "local"
+ Mode GatewayMode `gcfg:"mode"`
+ // Interface is the network interface to use for the gateway in "shared" mode
+ Interface string `gcfg:"interface"`
+ // EgressGWInterface is the optional network interface to use for external gw pods traffic.
+ EgressGWInterface string `gcfg:"egw-interface"`
+ // NextHop is the gateway IP address of Interface; will be autodetected if not given
+ NextHop string `gcfg:"next-hop"`
+ // VLANID is the optional VLAN tag to apply to gateway traffic for "shared" mode
+ VLANID uint `gcfg:"vlan-id"`
+ // NodeportEnable sets whether to provide Kubernetes NodePort service or not
+ NodeportEnable bool `gcfg:"nodeport"`
+ // DisableSNATMultipleGWs sets whether to disable SNAT of egress traffic in namespaces annotated with routing-external-gws
+ DisableSNATMultipleGWs bool `gcfg:"disable-snat-multiple-gws"`
+ // V4JoinSubnet to be used in the cluster
+ V4JoinSubnet string `gcfg:"v4-join-subnet"`
+ // V6JoinSubnet to be used in the cluster
+ V6JoinSubnet string `gcfg:"v6-join-subnet"`
+ // V4MasqueradeSubnet to be used in the cluster
+ V4MasqueradeSubnet string `gcfg:"v4-masquerade-subnet"`
+ // V6MasqueradeSubnet to be used in the cluster
+ V6MasqueradeSubnet string `gcfg:"v6-masquerade-subnet"`
+ // MasqueradeIPs to be allocated from the masquerade subnets to enable host to service traffic
+ MasqueradeIPs MasqueradeIPsConfig
+
+ // DisablePacketMTUCheck disables adding openflow flows to check packets too large to be
+ // delivered to OVN due to pod MTU being lower than NIC MTU. Disabling this check will result in southbound packets
+ // exceeding pod MTU to be dropped by OVN. With this check enabled, ICMP needs frag/packet too big messages will be sent
+ // back to the original client
+ DisablePacketMTUCheck bool `gcfg:"disable-pkt-mtu-check"`
+ // RouterSubnet is the subnet to be used for the GR external port. Auto-detected if not given.
+ // Must match the kube node IP address. Currently valid for DPU only.
+ RouterSubnet string `gcfg:"router-subnet"`
+ // SingleNode indicates the cluster has only one node
+ SingleNode bool `gcfg:"single-node"`
+ // DisableForwarding (enabled by default) controls if forwarding is allowed on OVNK controlled interfaces
+ DisableForwarding bool `gcfg:"disable-forwarding"`
+ // AllowNoUplink (disabled by default) controls if the external gateway bridge without an uplink port is allowed in local gateway mode.
+ AllowNoUplink bool `gcfg:"allow-no-uplink"`
+}
+
+// OvnAuthConfig holds client authentication and location details for
+// an OVN database (either northbound or southbound)
+type OvnAuthConfig struct {
+ // e.g: "ssl:192.168.1.2:6641,ssl:192.168.1.2:6642"
+ Address string `gcfg:"address"`
+ PrivKey string `gcfg:"client-privkey"`
+ Cert string `gcfg:"client-cert"`
+ CACert string `gcfg:"client-cacert"`
+ CertCommonName string `gcfg:"cert-common-name"`
+ Scheme OvnDBScheme
+ ElectionTimer uint `gcfg:"election-timer"`
+ northbound bool
+
+ exec kexec.Interface
+}
+
+// HAConfig holds leader election HA configuration.
+type HAConfig struct {
+ ElectionLeaseDuration int `gcfg:"election-lease-duration"`
+ ElectionRenewDeadline int `gcfg:"election-renew-deadline"`
+ ElectionRetryPeriod int `gcfg:"election-retry-period"`
+}
+
+// HybridOverlayConfig holds hybrid overlay feature configuration.
+type HybridOverlayConfig struct {
+ // Enabled indicates whether hybrid overlay features are enabled or not.
+ Enabled bool `gcfg:"enabled"`
+ // RawClusterSubnets holds the unparsed hybrid overlay cluster subnets.
+ // Should only be used inside config module.
+ RawClusterSubnets string `gcfg:"cluster-subnets"`
+ // ClusterSubnets holds parsed hybrid overlay cluster subnet entries and
+ // may be used outside the config module.
+ ClusterSubnets []CIDRNetworkEntry + // VXLANPort holds the VXLAN tunnel UDP port number. + VXLANPort uint `gcfg:"hybrid-overlay-vxlan-port"` +} + +// OvnKubeNodeConfig holds ovnkube-node configurations +type OvnKubeNodeConfig struct { + Mode string `gcfg:"mode"` + DPResourceDeviceIdsMap map[string][]string + MgmtPortNetdev string `gcfg:"mgmt-port-netdev"` + MgmtPortDPResourceName string `gcfg:"mgmt-port-dp-resource-name"` +} + +// ClusterManagerConfig holds configuration for ovnkube-cluster-manager +type ClusterManagerConfig struct { + // V4TransitSwitchSubnet to be used in the cluster for interconnecting multiple zones + V4TransitSwitchSubnet string `gcfg:"v4-transit-switch-subnet"` + // V6TransitSwitchSubnet to be used in the cluster for interconnecting multiple zones + V6TransitSwitchSubnet string `gcfg:"v6-transit-switch-subnet"` +} + +// OvnDBScheme describes the OVN database connection transport method +type OvnDBScheme string + +const ( + // OvnDBSchemeSSL specifies SSL as the OVN database transport method + OvnDBSchemeSSL OvnDBScheme = "ssl" + // OvnDBSchemeTCP specifies TCP as the OVN database transport method + OvnDBSchemeTCP OvnDBScheme = "tcp" + // OvnDBSchemeUnix specifies Unix domains sockets as the OVN database transport method + OvnDBSchemeUnix OvnDBScheme = "unix" +) + +// Config is used to read the structured config file and to cache config in testcases +type config struct { + Default DefaultConfig + Logging LoggingConfig + Monitoring MonitoringConfig + IPFIX IPFIXConfig + CNI CNIConfig + OVNKubernetesFeature OVNKubernetesFeatureConfig + Kubernetes KubernetesConfig + Metrics MetricsConfig + OvnNorth OvnAuthConfig + OvnSouth OvnAuthConfig + Gateway GatewayConfig + MasterHA HAConfig + ClusterMgrHA HAConfig + HybridOverlay HybridOverlayConfig + OvnKubeNode OvnKubeNodeConfig + ClusterManager ClusterManagerConfig +} + +var ( + savedDefault DefaultConfig + savedLogging LoggingConfig + savedMonitoring MonitoringConfig + savedIPFIX IPFIXConfig + savedCNI CNIConfig + savedOVNKubernetesFeature OVNKubernetesFeatureConfig + savedKubernetes KubernetesConfig + savedMetrics MetricsConfig + savedOvnNorth OvnAuthConfig + savedOvnSouth OvnAuthConfig + savedGateway GatewayConfig + savedMasterHA HAConfig + savedClusterMgrHA HAConfig + savedHybridOverlay HybridOverlayConfig + savedOvnKubeNode OvnKubeNodeConfig + savedClusterManager ClusterManagerConfig + + // legacy service-cluster-ip-range CLI option + serviceClusterIPRange string + // legacy cluster-subnet CLI option + clusterSubnet string + // legacy init-gateways CLI option + initGateways bool + // legacy gateway-local CLI option + gatewayLocal bool + // legacy disable-ovn-iface-id-ver CLI option + disableOVNIfaceIDVer bool +) + +func init() { + // Cache original default config values + savedDefault = Default + savedLogging = Logging + savedMonitoring = Monitoring + savedIPFIX = IPFIX + savedCNI = CNI + savedOVNKubernetesFeature = OVNKubernetesFeature + savedKubernetes = Kubernetes + savedMetrics = Metrics + savedOvnNorth = OvnNorth + savedOvnSouth = OvnSouth + savedGateway = Gateway + savedMasterHA = MasterHA + savedClusterMgrHA = ClusterMgrHA + savedHybridOverlay = HybridOverlay + savedOvnKubeNode = OvnKubeNode + savedClusterManager = ClusterManager + cli.VersionPrinter = func(c *cli.Context) { + fmt.Printf("Version: %s\n", Version) + fmt.Printf("Git commit: %s\n", Commit) + fmt.Printf("Git branch: %s\n", Branch) + fmt.Printf("Go version: %s\n", GoVersion) + fmt.Printf("Build date: %s\n", BuildDate) + fmt.Printf("OS/Arch: %s\n", 
OSArch) + } + Flags = GetFlags([]cli.Flag{}) +} + +// PrepareTestConfig restores default config values. Used by testcases to +// provide a pristine environment between tests. +func PrepareTestConfig() error { + Default = savedDefault + Logging = savedLogging + Logging.Level = 5 + Monitoring = savedMonitoring + IPFIX = savedIPFIX + CNI = savedCNI + OVNKubernetesFeature = savedOVNKubernetesFeature + Kubernetes = savedKubernetes + Metrics = savedMetrics + OvnNorth = savedOvnNorth + OvnSouth = savedOvnSouth + Gateway = savedGateway + MasterHA = savedMasterHA + HybridOverlay = savedHybridOverlay + OvnKubeNode = savedOvnKubeNode + ClusterManager = savedClusterManager + EnableMulticast = false + Default.OVSDBTxnTimeout = 5 * time.Second + + if err := completeConfig(); err != nil { + return err + } + + // Don't pick up defaults from the environment + os.Unsetenv("KUBECONFIG") + os.Unsetenv("K8S_CACERT") + os.Unsetenv("K8S_APISERVER") + os.Unsetenv("K8S_TOKEN") + os.Unsetenv("K8S_TOKEN_FILE") + + return nil +} + +// copy members of struct 'src' into the corresponding field in struct 'dst' +// if the field in 'src' is a non-zero int or a non-zero-length string and +// does not contain a default value. This function should be called with pointers to structs. +func overrideFields(dst, src, defaults interface{}) error { + dstStruct := reflect.ValueOf(dst).Elem() + srcStruct := reflect.ValueOf(src).Elem() + if dstStruct.Kind() != srcStruct.Kind() || dstStruct.Kind() != reflect.Struct { + return fmt.Errorf("mismatched value types") + } + if dstStruct.NumField() != srcStruct.NumField() { + return fmt.Errorf("mismatched struct types") + } + + var defStruct reflect.Value + if defaults != nil { + defStruct = reflect.ValueOf(defaults).Elem() + } + // Iterate over each field in dst/src Type so we can get the tags, + // and use the field name to retrieve the field's actual value from + // the dst/src instance + var handled bool + dstType := reflect.TypeOf(dst).Elem() + for i := 0; i < dstType.NumField(); i++ { + structField := dstType.Field(i) + // Ignore private internal fields; we only care about overriding + // 'gcfg' tagged fields read from CLI or the config file + if _, ok := structField.Tag.Lookup("gcfg"); !ok { + continue + } + handled = true + + dstField := dstStruct.FieldByName(structField.Name) + srcField := srcStruct.FieldByName(structField.Name) + var dv reflect.Value + if defStruct.IsValid() { + dv = defStruct.FieldByName(structField.Name) + } + if !dstField.IsValid() || !srcField.IsValid() { + return fmt.Errorf("invalid struct %q field %q", dstType.Name(), structField.Name) + } + if dstField.Kind() != srcField.Kind() { + return fmt.Errorf("mismatched struct %q fields %q", dstType.Name(), structField.Name) + } + if dv.IsValid() && reflect.DeepEqual(dv.Interface(), srcField.Interface()) { + continue + } + dstField.Set(srcField) + } + if !handled { + // No tags found in the struct so we don't know how to override + return fmt.Errorf("failed to find 'gcfg' tags in struct %q", dstType.Name()) + } + + return nil +} + +var cliConfig config + +// CommonFlags capture general options. 
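+//
+// As a hedged illustration (the file layout below is an assumption derived from
+// the gcfg tags above and the --config-file default of /etc/openvswitch/ovn_k8s.conf,
+// not something this patch defines), the same options can also be supplied
+// through the config file rather than these flags, e.g.:
+//
+//	[default]
+//	mtu=1400
+//	cluster-subnets=10.128.0.0/14/23
+//
+//	[logging]
+//	loglevel=5
+//	logfile=/var/log/ovnkube.log
+//
+// The flags below write their values into cliConfig; config-file values and CLI
+// values are then reconciled with overrideFields, with non-default CLI settings
+// taking precedence.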
+var CommonFlags = []cli.Flag{ + // Mode flags + &cli.StringFlag{ + Name: "init-master", + Usage: "initialize master (both cluster-manager and ovnkube-controller), requires the hostname as argument", + }, + &cli.StringFlag{ + Name: "init-cluster-manager", + Usage: "initialize cluster manager (but not ovnkube-controller), requires the hostname as argument", + }, + &cli.StringFlag{ + Name: "init-ovnkube-controller", + Usage: "initialize ovnkube-controller (but not cluster-manager), requires the hostname as argument", + }, + &cli.StringFlag{ + Name: "init-node", + Usage: "initialize node, requires the name that node is registered with in kubernetes cluster", + }, + &cli.StringFlag{ + Name: "cleanup-node", + Usage: "cleanup node, requires the name that node is registered with in kubernetes cluster", + }, + &cli.StringFlag{ + Name: "pidfile", + Usage: "Name of file that will hold the ovnkube pid (optional)", + }, + &cli.StringFlag{ + Name: "config-file", + Usage: "configuration file path (default: /etc/openvswitch/ovn_k8s.conf)", + //Value: "/etc/openvswitch/ovn_k8s.conf", + }, + &cli.IntFlag{ + Name: "mtu", + Usage: "MTU value used for the overlay networks (default: 1400)", + Destination: &cliConfig.Default.MTU, + Value: Default.MTU, + }, + &cli.IntFlag{ + Name: "routable-mtu", + Usage: "Maximum routable MTU between nodes, used to facilitate an MTU migration procedure where different nodes might be using different MTU values", + Destination: &cliConfig.Default.RoutableMTU, + }, + &cli.IntFlag{ + Name: "conntrack-zone", + Usage: "For gateway nodes, the conntrack zone used for conntrack flow rules (default: 64000)", + Destination: &cliConfig.Default.ConntrackZone, + Value: Default.ConntrackZone, + }, + &cli.StringFlag{ + Name: "encap-type", + Usage: "The encapsulation protocol to use to transmit packets between hypervisors (default: geneve)", + Destination: &cliConfig.Default.EncapType, + Value: Default.EncapType, + }, + &cli.StringFlag{ + Name: "encap-ip", + Usage: "The IP address of the encapsulation endpoint (default: Node IP address resolved from Node hostname)", + Destination: &cliConfig.Default.EncapIP, + }, + &cli.UintFlag{ + Name: "encap-port", + Usage: "The UDP port used by the encapsulation endpoint (default: 6081)", + Destination: &cliConfig.Default.EncapPort, + Value: Default.EncapPort, + }, + &cli.IntFlag{ + Name: "inactivity-probe", + Usage: "Maximum number of milliseconds of idle time on " + + "connection for ovn-controller before it sends a inactivity probe", + Destination: &cliConfig.Default.InactivityProbe, + Value: Default.InactivityProbe, + }, + &cli.IntFlag{ + Name: "openflow-probe", + Usage: "Maximum number of seconds of idle time on the openflow " + + "connection for ovn-controller before it sends a inactivity probe", + Destination: &cliConfig.Default.OpenFlowProbe, + Value: Default.OpenFlowProbe, + }, + &cli.IntFlag{ + Name: "ofctrl-wait-before-clear", + Usage: "Maximum number of milliseconds that ovn-controller waits before " + + "clearing existing flows during start up, to make sure the initial flow " + + "compute is complete and avoid data plane interruptions.", + Destination: &cliConfig.Default.OfctrlWaitBeforeClear, + Value: Default.OfctrlWaitBeforeClear, + }, + &cli.BoolFlag{ + Name: "monitor-all", + Usage: "Enable monitoring all data from SB DB instead of conditionally " + + "monitoring the data relevant to this node only. 
" + + "By default it is enabled.", + Destination: &cliConfig.Default.MonitorAll, + Value: Default.MonitorAll, + }, + &cli.DurationFlag{ + Name: "db-txn-timeout", + Usage: "OVSDBTxnTimeout is the timeout for db transaction in seconds, " + + "may be useful to increase for high-scale clusters. default value is 60 seconds.", + Destination: &cliConfig.Default.OVSDBTxnTimeout, + Value: Default.OVSDBTxnTimeout, + }, + &cli.BoolFlag{ + Name: "enable-lflow-cache", + Usage: "Enable the logical flow in-memory cache it uses " + + "when processing Southbound database logical flow changes. " + + "By default caching is enabled.", + Destination: &cliConfig.Default.LFlowCacheEnable, + Value: Default.LFlowCacheEnable, + }, + &cli.UintFlag{ + Name: "lflow-cache-limit", + Usage: "Maximum number of logical flow cache entries ovn-controller " + + "may create when the logical flow cache is enabled. By " + + "default the size of the cache is unlimited.", + Destination: &cliConfig.Default.LFlowCacheLimit, + Value: Default.LFlowCacheLimit, + }, + &cli.UintFlag{ + Name: "lflow-cache-limit-kb", + Usage: "Maximum size of the logical flow cache ovn-controller " + + "may create when the logical flow cache is enabled. By " + + "default the size of the cache is unlimited.", + Destination: &cliConfig.Default.LFlowCacheLimitKb, + Value: Default.LFlowCacheLimitKb, + }, + &cli.StringFlag{ + Name: "cluster-subnet", + Usage: "Deprecated alias for cluster-subnets.", + Destination: &clusterSubnet, + }, + &cli.StringFlag{ + Name: "cluster-subnets", + Value: Default.RawClusterSubnets, + Usage: "A comma separated set of IP subnets and the associated " + + "hostsubnet prefix lengths to use for the cluster (eg, \"10.128.0.0/14/23,10.0.0.0/14/23\"). " + + "Each entry is given in the form [IP address/prefix-length/hostsubnet-prefix-length] " + + "and cannot overlap with other entries. The hostsubnet-prefix-length " + + "defines how large a subnet is given to each node and may be different " + + "for each entry. For IPv6 subnets, it must be 64 (and does not need to " + + "be explicitly specified). For IPv4 subnets an explicit " + + "hostsubnet-prefix should be specified, but for backward compatibility " + + "it defaults to 24 if unspecified.", + Destination: &cliConfig.Default.RawClusterSubnets, + }, + &cli.BoolFlag{ + Name: "unprivileged-mode", + Usage: "Run ovnkube-node container in unprivileged mode. Valid only with --init-node option.", + Destination: &UnprivilegedMode, + }, + &cli.BoolFlag{ + Name: "enable-multicast", + Usage: "Adds multicast support. Valid only with --init-master option.", + Destination: &EnableMulticast, + }, + // Logging options + &cli.IntFlag{ + Name: "loglevel", + Usage: "log verbosity and level: info, warn, fatal, error are always printed no matter the log level. 
Use 5 for debug (default: 4)", + Destination: &cliConfig.Logging.Level, + Value: Logging.Level, + }, + &cli.StringFlag{ + Name: "logfile", + Usage: "path of a file to direct log output to", + Destination: &cliConfig.Logging.File, + }, + &cli.StringFlag{ + Name: "cnilogfile", + Usage: "path of a file to direct log from cni shim to output to (default: /var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log)", + Destination: &cliConfig.Logging.CNIFile, + Value: "/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log", + }, + &cli.StringFlag{ + Name: "libovsdblogfile", + Usage: "path of a file to direct log from libovsdb client to output to (default is to use same as --logfile)", + Destination: &cliConfig.Logging.LibovsdbFile, + }, + // Logfile rotation parameters + &cli.IntFlag{ + Name: "logfile-maxsize", + Usage: "Maximum size in bytes of the log file before it gets rolled", + Destination: &cliConfig.Logging.LogFileMaxSize, + Value: Logging.LogFileMaxSize, + }, + &cli.IntFlag{ + Name: "logfile-maxbackups", + Usage: "Maximum number of old log files to retain", + Destination: &cliConfig.Logging.LogFileMaxBackups, + Value: Logging.LogFileMaxBackups, + }, + &cli.IntFlag{ + Name: "logfile-maxage", + Usage: "Maximum number of days to retain old log files", + Destination: &cliConfig.Logging.LogFileMaxAge, + Value: Logging.LogFileMaxAge, + }, + &cli.IntFlag{ + Name: "acl-logging-rate-limit", + Usage: "The largest number of messages per second that gets logged before drop (default 20)", + Destination: &cliConfig.Logging.ACLLoggingRateLimit, + Value: 20, + }, + &cli.StringFlag{ + Name: "zone", + Usage: "zone name to which ovnkube-node/ovnkube-controller belongs to", + Value: Default.Zone, + Destination: &cliConfig.Default.Zone, + }, +} + +// MonitoringFlags capture monitoring-related options +var MonitoringFlags = []cli.Flag{ + // Monitoring options + &cli.StringFlag{ + Name: "netflow-targets", + Value: Monitoring.RawNetFlowTargets, + Usage: "A comma separated set of NetFlow collectors to export flow data (eg, \"10.128.0.150:2056,10.0.0.151:2056\")." + + "Each entry is given in the form [IP address:port] or [:port]. If only port is provided, it uses the Node IP", + Destination: &cliConfig.Monitoring.RawNetFlowTargets, + }, + &cli.StringFlag{ + Name: "sflow-targets", + Value: Monitoring.RawSFlowTargets, + Usage: "A comma separated set of SFlow collectors to export flow data (eg, \"10.128.0.150:6343,10.0.0.151:6343\")." + + "Each entry is given in the form [IP address:port] or [:port]. If only port is provided, it uses the Node IP", + Destination: &cliConfig.Monitoring.RawSFlowTargets, + }, + &cli.StringFlag{ + Name: "ipfix-targets", + Value: Monitoring.RawIPFIXTargets, + Usage: "A comma separated set of IPFIX collectors to export flow data (eg, \"10.128.0.150:2055,10.0.0.151:2055\")." + + "Each entry is given in the form [IP address:port] or [:port]. If only port is provided, it uses the Node IP", + Destination: &cliConfig.Monitoring.RawIPFIXTargets, + }, +} + +// IPFIXFlags capture IPFIX-related options +var IPFIXFlags = []cli.Flag{ + &cli.UintFlag{ + Name: "ipfix-sampling", + Usage: "Rate at which packets should be sampled and sent to each target collector (default: 400)", + Destination: &cliConfig.IPFIX.Sampling, + Value: IPFIX.Sampling, + }, + &cli.UintFlag{ + Name: "ipfix-cache-max-flows", + Usage: "Maximum number of IPFIX flow records that can be cached at a time. 
If 0, caching is disabled (default: 0)", + Destination: &cliConfig.IPFIX.CacheMaxFlows, + Value: IPFIX.CacheMaxFlows, + }, &cli.UintFlag{ + Name: "ipfix-cache-active-timeout", + Usage: "Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled (default: 60)", + Destination: &cliConfig.IPFIX.CacheActiveTimeout, + Value: IPFIX.CacheActiveTimeout, + }, +} + +// CNIFlags capture CNI-related options +var CNIFlags = []cli.Flag{ + // CNI options + &cli.StringFlag{ + Name: "cni-conf-dir", + Usage: "the CNI config directory in which to write the overlay CNI config file (default: /etc/cni/net.d)", + Destination: &cliConfig.CNI.ConfDir, + Value: CNI.ConfDir, + }, + &cli.StringFlag{ + Name: "cni-plugin", + Usage: "the name of the CNI plugin (default: ovn-k8s-cni-overlay)", + Destination: &cliConfig.CNI.Plugin, + Value: CNI.Plugin, + }, +} + +// OVNK8sFeatureFlags capture OVN-Kubernetes feature related options +var OVNK8sFeatureFlags = []cli.Flag{ + &cli.BoolFlag{ + Name: "enable-admin-network-policy", + Usage: "Configure to use Admin Network Policy CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableAdminNetworkPolicy, + Value: OVNKubernetesFeature.EnableAdminNetworkPolicy, + }, + &cli.BoolFlag{ + Name: "enable-egress-ip", + Usage: "Configure to use EgressIP CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableEgressIP, + Value: OVNKubernetesFeature.EnableEgressIP, + }, + &cli.IntFlag{ + Name: "egressip-reachability-total-timeout", + Usage: "EgressIP node reachability total timeout in seconds (default: 1)", + Destination: &cliConfig.OVNKubernetesFeature.EgressIPReachabiltyTotalTimeout, + Value: 1, + }, + &cli.BoolFlag{ + Name: "enable-egress-firewall", + Usage: "Configure to use EgressFirewall CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableEgressFirewall, + Value: OVNKubernetesFeature.EnableEgressFirewall, + }, + &cli.BoolFlag{ + Name: "enable-egress-qos", + Usage: "Configure to use EgressQoS CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableEgressQoS, + Value: OVNKubernetesFeature.EnableEgressQoS, + }, + &cli.IntFlag{ + Name: "egressip-node-healthcheck-port", + Usage: "Configure EgressIP node reachability using gRPC on this TCP port.", + Destination: &cliConfig.OVNKubernetesFeature.EgressIPNodeHealthCheckPort, + }, + &cli.BoolFlag{ + Name: "enable-multi-network", + Usage: "Configure to use multiple NetworkAttachmentDefinition CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetwork, + Value: OVNKubernetesFeature.EnableMultiNetwork, + }, + &cli.BoolFlag{ + Name: "enable-multi-networkpolicy", + Usage: "Configure to use MultiNetworkPolicy CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetworkPolicy, + Value: OVNKubernetesFeature.EnableMultiNetworkPolicy, + }, + &cli.BoolFlag{ + Name: "enable-network-segmentation", + Usage: "Configure to use network segmentation feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableNetworkSegmentation, + Value: OVNKubernetesFeature.EnableNetworkSegmentation, + }, + &cli.BoolFlag{ + Name: "enable-stateless-netpol", + Usage: "Configure to use stateless network policy feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableStatelessNetPol, + Value: OVNKubernetesFeature.EnableStatelessNetPol, + }, + &cli.BoolFlag{ 
+ Name: "enable-interconnect", + Usage: "Configure to enable interconnecting multiple zones.", + Destination: &cliConfig.OVNKubernetesFeature.EnableInterconnect, + Value: OVNKubernetesFeature.EnableInterconnect, + }, + &cli.BoolFlag{ + Name: "enable-egress-service", + Usage: "Configure to use EgressService CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableEgressService, + Value: OVNKubernetesFeature.EnableEgressService, + }, + &cli.BoolFlag{ + Name: "enable-multi-external-gateway", + Usage: "Configure to use AdminPolicyBasedExternalRoute CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableMultiExternalGateway, + Value: OVNKubernetesFeature.EnableMultiExternalGateway, + }, + &cli.BoolFlag{ + Name: "enable-persistent-ips", + Usage: "Configure to use the persistent ips feature for virtualization with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnablePersistentIPs, + Value: OVNKubernetesFeature.EnablePersistentIPs, + }, + &cli.BoolFlag{ + Name: "enable-dns-name-resolver", + Usage: "Configure to use DNSNameResolver CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableDNSNameResolver, + Value: OVNKubernetesFeature.EnableDNSNameResolver, + }, + &cli.BoolFlag{ + Name: "enable-svc-template-support", + Usage: "Configure to use svc-template with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableServiceTemplateSupport, + Value: OVNKubernetesFeature.EnableServiceTemplateSupport, + }, + &cli.BoolFlag{ + Name: "enable-observability", + Usage: "Configure to use OVN sampling with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableObservability, + Value: OVNKubernetesFeature.EnableObservability, + }, +} + +// K8sFlags capture Kubernetes-related options +var K8sFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "service-cluster-ip-range", + Usage: "Deprecated alias for k8s-service-cidrs.", + Destination: &serviceClusterIPRange, + }, + &cli.StringFlag{ + Name: "k8s-service-cidr", + Usage: "Deprecated alias for k8s-service-cidrs.", + Destination: &cliConfig.Kubernetes.CompatServiceCIDR, + }, + &cli.StringFlag{ + Name: "k8s-service-cidrs", + Usage: "A comma-separated set of CIDR notation IP ranges from which k8s assigns " + + "service cluster IPs. This should be the same as the value " + + "provided for kube-apiserver \"--service-cluster-ip-range\" " + + "option. 
(default: 172.16.1.0/24)", + Destination: &cliConfig.Kubernetes.RawServiceCIDRs, + Value: Kubernetes.RawServiceCIDRs, + }, + &cli.StringFlag{ + Name: "k8s-kubeconfig", + Usage: "absolute path to the Kubernetes kubeconfig file (not required if the --k8s-apiserver, --k8s-ca-cert, and --k8s-token are given)", + Destination: &cliConfig.Kubernetes.Kubeconfig, + }, + &cli.StringFlag{ + Name: "bootstrap-kubeconfig", + Usage: "absolute path to the Kubernetes kubeconfig file that is used to create the initial, per node, client certificates (should only be used together with 'cert-dir')", + Destination: &cliConfig.Kubernetes.BootstrapKubeconfig, + }, + &cli.StringFlag{ + Name: "k8s-apiserver", + Usage: "URL of the Kubernetes API server (not required if --k8s-kubeconfig is given) (default: http://localhost:8443)", + Destination: &cliConfig.Kubernetes.APIServer, + Value: Kubernetes.APIServer, + }, + &cli.StringFlag{ + Name: "cert-dir", + Usage: "absolute path to the directory of the client key and certificate (not required if --k8s-kubeconfig or --k8s-apiserver, --k8s-ca-cert, and --k8s-token are given)", + Destination: &cliConfig.Kubernetes.CertDir, + }, + &cli.DurationFlag{ + Name: "cert-duration", + Usage: "requested certificate duration, default: 10min", + Destination: &cliConfig.Kubernetes.CertDuration, + Value: Kubernetes.CertDuration, + }, + &cli.StringFlag{ + Name: "k8s-cacert", + Usage: "the absolute path to the Kubernetes API CA certificate (not required if --k8s-kubeconfig is given)", + Destination: &cliConfig.Kubernetes.CACert, + }, + &cli.StringFlag{ + Name: "k8s-token", + Usage: "the Kubernetes API authentication token (not required if --k8s-kubeconfig is given)", + Destination: &cliConfig.Kubernetes.Token, + }, + &cli.StringFlag{ + Name: "k8s-token-file", + Usage: "the path to Kubernetes API token. If set, it is periodically read and takes precedence over k8s-token", + Destination: &cliConfig.Kubernetes.TokenFile, + }, + &cli.StringFlag{ + Name: "ovn-config-namespace", + Usage: "specify a namespace which will contain services to config the OVN databases", + Destination: &cliConfig.Kubernetes.OVNConfigNamespace, + Value: Kubernetes.OVNConfigNamespace, + }, + &cli.BoolFlag{ + Name: "ovn-empty-lb-events", + Usage: "If set, then load balancers do not get deleted when all backends are removed. " + + "Instead, ovn-kubernetes monitors the OVN southbound database for empty lb backends " + + "controller events. If one arrives, then a NeedPods event is sent so that Kubernetes " + + "will spin up pods for the load balancer to send traffic to.", + Destination: &cliConfig.Kubernetes.OVNEmptyLbEvents, + }, + &cli.StringFlag{ + Name: "pod-ip", + Usage: "UNUSED", + }, + &cli.StringFlag{ + Name: "no-hostsubnet-nodes", + Usage: "Specify a label for nodes that will manage their own hostsubnets", + Destination: &cliConfig.Kubernetes.RawNoHostSubnetNodes, + }, + &cli.StringFlag{ + Name: "host-network-namespace", + Usage: "specify a namespace which will be used to classify host network traffic for network policy", + Destination: &cliConfig.Kubernetes.HostNetworkNamespace, + Value: Kubernetes.HostNetworkNamespace, + }, + &cli.StringFlag{ + Name: "platform-type", + Usage: "The cloud provider platform type ovn-kubernetes is deployed on. 
" + + "Valid values can be found in: https://github.com/ovn-org/ovn-kubernetes/blob/master/go-controller/vendor/github.com/openshift/api/config/v1/types_infrastructure.go#L130-L172", + Destination: &cliConfig.Kubernetes.PlatformType, + Value: Kubernetes.PlatformType, + }, + &cli.StringFlag{ + Name: "healthz-bind-address", + Usage: "The IP address and port for the node proxy healthz server to serve on (set to '0.0.0.0:10256' or '[::]:10256' for listening in all interfaces and IP families). Disabled by default.", + Destination: &cliConfig.Kubernetes.HealthzBindAddress, + }, + &cli.StringFlag{ + Name: "dns-service-namespace", + Usage: "DNS kubernetes service namespace used to expose name resolving to live migratable vms.", + Destination: &cliConfig.Kubernetes.DNSServiceNamespace, + Value: Kubernetes.DNSServiceNamespace, + }, + &cli.StringFlag{ + Name: "dns-service-name", + Usage: "DNS kubernetes service name used to expose name resolving to live migratable vms.", + Destination: &cliConfig.Kubernetes.DNSServiceName, + Value: Kubernetes.DNSServiceName, + }, +} + +// MetricsFlags capture metrics-related options +var MetricsFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "metrics-bind-address", + Usage: "The IP address and port for the OVN K8s metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces)", + Destination: &cliConfig.Metrics.BindAddress, + }, + &cli.StringFlag{ + Name: "ovn-metrics-bind-address", + Usage: "The IP address and port for the OVN metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces)", + Destination: &cliConfig.Metrics.OVNMetricsBindAddress, + }, + &cli.BoolFlag{ + Name: "export-ovs-metrics", + Usage: "When true exports OVS metrics from the OVN metrics server", + Destination: &cliConfig.Metrics.ExportOVSMetrics, + }, + &cli.BoolFlag{ + Name: "metrics-enable-pprof", + Usage: "If true, then also accept pprof requests on the metrics port.", + Destination: &cliConfig.Metrics.EnablePprof, + Value: Metrics.EnablePprof, + }, + &cli.StringFlag{ + Name: "node-server-privkey", + Usage: "Private key that the OVN node K8s metrics server uses to serve metrics over TLS.", + Destination: &cliConfig.Metrics.NodeServerPrivKey, + }, + &cli.StringFlag{ + Name: "node-server-cert", + Usage: "Certificate that the OVN node K8s metrics server uses to serve metrics over TLS.", + Destination: &cliConfig.Metrics.NodeServerCert, + }, + &cli.BoolFlag{ + Name: "metrics-enable-config-duration", + Usage: "Enables monitoring OVN-Kubernetes master and OVN configuration duration", + Destination: &cliConfig.Metrics.EnableConfigDuration, + }, + &cli.BoolFlag{ + Name: "metrics-enable-scale", + Usage: "Enables metrics related to scaling", + Destination: &cliConfig.Metrics.EnableScaleMetrics, + }, +} + +// OvnNBFlags capture OVN northbound database options +var OvnNBFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "nb-address", + Usage: "IP address and port of the OVN northbound API " + + "(eg, ssl:1.2.3.4:6641,ssl:1.2.3.5:6642). Leave empty to " + + "use a local unix socket.", + Destination: &cliConfig.OvnNorth.Address, + }, + &cli.StringFlag{ + Name: "nb-client-privkey", + Usage: "Private key that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-privkey.pem). 
" + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnNorth.PrivKey, + }, + &cli.StringFlag{ + Name: "nb-client-cert", + Usage: "Client certificate that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-cert.pem). " + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnNorth.Cert, + }, + &cli.StringFlag{ + Name: "nb-client-cacert", + Usage: "CA certificate that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-ca.cert)." + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnNorth.CACert, + }, + &cli.StringFlag{ + Name: "nb-cert-common-name", + Usage: "Common Name of the certificate used for TLS server certificate verification. " + + "In cases where the certificate doesn't have any SAN Extensions, this parameter " + + "should match the DNS(hostname) of the server. In case the certificate has a " + + "SAN extension, this parameter should match one of the SAN fields.", + Destination: &cliConfig.OvnNorth.CertCommonName, + }, + &cli.UintFlag{ + Name: "nb-raft-election-timer", + Usage: "The desired northbound database election timer.", + Destination: &cliConfig.OvnNorth.ElectionTimer, + }, +} + +// OvnSBFlags capture OVN southbound database options +var OvnSBFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "sb-address", + Usage: "IP address and port of the OVN southbound API " + + "(eg, ssl:1.2.3.4:6642,ssl:1.2.3.5:6642). " + + "Leave empty to use a local unix socket.", + Destination: &cliConfig.OvnSouth.Address, + }, + &cli.StringFlag{ + Name: "sb-client-privkey", + Usage: "Private key that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnsb-privkey.pem)." + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnSouth.PrivKey, + }, + &cli.StringFlag{ + Name: "sb-client-cert", + Usage: "Client certificate that the client should use for talking to the OVN database(default when ssl address is used: /etc/openvswitch/ovnsb-cert.pem). " + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnSouth.Cert, + }, + &cli.StringFlag{ + Name: "sb-client-cacert", + Usage: "CA certificate that the client should use for talking to the OVN database (default when ssl address is used /etc/openvswitch/ovnsb-ca.cert). " + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnSouth.CACert, + }, + &cli.StringFlag{ + Name: "sb-cert-common-name", + Usage: "Common Name of the certificate used for TLS server certificate verification. " + + "In cases where the certificate doesn't have any SAN Extensions, this parameter " + + "should match the DNS(hostname) of the server. In case the certificate has a " + + "SAN extension, this parameter should match one of the SAN fields.", + Destination: &cliConfig.OvnSouth.CertCommonName, + }, + &cli.UintFlag{ + Name: "sb-raft-election-timer", + Usage: "The desired southbound database election timer.", + Destination: &cliConfig.OvnSouth.ElectionTimer, + }, +} + +// OVNGatewayFlags capture L3 Gateway related flags +var OVNGatewayFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "gateway-mode", + Usage: "Sets the cluster gateway mode. 
One of \"shared\", " + + "or \"local\". If not given, gateway functionality is disabled.", + }, + &cli.StringFlag{ + Name: "gateway-interface", + Usage: "The interface on nodes that will be the gateway interface. " + + "If none specified, then the node's interface on which the " + + "default gateway is configured will be used as the gateway " + + "interface. Only useful with \"init-gateways\"", + Destination: &cliConfig.Gateway.Interface, + }, + &cli.StringFlag{ + Name: "exgw-interface", + Usage: "The interface on nodes that will be used for external gw network traffic. " + + "If none specified, ovnk will use the default interface", + Destination: &cliConfig.Gateway.EgressGWInterface, + }, + &cli.StringFlag{ + Name: "gateway-nexthop", + Usage: "The external default gateway which is used as a next hop by " + + "OVN gateway. This is many times just the default gateway " + + "of the node in question. If not specified, the default gateway" + + "configured in the node is used. Only useful with " + + "\"init-gateways\"", + Destination: &cliConfig.Gateway.NextHop, + }, + &cli.UintFlag{ + Name: "gateway-vlanid", + Usage: "The VLAN on which the external network is available. " + + "Valid only for Shared Gateway interface mode.", + Destination: &cliConfig.Gateway.VLANID, + }, + &cli.BoolFlag{ + Name: "nodeport", + Usage: "Setup nodeport based ingress on gateways.", + Destination: &cliConfig.Gateway.NodeportEnable, + }, + &cli.BoolFlag{ + Name: "disable-snat-multiple-gws", + Usage: "Disable SNAT for egress traffic with multiple gateways.", + Destination: &cliConfig.Gateway.DisableSNATMultipleGWs, + }, + &cli.BoolFlag{ + Name: "disable-forwarding", + Usage: "Disable forwarding on OVNK controlled interfaces.", + Destination: &cliConfig.Gateway.DisableForwarding, + }, + &cli.StringFlag{ + Name: "gateway-v4-join-subnet", + Usage: "The v4 join subnet used for assigning join switch IPv4 addresses", + Destination: &cliConfig.Gateway.V4JoinSubnet, + Value: Gateway.V4JoinSubnet, + }, + &cli.StringFlag{ + Name: "gateway-v6-join-subnet", + Usage: "The v6 join subnet used for assigning join switch IPv6 addresses", + Destination: &cliConfig.Gateway.V6JoinSubnet, + Value: Gateway.V6JoinSubnet, + }, + &cli.StringFlag{ + Name: "gateway-v4-masquerade-subnet", + Usage: "The v4 masquerade subnet used for assigning masquerade IPv4 addresses", + Destination: &cliConfig.Gateway.V4MasqueradeSubnet, + Value: Gateway.V4MasqueradeSubnet, + }, + &cli.StringFlag{ + Name: "gateway-v6-masquerade-subnet", + Usage: "The v6 masquerade subnet used for assigning masquerade IPv6 addresses", + Destination: &cliConfig.Gateway.V6MasqueradeSubnet, + Value: Gateway.V6MasqueradeSubnet, + }, + &cli.BoolFlag{ + Name: "disable-pkt-mtu-check", + Usage: "Disable OpenFlow checks for if packet size is greater than pod MTU", + Destination: &cliConfig.Gateway.DisablePacketMTUCheck, + }, + &cli.StringFlag{ + Name: "gateway-router-subnet", + Usage: "The Subnet to be used for the gateway router external port (shared mode only). " + + "auto-detected if not given. Must match the the kube node IP address. " + + "Currently valid for DPUs only", + Destination: &cliConfig.Gateway.RouterSubnet, + Value: Gateway.RouterSubnet, + }, + &cli.BoolFlag{ + Name: "single-node", + Usage: "Enable single node optimizations. 
" + + "Single node indicates a one node cluster and allows to simplify ovn-kubernetes gateway logic", + Destination: &cliConfig.Gateway.SingleNode, + }, + &cli.BoolFlag{ + Name: "allow-no-uplink", + Usage: "Allow the external gateway bridge without an uplink port in local gateway mode", + Destination: &cliConfig.Gateway.AllowNoUplink, + }, + // Deprecated CLI options + &cli.BoolFlag{ + Name: "init-gateways", + Usage: "DEPRECATED; use --gateway-mode instead", + Destination: &initGateways, + }, + &cli.BoolFlag{ + Name: "gateway-local", + Usage: "DEPRECATED; use --gateway-mode instead", + Destination: &gatewayLocal, + }, +} + +// MasterHAFlags capture leader election flags for master +var MasterHAFlags = []cli.Flag{ + &cli.IntFlag{ + Name: "ha-election-lease-duration", + Usage: "Leader election lease duration (in secs) (default: 60)", + Destination: &cliConfig.MasterHA.ElectionLeaseDuration, + Value: MasterHA.ElectionLeaseDuration, + }, + &cli.IntFlag{ + Name: "ha-election-renew-deadline", + Usage: "Leader election renew deadline (in secs) (default: 30)", + Destination: &cliConfig.MasterHA.ElectionRenewDeadline, + Value: MasterHA.ElectionRenewDeadline, + }, + &cli.IntFlag{ + Name: "ha-election-retry-period", + Usage: "Leader election retry period (in secs) (default: 20)", + Destination: &cliConfig.MasterHA.ElectionRetryPeriod, + Value: MasterHA.ElectionRetryPeriod, + }, +} + +// ClusterMgrHAFlags capture leader election flags for cluster manager +var ClusterMgrHAFlags = []cli.Flag{ + &cli.IntFlag{ + Name: "cluster-manager-ha-election-lease-duration", + Usage: "Leader election lease duration (in secs) (default: 60)", + Destination: &cliConfig.ClusterMgrHA.ElectionLeaseDuration, + Value: ClusterMgrHA.ElectionLeaseDuration, + }, + &cli.IntFlag{ + Name: "cluster-manager-ha-election-renew-deadline", + Usage: "Leader election renew deadline (in secs) (default: 30)", + Destination: &cliConfig.ClusterMgrHA.ElectionRenewDeadline, + Value: ClusterMgrHA.ElectionRenewDeadline, + }, + &cli.IntFlag{ + Name: "cluster-manager-ha-election-retry-period", + Usage: "Leader election retry period (in secs) (default: 20)", + Destination: &cliConfig.ClusterMgrHA.ElectionRetryPeriod, + Value: ClusterMgrHA.ElectionRetryPeriod, + }, +} + +// HybridOverlayFlags capture hybrid overlay feature options +var HybridOverlayFlags = []cli.Flag{ + &cli.BoolFlag{ + Name: "enable-hybrid-overlay", + Usage: "Enables hybrid overlay functionality", + Destination: &cliConfig.HybridOverlay.Enabled, + }, + &cli.StringFlag{ + Name: "hybrid-overlay-cluster-subnets", + Value: HybridOverlay.RawClusterSubnets, + Usage: "A comma separated set of IP subnets and the associated" + + "hostsubnetlengths (eg, \"10.128.0.0/14/23,10.0.0.0/14/23\"). " + + "to use with the extended hybrid network. Each entry is given " + + "in the form IP address/subnet mask/hostsubnetlength, " + + "the hostsubnetlength is optional and if unspecified defaults to 24. 
The " + + "hostsubnetlength defines how many IP addresses are dedicated to each node.", + Destination: &cliConfig.HybridOverlay.RawClusterSubnets, + }, + &cli.UintFlag{ + Name: "hybrid-overlay-vxlan-port", + Value: HybridOverlay.VXLANPort, + Usage: "The UDP port used by the VXLAN protocol for hybrid networks.", + Destination: &cliConfig.HybridOverlay.VXLANPort, + }, +} + +// OvnKubeNodeFlags captures ovnkube-node specific configurations +var OvnKubeNodeFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "ovnkube-node-mode", + Usage: "ovnkube-node operating mode full(default), dpu, dpu-host", + Value: OvnKubeNode.Mode, + Destination: &cliConfig.OvnKubeNode.Mode, + }, + &cli.StringFlag{ + Name: "ovnkube-node-mgmt-port-netdev", + Usage: "When provided, use this netdev as management port. It will be renamed to ovn-k8s-mp0 " + + "and used to allow host network services and pods to access k8s pod and service networks. ", + Value: OvnKubeNode.MgmtPortNetdev, + Destination: &cliConfig.OvnKubeNode.MgmtPortNetdev, + }, + &cli.StringFlag{ + Name: "ovnkube-node-mgmt-port-dp-resource-name", + Usage: "When provided, use this device plugin resource name to find the allocated resource as management port. " + + "The interface chosen from this resource will be renamed to ovn-k8s-mp0 " + + "and used to allow host network services and pods to access k8s pod and service networks. ", + Value: OvnKubeNode.MgmtPortDPResourceName, + Destination: &cliConfig.OvnKubeNode.MgmtPortDPResourceName, + }, + &cli.BoolFlag{ + Name: "disable-ovn-iface-id-ver", + Usage: "Deprecated; iface-id-ver is always enabled", + Destination: &disableOVNIfaceIDVer, + }, +} + +// ClusterManagerFlags captures ovnkube-cluster-manager specific configurations +var ClusterManagerFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "cluster-manager-v4-transit-switch-subnet", + Usage: "The v4 transit switch subnet used for assigning transit switch IPv4 addresses for interconnect", + Destination: &cliConfig.ClusterManager.V4TransitSwitchSubnet, + Value: ClusterManager.V4TransitSwitchSubnet, + }, + &cli.StringFlag{ + Name: "cluster-manager-v6-transit-switch-subnet", + Usage: "The v6 transit switch subnet used for assigning transit switch IPv6 addresses for interconnect", + Destination: &cliConfig.ClusterManager.V6TransitSwitchSubnet, + Value: ClusterManager.V6TransitSwitchSubnet, + }, +} + +// Flags are general command-line flags. Apps should add these flags to their +// own urfave/cli flags and call InitConfig() early in the application. +var Flags []cli.Flag + +// GetFlags returns an array of all command-line flags necessary to configure +// ovn-kubernetes +func GetFlags(customFlags []cli.Flag) []cli.Flag { + flags := CommonFlags + flags = append(flags, CNIFlags...) + flags = append(flags, OVNK8sFeatureFlags...) + flags = append(flags, K8sFlags...) + flags = append(flags, MetricsFlags...) + flags = append(flags, OvnNBFlags...) + flags = append(flags, OvnSBFlags...) + flags = append(flags, OVNGatewayFlags...) + flags = append(flags, MasterHAFlags...) + flags = append(flags, ClusterMgrHAFlags...) + flags = append(flags, HybridOverlayFlags...) + flags = append(flags, MonitoringFlags...) + flags = append(flags, IPFIXFlags...) + flags = append(flags, OvnKubeNodeFlags...) + flags = append(flags, ClusterManagerFlags...) + flags = append(flags, customFlags...) 
+ return flags +} + +// Defaults are a set of flags to indicate which options should be read from +// ovs-vsctl and used as default values if option is not found via the config +// file or command-line +type Defaults struct { + OvnNorthAddress bool + K8sAPIServer bool + K8sToken bool + K8sTokenFile bool + K8sCert bool +} + +const ( + ovsVsctlCommand = "ovs-vsctl" +) + +// Can't use pkg/ovs or pkg/util here because those package import this one +func rawExec(exec kexec.Interface, cmd string, args ...string) (string, error) { + cmdPath, err := exec.LookPath(cmd) + if err != nil { + return "", err + } + + klog.V(5).Infof("Exec: %s %s", cmdPath, strings.Join(args, " ")) + out, err := exec.Command(cmdPath, args...).CombinedOutput() + if err != nil { + klog.V(5).Infof("Exec: %s %s => %v", cmdPath, strings.Join(args, " "), err) + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +// Can't use pkg/ovs or pkg/util here because those package import this one +func runOVSVsctl(exec kexec.Interface, args ...string) (string, error) { + newArgs := append([]string{"--timeout=15"}, args...) + out, err := rawExec(exec, ovsVsctlCommand, newArgs...) + if err != nil { + return "", err + } + return strings.Trim(strings.TrimSpace(out), "\""), nil +} + +func getOVSExternalID(exec kexec.Interface, name string) string { + out, err := runOVSVsctl(exec, + "--if-exists", + "get", + "Open_vSwitch", + ".", + "external_ids:"+name) + if err != nil { + klog.V(5).Infof("Failed to get OVS external_id %s: %v\n\t%s", name, err, out) + return "" + } + return out +} + +func setOVSExternalID(exec kexec.Interface, key, value string) error { + out, err := runOVSVsctl(exec, + "set", + "Open_vSwitch", + ".", + fmt.Sprintf("external_ids:%s=%s", key, value)) + if err != nil { + return fmt.Errorf("error setting OVS external ID '%s=%s': %v\n %q", key, value, err, out) + } + return nil +} + +func buildKubernetesConfig(exec kexec.Interface, cli, file *config, saPath string, defaults *Defaults) error { + // token adn ca.crt may be from files mounted in container. + saConfig := savedKubernetes + if data, err := os.ReadFile(filepath.Join(saPath, kubeServiceAccountFileToken)); err == nil { + saConfig.Token = string(data) + saConfig.TokenFile = filepath.Join(saPath, kubeServiceAccountFileToken) + } + if _, err2 := os.Stat(filepath.Join(saPath, kubeServiceAccountFileCACert)); err2 == nil { + saConfig.CACert = filepath.Join(saPath, kubeServiceAccountFileCACert) + } + + if err := overrideFields(&Kubernetes, &saConfig, &savedKubernetes); err != nil { + return err + } + + // values for token, cacert, kubeconfig, api-server may be found in several places. 
+ // Priority order (highest first): OVS config, command line options, config file, + // environment variables, service account files + + envConfig := savedKubernetes + envVarsMap := map[string]string{ + "Kubeconfig": "KUBECONFIG", + "BootstrapKubeconfig": "BOOTSTRAP_KUBECONFIG", + "CertDir": "CERT_DIR", + "CACert": "K8S_CACERT", + "APIServer": "K8S_APISERVER", + "Token": "K8S_TOKEN", + "TokenFile": "K8S_TOKEN_FILE", + "HostNetworkNamespace": "OVN_HOST_NETWORK_NAMESPACE", + } + for k, v := range envVarsMap { + if x, exists := os.LookupEnv(v); exists && len(x) > 0 { + reflect.ValueOf(&envConfig).Elem().FieldByName(k).SetString(x) + } + } + + if err := overrideFields(&Kubernetes, &envConfig, &savedKubernetes); err != nil { + return err + } + + // Copy config file values over default values + if err := overrideFields(&Kubernetes, &file.Kubernetes, &savedKubernetes); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&Kubernetes, &cli.Kubernetes, &savedKubernetes); err != nil { + return err + } + + // Grab default values from OVS external IDs + if defaults.K8sAPIServer { + Kubernetes.APIServer = getOVSExternalID(exec, "k8s-api-server") + } + if defaults.K8sToken { + Kubernetes.Token = getOVSExternalID(exec, "k8s-api-token") + } + if defaults.K8sTokenFile { + Kubernetes.TokenFile = getOVSExternalID(exec, "k8s-api-token-file") + } + + if defaults.K8sCert { + Kubernetes.CACert = getOVSExternalID(exec, "k8s-ca-certificate") + } + + if Kubernetes.Kubeconfig != "" && !pathExists(Kubernetes.Kubeconfig) { + return fmt.Errorf("kubernetes kubeconfig file %q not found", Kubernetes.Kubeconfig) + } + + if Kubernetes.CACert != "" { + bytes, err := os.ReadFile(Kubernetes.CACert) + if err != nil { + return err + } + Kubernetes.CAData = bytes + } + + url, err := url.Parse(Kubernetes.APIServer) + if err != nil { + return fmt.Errorf("kubernetes API server address %q invalid: %v", Kubernetes.APIServer, err) + } else if url.Scheme != "https" && url.Scheme != "http" { + return fmt.Errorf("kubernetes API server URL scheme %q invalid", url.Scheme) + } + + // Legacy --service-cluster-ip-range or --k8s-service-cidr options override config file or --k8s-service-cidrs. + if serviceClusterIPRange != "" { + Kubernetes.RawServiceCIDRs = serviceClusterIPRange + } else if Kubernetes.CompatServiceCIDR != "" { + Kubernetes.RawServiceCIDRs = Kubernetes.CompatServiceCIDR + } + if Kubernetes.RawServiceCIDRs == "" { + return fmt.Errorf("kubernetes service-cidrs is required") + } + + return nil +} + +// completeKubernetesConfig completes the Kubernetes config by parsing raw values +// into their final form. 
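The environment-variable layer above works purely by field name: every key in envVarsMap must match an exported field of the Kubernetes config struct, which is then set via reflection before overrideFields layers the config-file and CLI values on top in the priority order listed. A minimal, self-contained sketch of that pattern, using a made-up toyConfig rather than the vendored KubernetesConfig (illustrative only):

    package main

    import (
        "fmt"
        "os"
        "reflect"
    )

    // toyConfig stands in for the real config struct; its exported field
    // names double as the lookup keys in the env-var map.
    type toyConfig struct {
        APIServer string
        Token     string
    }

    func main() {
        cfg := toyConfig{APIServer: "https://default:6443"}
        envVars := map[string]string{
            "APIServer": "K8S_APISERVER",
            "Token":     "K8S_TOKEN",
        }
        for field, env := range envVars {
            // Only override when the variable is set and non-empty,
            // mirroring the os.LookupEnv check above.
            if v, ok := os.LookupEnv(env); ok && len(v) > 0 {
                reflect.ValueOf(&cfg).Elem().FieldByName(field).SetString(v)
            }
        }
        fmt.Printf("%+v\n", cfg)
    }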
+func completeKubernetesConfig(allSubnets *configSubnets) error { + Kubernetes.ServiceCIDRs = []*net.IPNet{} + for _, cidrString := range strings.Split(Kubernetes.RawServiceCIDRs, ",") { + _, serviceCIDR, err := net.ParseCIDR(cidrString) + if err != nil { + return fmt.Errorf("kubernetes service network CIDR %q invalid: %v", cidrString, err) + } + Kubernetes.ServiceCIDRs = append(Kubernetes.ServiceCIDRs, serviceCIDR) + allSubnets.append(configSubnetService, serviceCIDR) + } + if len(Kubernetes.ServiceCIDRs) > 2 { + return fmt.Errorf("kubernetes service-cidrs must contain either a single CIDR or else an IPv4/IPv6 pair") + } else if len(Kubernetes.ServiceCIDRs) == 2 && utilnet.IsIPv6CIDR(Kubernetes.ServiceCIDRs[0]) == utilnet.IsIPv6CIDR(Kubernetes.ServiceCIDRs[1]) { + return fmt.Errorf("kubernetes service-cidrs must contain either a single CIDR or else an IPv4/IPv6 pair") + } + + if Kubernetes.RawNoHostSubnetNodes != "" { + nodeSelector, err := metav1.ParseToLabelSelector(Kubernetes.RawNoHostSubnetNodes) + if err != nil { + return fmt.Errorf("labelSelector \"%s\" is invalid: %v", Kubernetes.RawNoHostSubnetNodes, err) + } + selector, err := metav1.LabelSelectorAsSelector(nodeSelector) + if err != nil { + return fmt.Errorf("failed to convert %v into a labels.Selector: %v", nodeSelector, err) + } + Kubernetes.NoHostSubnetNodes = selector + } + + return nil +} + +func buildMetricsConfig(cli, file *config) error { + // Copy KubernetesConfig backwards-compat values over default values + if Kubernetes.CompatMetricsBindAddress != "" { + Metrics.BindAddress = Kubernetes.CompatMetricsBindAddress + } + if Kubernetes.CompatOVNMetricsBindAddress != "" { + Metrics.OVNMetricsBindAddress = Kubernetes.CompatOVNMetricsBindAddress + } + Metrics.EnablePprof = Kubernetes.CompatMetricsEnablePprof + + // Copy config file values over Kubernetes and default values + if err := overrideFields(&Metrics, &file.Metrics, &savedMetrics); err != nil { + return err + } + + // And CLI overrides over config file, Kubernetes, and default values + if err := overrideFields(&Metrics, &cli.Metrics, &savedMetrics); err != nil { + return err + } + + return nil +} + +func buildGatewayConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&Gateway, &file.Gateway, &savedGateway); err != nil { + return err + } + + cli.Gateway.Mode = GatewayMode(ctx.String("gateway-mode")) + if cli.Gateway.Mode == GatewayModeDisabled { + // Handle legacy CLI options + if ctx.Bool("init-gateways") { + cli.Gateway.Mode = GatewayModeShared + if ctx.Bool("gateway-local") { + cli.Gateway.Mode = GatewayModeLocal + } + } + } + // And CLI overrides over config file and default values + if err := overrideFields(&Gateway, &cli.Gateway, &savedGateway); err != nil { + return err + } + + if Gateway.Mode != GatewayModeDisabled { + validModes := []string{string(GatewayModeShared), string(GatewayModeLocal)} + var found bool + for _, mode := range validModes { + if string(Gateway.Mode) == mode { + found = true + break + } + } + if !found { + return fmt.Errorf("invalid gateway mode %q: expect one of %s", string(Gateway.Mode), strings.Join(validModes, ",")) + } + } + + // Options are only valid if Mode is not disabled + if Gateway.Mode == GatewayModeDisabled { + if Gateway.Interface != "" { + return fmt.Errorf("gateway interface option %q not allowed when gateway is disabled", Gateway.Interface) + } + if Gateway.NextHop != "" { + return fmt.Errorf("gateway next-hop option %q not allowed when gateway is 
disabled", Gateway.NextHop) + } + } + + if Gateway.Mode != GatewayModeShared && Gateway.VLANID != 0 { + return fmt.Errorf("gateway VLAN ID option: %d is supported only in shared gateway mode", Gateway.VLANID) + } + + return nil +} + +func completeGatewayConfig(allSubnets *configSubnets, masqueradeIPs *MasqueradeIPsConfig) error { + // Validate v4 and v6 join subnets + v4IP, v4JoinCIDR, err := net.ParseCIDR(Gateway.V4JoinSubnet) + if err != nil || utilnet.IsIPv6(v4IP) { + return fmt.Errorf("invalid gateway v4 join subnet specified, subnet: %s: error: %v", Gateway.V4JoinSubnet, err) + } + + v6IP, v6JoinCIDR, err := net.ParseCIDR(Gateway.V6JoinSubnet) + if err != nil || !utilnet.IsIPv6(v6IP) { + return fmt.Errorf("invalid gateway v6 join subnet specified, subnet: %s: error: %v", Gateway.V6JoinSubnet, err) + } + allSubnets.append(configSubnetJoin, v4JoinCIDR) + allSubnets.append(configSubnetJoin, v6JoinCIDR) + + //validate v4 and v6 masquerade subnets + v4MasqueradeIP, v4MasqueradeCIDR, err := net.ParseCIDR(Gateway.V4MasqueradeSubnet) + if err != nil || utilnet.IsIPv6(v4MasqueradeCIDR.IP) { + return fmt.Errorf("invalid gateway v4 masquerade subnet specified, subnet: %s: error: %v", Gateway.V4MasqueradeSubnet, err) + } + if err = AllocateV4MasqueradeIPs(v4MasqueradeIP, masqueradeIPs); err != nil { + return fmt.Errorf("unable to allocate V4MasqueradeIPs: %s", err) + } + + v6MasqueradeIP, v6MasqueradeCIDR, err := net.ParseCIDR(Gateway.V6MasqueradeSubnet) + if err != nil || !utilnet.IsIPv6(v6MasqueradeCIDR.IP) { + return fmt.Errorf("invalid gateway v6 masquerade subnet specified, subnet: %s: error: %v", Gateway.V6MasqueradeSubnet, err) + } + if err = AllocateV6MasqueradeIPs(v6MasqueradeIP, masqueradeIPs); err != nil { + return fmt.Errorf("unable to allocate V6MasqueradeIPs: %s", err) + } + + allSubnets.append(configSubnetMasquerade, v4MasqueradeCIDR) + allSubnets.append(configSubnetMasquerade, v6MasqueradeCIDR) + + return nil +} + +func buildOVNKubernetesFeatureConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&OVNKubernetesFeature, &file.OVNKubernetesFeature, &savedOVNKubernetesFeature); err != nil { + return err + } + // And CLI overrides over config file and default values + if err := overrideFields(&OVNKubernetesFeature, &cli.OVNKubernetesFeature, &savedOVNKubernetesFeature); err != nil { + return err + } + return nil +} + +func buildMasterHAConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&MasterHA, &file.MasterHA, &savedMasterHA); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&MasterHA, &cli.MasterHA, &savedMasterHA); err != nil { + return err + } + + if MasterHA.ElectionLeaseDuration <= MasterHA.ElectionRenewDeadline { + return fmt.Errorf("invalid HA election lease duration '%d'. "+ + "It should be greater than HA election renew deadline '%d'", + MasterHA.ElectionLeaseDuration, MasterHA.ElectionRenewDeadline) + } + + if MasterHA.ElectionRenewDeadline <= MasterHA.ElectionRetryPeriod { + return fmt.Errorf("invalid HA election renew deadline duration '%d'. 
"+ + "It should be greater than HA election retry period '%d'", + MasterHA.ElectionRenewDeadline, MasterHA.ElectionRetryPeriod) + } + return nil +} + +func buildClusterMgrHAConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&ClusterMgrHA, &file.ClusterMgrHA, &savedClusterMgrHA); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&ClusterMgrHA, &cli.ClusterMgrHA, &savedClusterMgrHA); err != nil { + return err + } + + if ClusterMgrHA.ElectionLeaseDuration <= ClusterMgrHA.ElectionRenewDeadline { + return fmt.Errorf("invalid HA election lease duration '%d'. "+ + "It should be greater than HA election renew deadline '%d'", + ClusterMgrHA.ElectionLeaseDuration, ClusterMgrHA.ElectionRenewDeadline) + } + + if ClusterMgrHA.ElectionRenewDeadline <= ClusterMgrHA.ElectionRetryPeriod { + return fmt.Errorf("invalid HA election renew deadline duration '%d'. "+ + "It should be greater than HA election retry period '%d'", + ClusterMgrHA.ElectionRenewDeadline, ClusterMgrHA.ElectionRetryPeriod) + } + return nil +} + +func buildMonitoringConfig(ctx *cli.Context, cli, file *config) error { + var err error + if err = overrideFields(&Monitoring, &file.Monitoring, &savedMonitoring); err != nil { + return err + } + if err = overrideFields(&Monitoring, &cli.Monitoring, &savedMonitoring); err != nil { + return err + } + return nil +} + +// completeMonitoringConfig completes the Monitoring config by parsing raw values +// into their final form. +func completeMonitoringConfig() error { + var err error + if Monitoring.RawNetFlowTargets != "" { + Monitoring.NetFlowTargets, err = ParseFlowCollectors(Monitoring.RawNetFlowTargets) + if err != nil { + return fmt.Errorf("netflow targets invalid: %v", err) + } + } + if Monitoring.RawSFlowTargets != "" { + Monitoring.SFlowTargets, err = ParseFlowCollectors(Monitoring.RawSFlowTargets) + if err != nil { + return fmt.Errorf("sflow targets invalid: %v", err) + } + } + if Monitoring.RawIPFIXTargets != "" { + Monitoring.IPFIXTargets, err = ParseFlowCollectors(Monitoring.RawIPFIXTargets) + if err != nil { + return fmt.Errorf("ipfix targets invalid: %v", err) + } + } + return nil +} + +func buildIPFIXConfig(cli, file *config) error { + if err := overrideFields(&IPFIX, &file.IPFIX, &savedIPFIX); err != nil { + return err + } + return overrideFields(&IPFIX, &cli.IPFIX, &savedIPFIX) +} + +func buildHybridOverlayConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&HybridOverlay, &file.HybridOverlay, &savedHybridOverlay); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&HybridOverlay, &cli.HybridOverlay, &savedHybridOverlay); err != nil { + return err + } + + if HybridOverlay.Enabled && HybridOverlay.VXLANPort > 65535 { + return fmt.Errorf("hybrid overlay vxlan port is invalid. The port cannot be larger than 65535") + } + + return nil +} + +// completeHybridOverlayConfig completes the HybridOverlay config by parsing raw values +// into their final form. 
+func completeHybridOverlayConfig(allSubnets *configSubnets) error { + if !HybridOverlay.Enabled || len(HybridOverlay.RawClusterSubnets) == 0 { + return nil + } + + var err error + HybridOverlay.ClusterSubnets, err = ParseClusterSubnetEntries(HybridOverlay.RawClusterSubnets) + if err != nil { + return fmt.Errorf("hybrid overlay cluster subnet invalid: %v", err) + } + for _, subnet := range HybridOverlay.ClusterSubnets { + allSubnets.append(configSubnetHybrid, subnet.CIDR) + } + + return nil +} + +func buildClusterManagerConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&ClusterManager, &file.ClusterManager, &savedClusterManager); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&ClusterManager, &cli.ClusterManager, &savedClusterManager); err != nil { + return err + } + + return nil +} + +// completeClusterManagerConfig completes the ClusterManager config by parsing raw values +// into their final form. +func completeClusterManagerConfig(allSubnets *configSubnets) error { + // Validate v4 and v6 transit switch subnets + v4IP, v4TransitCIDR, err := net.ParseCIDR(ClusterManager.V4TransitSwitchSubnet) + if err != nil || utilnet.IsIPv6(v4IP) { + return fmt.Errorf("invalid transit switch v4 subnet specified, subnet: %s: error: %v", ClusterManager.V4TransitSwitchSubnet, err) + } + + v6IP, v6TransitCIDR, err := net.ParseCIDR(ClusterManager.V6TransitSwitchSubnet) + if err != nil || !utilnet.IsIPv6(v6IP) { + return fmt.Errorf("invalid transit switch v6 subnet specified, subnet: %s: error: %v", ClusterManager.V6TransitSwitchSubnet, err) + } + allSubnets.append(configSubnetTransit, v4TransitCIDR) + allSubnets.append(configSubnetTransit, v6TransitCIDR) + return nil +} + +func buildDefaultConfig(cli, file *config) error { + if err := overrideFields(&Default, &file.Default, &savedDefault); err != nil { + return err + } + + if err := overrideFields(&Default, &cli.Default, &savedDefault); err != nil { + return err + } + + // Legacy cluster-subnet CLI option overrides config file or --cluster-subnets + if clusterSubnet != "" { + Default.RawClusterSubnets = clusterSubnet + } + if Default.RawClusterSubnets == "" { + return fmt.Errorf("cluster subnet is required") + } + + if Default.Zone == "" { + Default.Zone = types.OvnDefaultZone + } + return nil +} + +// completeDefaultConfig completes the Default config by parsing raw values +// into their final form. 
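completeClusterManagerConfig above applies the same sanity check used for the join and masquerade subnets: the v4 entry must parse and actually be an IPv4 network, and the v6 entry must be IPv6, before both are registered for overlap checking. A small sketch of that check using net.ParseCIDR and k8s.io/utils/net (the subnet strings are placeholders, not the project defaults):

    package main

    import (
        "fmt"
        "net"

        utilnet "k8s.io/utils/net"
    )

    // validatePair mirrors the ParseCIDR + IsIPv6 checks applied to the
    // join, masquerade and transit switch subnet pairs.
    func validatePair(v4Subnet, v6Subnet string) error {
        v4IP, _, err := net.ParseCIDR(v4Subnet)
        if err != nil || utilnet.IsIPv6(v4IP) {
            return fmt.Errorf("invalid v4 subnet %q: %v", v4Subnet, err)
        }
        v6IP, _, err := net.ParseCIDR(v6Subnet)
        if err != nil || !utilnet.IsIPv6(v6IP) {
            return fmt.Errorf("invalid v6 subnet %q: %v", v6Subnet, err)
        }
        return nil
    }

    func main() {
        fmt.Println(validatePair("100.88.0.0/16", "fd97::/64")) // <nil>
        fmt.Println(validatePair("fd97::/64", "100.88.0.0/16")) // error
    }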
+func completeDefaultConfig(allSubnets *configSubnets) error { + var err error + Default.ClusterSubnets, err = ParseClusterSubnetEntries(Default.RawClusterSubnets) + if err != nil { + return fmt.Errorf("cluster subnet invalid: %v", err) + } + for _, subnet := range Default.ClusterSubnets { + allSubnets.append(configSubnetCluster, subnet.CIDR) + } + + Default.HostMasqConntrackZone = Default.ConntrackZone + 1 + Default.OVNMasqConntrackZone = Default.ConntrackZone + 2 + Default.HostNodePortConntrackZone = Default.ConntrackZone + 3 + Default.ReassemblyConntrackZone = Default.ConntrackZone + 4 + return nil +} + +// getConfigFilePath returns config file path and 'true' if the config file is +// the fallback path (eg not given by the user), 'false' if given explicitly +// by the user +func getConfigFilePath(ctx *cli.Context) (string, bool) { + configFile := ctx.String("config-file") + if configFile != "" { + return configFile, false + } + return "/etc/openvswitch/ovn_k8s.conf", true +} + +// InitConfig reads the config file and common command-line options and +// constructs the global config object from them. It returns the config file +// path (if explicitly specified) or an error +func InitConfig(ctx *cli.Context, exec kexec.Interface, defaults *Defaults) (string, error) { + return initConfigWithPath(ctx, exec, kubeServiceAccountPath, defaults) +} + +// InitConfigSa reads the config file and common command-line options and +// constructs the global config object from them. It passes the service account directory. +// It returns the config file path (if explicitly specified) or an error +func InitConfigSa(ctx *cli.Context, exec kexec.Interface, saPath string, defaults *Defaults) (string, error) { + return initConfigWithPath(ctx, exec, saPath, defaults) +} + +// stripTokenFromK8sConfig removes k8s SA token & CAData values +// from the KubernetesConfig struct used for logging. +func stripTokenFromK8sConfig() KubernetesConfig { + k8sConf := Kubernetes + // Token and CAData are sensitive fields so stripping + // them while logging. + k8sConf.Token = "" + k8sConf.CAData = []byte{} + return k8sConf +} + +// initConfigWithPath reads the given config file (or if empty, reads the config file +// specified by command-line arguments, or empty, the default config file) and +// common command-line options and constructs the global config object from +// them. 
It returns the config file path (if explicitly specified) or an error +func initConfigWithPath(ctx *cli.Context, exec kexec.Interface, saPath string, defaults *Defaults) (string, error) { + var retConfigFile string + var configFile string + var configFileIsDefault bool + var err error + // initialize cfg with default values, allow file read to override + cfg := config{ + Default: savedDefault, + Logging: savedLogging, + IPFIX: savedIPFIX, + CNI: savedCNI, + OVNKubernetesFeature: savedOVNKubernetesFeature, + Kubernetes: savedKubernetes, + OvnNorth: savedOvnNorth, + OvnSouth: savedOvnSouth, + Gateway: savedGateway, + MasterHA: savedMasterHA, + ClusterMgrHA: savedClusterMgrHA, + HybridOverlay: savedHybridOverlay, + OvnKubeNode: savedOvnKubeNode, + ClusterManager: savedClusterManager, + } + + configFile, configFileIsDefault = getConfigFilePath(ctx) + + if !configFileIsDefault { + // Only return explicitly specified config file + retConfigFile = configFile + } + + f, err := os.Open(configFile) + // Failure to find a default config file is not a hard error + if err != nil && !configFileIsDefault { + return "", fmt.Errorf("failed to open config file %s: %v", configFile, err) + } + if f != nil { + defer f.Close() + + // Parse ovn-k8s config file. + if err = gcfg.ReadInto(&cfg, f); err != nil { + if gcfg.FatalOnly(err) != nil { + return "", fmt.Errorf("failed to parse config file %s: %v", f.Name(), err) + } + // error is only a warning -> log it but continue + klog.Warningf("Warning on parsing config file: %s", err) + } + klog.Infof("Parsed config file %s", f.Name()) + klog.Infof("Parsed config: %+v", cfg) + } + + if defaults == nil { + defaults = &Defaults{} + } + + // Build config that needs no special processing + if err = overrideFields(&CNI, &cfg.CNI, &savedCNI); err != nil { + return "", err + } + if err = overrideFields(&CNI, &cliConfig.CNI, &savedCNI); err != nil { + return "", err + } + + // Logging setup + if err = overrideFields(&Logging, &cfg.Logging, &savedLogging); err != nil { + return "", err + } + if err = overrideFields(&Logging, &cliConfig.Logging, &savedLogging); err != nil { + return "", err + } + + var level klog.Level + if err := level.Set(strconv.Itoa(Logging.Level)); err != nil { + return "", fmt.Errorf("failed to set klog log level %v", err) + } + if Logging.File != "" { + klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) + klog.InitFlags(klogFlags) + if err := klogFlags.Set("logtostderr", "false"); err != nil { + klog.Errorf("Error setting klog logtostderr: %v", err) + } + if err := klogFlags.Set("alsologtostderr", "true"); err != nil { + klog.Errorf("Error setting klog alsologtostderr: %v", err) + } + klog.SetOutput(&lumberjack.Logger{ + Filename: Logging.File, + MaxSize: Logging.LogFileMaxSize, // megabytes + MaxBackups: Logging.LogFileMaxBackups, + MaxAge: Logging.LogFileMaxAge, // days + Compress: true, + }) + } + + if err = buildDefaultConfig(&cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildKubernetesConfig(exec, &cliConfig, &cfg, saPath, defaults); err != nil { + return "", err + } + + // Metrics must be built after Kubernetes to ensure metrics options override + // legacy Kubernetes metrics options + if err = buildMetricsConfig(&cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildOVNKubernetesFeatureConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildGatewayConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildMasterHAConfig(ctx, &cliConfig, &cfg); err != nil { 
+ return "", err + } + + if err = buildClusterMgrHAConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildMonitoringConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildIPFIXConfig(&cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildHybridOverlayConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildOvnKubeNodeConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildClusterManagerConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + tmpAuth, err := buildOvnAuth(exec, true, &cliConfig.OvnNorth, &cfg.OvnNorth, defaults.OvnNorthAddress) + if err != nil { + return "", err + } + OvnNorth = *tmpAuth + + tmpAuth, err = buildOvnAuth(exec, false, &cliConfig.OvnSouth, &cfg.OvnSouth, false) + if err != nil { + return "", err + } + OvnSouth = *tmpAuth + + if err := completeConfig(); err != nil { + return "", err + } + + klog.V(5).Infof("Default config: %+v", Default) + klog.V(5).Infof("Logging config: %+v", Logging) + klog.V(5).Infof("Monitoring config: %+v", Monitoring) + klog.V(5).Infof("IPFIX config: %+v", IPFIX) + klog.V(5).Infof("CNI config: %+v", CNI) + klog.V(5).Infof("Kubernetes config: %+v", stripTokenFromK8sConfig()) + klog.V(5).Infof("Gateway config: %+v", Gateway) + klog.V(5).Infof("OVN North config: %+v", OvnNorth) + klog.V(5).Infof("OVN South config: %+v", OvnSouth) + klog.V(5).Infof("Hybrid Overlay config: %+v", HybridOverlay) + klog.V(5).Infof("Ovnkube Node config: %+v", OvnKubeNode) + klog.V(5).Infof("Ovnkube Cluster Manager config: %+v", ClusterManager) + + return retConfigFile, nil +} + +func completeConfig() error { + allSubnets := newConfigSubnets() + + if err := completeKubernetesConfig(allSubnets); err != nil { + return err + } + if err := completeDefaultConfig(allSubnets); err != nil { + return err + } + + if err := completeGatewayConfig(allSubnets, &Gateway.MasqueradeIPs); err != nil { + return err + } + if err := completeMonitoringConfig(); err != nil { + return err + } + if err := completeHybridOverlayConfig(allSubnets); err != nil { + return err + } + + if err := completeClusterManagerConfig(allSubnets); err != nil { + return err + } + + if err := allSubnets.checkForOverlaps(); err != nil { + return err + } + + var err error + IPv4Mode, IPv6Mode, err = allSubnets.checkIPFamilies() + if err != nil { + return err + } + + return nil +} + +func pathExists(path string) bool { + _, err := os.Stat(path) + if err != nil && os.IsNotExist(err) { + return false + } + return true +} + +// parseAddress parses an OVN database address, which can be of form +// "ssl:1.2.3.4:6641,ssl:1.2.3.5:6641" (OVS/OVN format) or +// "ssl://1.2.3.4:6641,ssl://1.2.3.5:6641" (legacy ovnkube format) +// or "ssl:[fd01::1]:6641,ssl:[fd01::2]:6641 +// and returns the validated address(es) and the scheme +func parseAddress(urlString string) (string, OvnDBScheme, error) { + var parsedAddress, scheme string + var parsedScheme OvnDBScheme + + urlString = strings.Replace(urlString, "//", "", -1) + for _, ovnAddress := range strings.Split(urlString, ",") { + splits := strings.SplitN(ovnAddress, ":", 2) + if len(splits) != 2 { + return "", "", fmt.Errorf("failed to parse OVN address %s", urlString) + } + + if scheme == "" { + scheme = splits[0] + } else if scheme != splits[0] { + return "", "", fmt.Errorf("invalid protocols in OVN address %s", + urlString) + } + + if scheme == "unix" { + if parsedAddress != "" { + parsedAddress += "," + } + parsedAddress += ovnAddress + } 
else { + host, port, err := net.SplitHostPort(splits[1]) + if err != nil { + return "", "", fmt.Errorf("failed to parse OVN DB host/port %q: %v", + splits[1], err) + } + + if parsedAddress != "" { + parsedAddress += "," + } + parsedAddress += fmt.Sprintf("%s:%s", scheme, net.JoinHostPort(host, port)) + } + } + + switch { + case scheme == "ssl": + parsedScheme = OvnDBSchemeSSL + case scheme == "tcp": + parsedScheme = OvnDBSchemeTCP + case scheme == "unix": + parsedScheme = OvnDBSchemeUnix + default: + return "", "", fmt.Errorf("unknown OVN DB scheme %q", scheme) + } + return parsedAddress, parsedScheme, nil +} + +// buildOvnAuth returns an OvnAuthConfig object describing the connection to an +// OVN database, given a connection description string and authentication +// details +func buildOvnAuth(exec kexec.Interface, northbound bool, cliAuth, confAuth *OvnAuthConfig, readAddress bool) (*OvnAuthConfig, error) { + auth := &OvnAuthConfig{ + northbound: northbound, + exec: exec, + } + + var direction string + var defaultAuth *OvnAuthConfig + if northbound { + direction = "nb" + defaultAuth = &savedOvnNorth + } else { + direction = "sb" + defaultAuth = &savedOvnSouth + } + + // Determine final address so we know how to set cert/key defaults + address := cliAuth.Address + if address == "" { + address = confAuth.Address + } + if address == "" && readAddress { + address = getOVSExternalID(exec, "ovn-"+direction) + } + if strings.HasPrefix(address, "ssl") { + // Set up default SSL cert/key paths + auth.CACert = "/etc/openvswitch/ovn" + direction + "-ca.cert" + auth.PrivKey = "/etc/openvswitch/ovn" + direction + "-privkey.pem" + auth.Cert = "/etc/openvswitch/ovn" + direction + "-cert.pem" + } + + // Build the final auth config with overrides from CLI and config file + if err := overrideFields(auth, confAuth, defaultAuth); err != nil { + return nil, err + } + if err := overrideFields(auth, cliAuth, defaultAuth); err != nil { + return nil, err + } + + if address == "" { + if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" { + return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?") + } + auth.Scheme = OvnDBSchemeUnix + auth.Address = fmt.Sprintf("unix:/var/run/ovn/ovn%s_db.sock", direction) + return auth, nil + } + + var err error + auth.Address, auth.Scheme, err = parseAddress(address) + if err != nil { + return nil, err + } + + switch { + case auth.Scheme == OvnDBSchemeSSL: + if auth.PrivKey == "" || auth.Cert == "" || auth.CACert == "" || auth.CertCommonName == "" { + return nil, fmt.Errorf("must specify private key, certificate, CA certificate, and common name used in the certificate for 'ssl' scheme") + } + case auth.Scheme == OvnDBSchemeTCP: + if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" { + return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?") + } + case auth.Scheme == OvnDBSchemeUnix: + if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" { + return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?") + } + } + + return auth, nil +} + +func (a *OvnAuthConfig) ensureCACert() error { + if pathExists(a.CACert) { + // CA file exists, nothing to do + return nil + } + + // Client can bootstrap the CA from the OVN API. Use nbctl for both + // SB and NB since ovn-sbctl only supports --bootstrap-ca-cert from + // 2.9.90+. 
+ // FIXME: change back to a.ctlCmd when sbctl supports --bootstrap-ca-cert + // https://github.com/openvswitch/ovs/pull/226 + args := []string{ + "--db=" + a.GetURL(), + "--timeout=5", + } + if a.Scheme == OvnDBSchemeSSL { + args = append(args, "--private-key="+a.PrivKey) + args = append(args, "--certificate="+a.Cert) + args = append(args, "--bootstrap-ca-cert="+a.CACert) + } + args = append(args, "list", "nb_global") + _, _ = rawExec(a.exec, "ovn-nbctl", args...) + if _, err := os.Stat(a.CACert); os.IsNotExist(err) { + klog.Warningf("Bootstrapping %s CA certificate failed", a.CACert) + } + return nil +} + +// GetURL returns a URL suitable for passing to ovn-northd which describes the +// transport mechanism for connection to the database +func (a *OvnAuthConfig) GetURL() string { + return a.Address +} + +// SetDBAuth sets the authentication configuration and connection method +// for the OVN northbound or southbound database server or client +func (a *OvnAuthConfig) SetDBAuth() error { + if a.Scheme == OvnDBSchemeSSL { + // Both server and client SSL schemes require privkey and cert + if !pathExists(a.PrivKey) { + return fmt.Errorf("private key file %s not found", a.PrivKey) + } + if !pathExists(a.Cert) { + return fmt.Errorf("certificate file %s not found", a.Cert) + } + + // Client can bootstrap the CA cert from the DB + if err := a.ensureCACert(); err != nil { + return err + } + + // Tell Southbound DB clients (like ovn-controller) + // which certificates to use to talk to the DB. + // Must happen *before* setting the "ovn-remote" + // external-id. + if !a.northbound { + out, err := runOVSVsctl(a.exec, "del-ssl") + if err != nil { + return fmt.Errorf("error deleting ovs-vsctl SSL "+ + "configuration: %q (%v)", out, err) + } + + out, err = runOVSVsctl(a.exec, "set-ssl", a.PrivKey, a.Cert, a.CACert) + if err != nil { + return fmt.Errorf("error setting client southbound DB SSL options: %v\n %q", err, out) + } + } + } + + if !a.northbound { + // store the Southbound Database address in an external id - "external_ids:ovn-remote" + if err := setOVSExternalID(a.exec, "ovn-remote", "\""+a.GetURL()+"\""); err != nil { + return err + } + } + + return nil +} + +func (a *OvnAuthConfig) updateIP(newIPs []string, port string) { + newAddresses := make([]string, 0, len(newIPs)) + for _, ipAddress := range newIPs { + newAddresses = append(newAddresses, fmt.Sprintf("%v:%s", a.Scheme, net.JoinHostPort(ipAddress, port))) + } + a.Address = strings.Join(newAddresses, ",") +} + +// UpdateOVNNodeAuth updates the host and URL in ClientAuth +// for both OvnNorth and OvnSouth. It updates them with the new masterIP. +func UpdateOVNNodeAuth(masterIP []string, southboundDBPort, northboundDBPort string) { + klog.V(5).Infof("Update OVN node auth with new master ip: %s", masterIP) + OvnNorth.updateIP(masterIP, northboundDBPort) + OvnSouth.updateIP(masterIP, southboundDBPort) +} + +// ovnKubeNodeModeSupported validates the provided mode is supported by ovnkube node +func ovnKubeNodeModeSupported(mode string) error { + found := false + supportedModes := []string{types.NodeModeFull, types.NodeModeDPU, types.NodeModeDPUHost} + for _, m := range supportedModes { + if mode == m { + found = true + break + } + } + if !found { + return fmt.Errorf("unexpected ovnkube-node-mode: %s. 
supported modes: %v", mode, supportedModes) + } + return nil +} + +// buildOvnKubeNodeConfig updates OvnKubeNode config from cli and config file +func buildOvnKubeNodeConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&OvnKubeNode, &file.OvnKubeNode, &savedOvnKubeNode); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&OvnKubeNode, &cli.OvnKubeNode, &savedOvnKubeNode); err != nil { + return err + } + + // validate ovnkube-node-mode + if err := ovnKubeNodeModeSupported(OvnKubeNode.Mode); err != nil { + return err + } + + // ovnkube-node-mode dpu/dpu-host does not support hybrid overlay + if OvnKubeNode.Mode != types.NodeModeFull && HybridOverlay.Enabled { + return fmt.Errorf("hybrid overlay is not supported with ovnkube-node mode %s", OvnKubeNode.Mode) + } + + // Warn the user if both MgmtPortNetdev and MgmtPortDPResourceName are specified since they + // configure the management port. + if OvnKubeNode.MgmtPortNetdev != "" && OvnKubeNode.MgmtPortDPResourceName != "" { + klog.Warningf("ovnkube-node-mgmt-port-netdev (%s) and ovnkube-node-mgmt-port-dp-resource-name (%s) "+ + "both specified. The provided netdev in ovnkube-node-mgmt-port-netdev will be overriden by a netdev "+ + "chosen by the resource provided by ovnkube-node-mgmt-port-dp-resource-name.", + OvnKubeNode.MgmtPortNetdev, OvnKubeNode.MgmtPortDPResourceName) + } + + // when DPU is used, management port is always backed by a representor. On the + // host side, it needs to be provided through --ovnkube-node-mgmt-port-netdev. + // On the DPU, it is derrived from the annotation exposed on the host side. + if OvnKubeNode.Mode == types.NodeModeDPU && !(OvnKubeNode.MgmtPortNetdev == "" && OvnKubeNode.MgmtPortDPResourceName == "") { + return fmt.Errorf("ovnkube-node-mgmt-port-netdev or ovnkube-node-mgmt-port-dp-resource-name must not be provided") + } + if OvnKubeNode.Mode == types.NodeModeDPUHost && OvnKubeNode.MgmtPortNetdev == "" && OvnKubeNode.MgmtPortDPResourceName == "" { + return fmt.Errorf("ovnkube-node-mgmt-port-netdev or ovnkube-node-mgmt-port-dp-resource-name must be provided") + } + return nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go new file mode 100644 index 000000000..17cdb3027 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go @@ -0,0 +1,345 @@ +package config + +import ( + "fmt" + "net" + "reflect" + "strconv" + "strings" + + iputils "github.com/containernetworking/plugins/pkg/ip" + utilnet "k8s.io/utils/net" +) + +// HostPort is the object that holds the definition for a host and port tuple +type HostPort struct { + Host *net.IP + Port int32 +} + +// String representation of a HostPort entry +func (hp *HostPort) String() string { + switch { + case hp.Host == nil: + return fmt.Sprintf(":%d", hp.Port) + case hp.Host.To4() != nil: + return fmt.Sprintf("%s:%d", *hp.Host, hp.Port) + default: + return fmt.Sprintf("[%s]:%d", *hp.Host, hp.Port) + } +} + +// CIDRNetworkEntry is the object that holds the definition for a single network CIDR range +type CIDRNetworkEntry struct { + CIDR *net.IPNet + HostSubnetLength int +} + +func (c CIDRNetworkEntry) String() string { + return fmt.Sprintf("%s/%d", c.CIDR.String(), c.HostSubnetLength) +} + +// ParseClusterSubnetEntriesWithDefaults returns the parsed set of +// 
CIDRNetworkEntries. These entries define a network space by specifying a set +// of CIDR and netmasks the SDN can allocate addresses from including how that +// network space is partitioned for each of the cluster nodes. When no host +// specific prefix length is specified, the provided ones are assumed as +// default. The host specific prefix length is validated to be greater than the +// overall subnet length. When 0 is specified as default host specific prefix +// length, no host specific prefix length is allowed or validated. +func ParseClusterSubnetEntriesWithDefaults(clusterSubnetCmd string, ipv4HostLength, ipv6HostLength int) ([]CIDRNetworkEntry, error) { + var parsedClusterList []CIDRNetworkEntry + clusterEntriesList := strings.Split(clusterSubnetCmd, ",") + + ipv4HostLengthAllowed := ipv4HostLength != 0 + ipv6HostLengthAllowed := ipv6HostLength != 0 + + for _, clusterEntry := range clusterEntriesList { + clusterEntry := strings.TrimSpace(clusterEntry) + splitClusterEntry := strings.Split(clusterEntry, "/") + + if len(splitClusterEntry) < 2 || len(splitClusterEntry) > 3 { + return nil, fmt.Errorf("CIDR %q not properly formatted", clusterEntry) + } + + var err error + var parsedClusterEntry CIDRNetworkEntry + _, parsedClusterEntry.CIDR, err = net.ParseCIDR(fmt.Sprintf("%s/%s", splitClusterEntry[0], splitClusterEntry[1])) + if err != nil { + return nil, err + } + + ipv6 := utilnet.IsIPv6(parsedClusterEntry.CIDR.IP) + hostLengthAllowed := (ipv6 && ipv6HostLengthAllowed) || (!ipv6 && ipv4HostLengthAllowed) + + entryMaskLength, _ := parsedClusterEntry.CIDR.Mask.Size() + if len(splitClusterEntry) == 3 { + if !hostLengthAllowed { + return nil, fmt.Errorf("CIDR %q not properly formatted", clusterEntry) + } + tmp, err := strconv.Atoi(splitClusterEntry[2]) + if err != nil { + return nil, err + } + parsedClusterEntry.HostSubnetLength = tmp + } else { + if ipv6 { + parsedClusterEntry.HostSubnetLength = ipv6HostLength + } else { + // default for backward compatibility + parsedClusterEntry.HostSubnetLength = ipv4HostLength + } + } + + if hostLengthAllowed { + if ipv6 && ipv6HostLengthAllowed && parsedClusterEntry.HostSubnetLength != 64 { + return nil, fmt.Errorf("IPv6 only supports /64 host subnets") + } + + if !ipv6 && parsedClusterEntry.HostSubnetLength > 32 { + return nil, fmt.Errorf("invalid host subnet, IPv4 subnet must be < 32") + } + + if parsedClusterEntry.HostSubnetLength <= entryMaskLength { + return nil, fmt.Errorf("cannot use a host subnet length mask shorter than or equal to the cluster subnet mask. "+ + "host subnet length: %d, cluster subnet length: %d", parsedClusterEntry.HostSubnetLength, entryMaskLength) + } + } + + parsedClusterList = append(parsedClusterList, parsedClusterEntry) + } + + if len(parsedClusterList) == 0 { + return nil, fmt.Errorf("failed to parse any CIDRs from %q", clusterSubnetCmd) + } + + return parsedClusterList, nil +} + +// ParseClusterSubnetEntries returns the parsed set of +// CIDRNetworkEntries. If not specified, it assumes a default host specific +// prefix length of 24 or 64 bits for ipv4 and ipv6 respectively. +func ParseClusterSubnetEntries(clusterSubnetCmd string) ([]CIDRNetworkEntry, error) { + // default to 24 bits host specific prefix length for backward compatibility + return ParseClusterSubnetEntriesWithDefaults(clusterSubnetCmd, 24, 64) +} + +// ParseFlowCollectors returns the parsed set of HostPorts passed by the user on the command line +// These entries define the flow collectors OVS will send flow metadata by using NetFlow/SFlow/IPFIX. 
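The entry format accepted by ParseClusterSubnetEntries above is CIDR with an optional trailing host-subnet length, for example "10.128.0.0/14/23". A hedged usage sketch, assuming the vendored package is importable under its upstream path (values are illustrative):

    package main

    import (
        "fmt"

        "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
    )

    func main() {
        // "10.128.0.0/14/23": carve /23 node subnets out of 10.128.0.0/14.
        // The IPv6 entry omits the host length, so it falls back to the
        // /64 default applied by ParseClusterSubnetEntries.
        entries, err := config.ParseClusterSubnetEntries("10.128.0.0/14/23,fd01::/48")
        if err != nil {
            panic(err)
        }
        for _, e := range entries {
            fmt.Printf("cidr=%s hostSubnetLength=%d\n", e.CIDR, e.HostSubnetLength)
        }
        // Host lengths shorter than or equal to the cluster mask, or IPv6
        // host lengths other than /64, are rejected with an error.
    }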
+func ParseFlowCollectors(flowCollectors string) ([]HostPort, error) { + var parsedFlowsCollectors []HostPort + readCollectors := map[string]struct{}{} + collectors := strings.Split(flowCollectors, ",") + for _, v := range collectors { + host, port, err := net.SplitHostPort(v) + if err != nil { + return nil, fmt.Errorf("cannot parse hostport: %v", err) + } + var ipp *net.IP + // If the host IP is not provided, we keep it nil and later will assume the Node IP + if host != "" { + ip := net.ParseIP(host) + if ip == nil { + return nil, fmt.Errorf("collector IP %s is not a valid IP", host) + } + ipp = &ip + } + parsedPort, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return nil, fmt.Errorf("collector port %s is not a valid port: %v", port, err) + } + // checking if HostPort entry is duplicate + hostPort := HostPort{Host: ipp, Port: int32(parsedPort)} + hps := hostPort.String() + if _, ok := readCollectors[hps]; ok { + // duplicate flow collector. Ignore it + continue + } + readCollectors[hps] = struct{}{} + parsedFlowsCollectors = append(parsedFlowsCollectors, hostPort) + } + + return parsedFlowsCollectors, nil +} + +type configSubnetType string + +const ( + configSubnetJoin configSubnetType = "built-in join subnet" + configSubnetCluster configSubnetType = "cluster subnet" + configSubnetService configSubnetType = "service subnet" + configSubnetHybrid configSubnetType = "hybrid overlay subnet" + configSubnetMasquerade configSubnetType = "masquerade subnet" + configSubnetTransit configSubnetType = "transit switch subnet" +) + +type configSubnet struct { + subnetType configSubnetType + subnet *net.IPNet +} + +// configSubnets represents a set of configured subnets (and their names) +type configSubnets struct { + subnets []configSubnet + v4 map[configSubnetType]bool + v6 map[configSubnetType]bool +} + +// newConfigSubnets returns a new configSubnets +func newConfigSubnets() *configSubnets { + return &configSubnets{ + v4: make(map[configSubnetType]bool), + v6: make(map[configSubnetType]bool), + } +} + +// append adds a single subnet to cs +func (cs *configSubnets) append(subnetType configSubnetType, subnet *net.IPNet) { + cs.subnets = append(cs.subnets, configSubnet{subnetType: subnetType, subnet: subnet}) + if subnetType != configSubnetJoin && subnetType != configSubnetMasquerade && subnetType != configSubnetTransit { + if utilnet.IsIPv6CIDR(subnet) { + cs.v6[subnetType] = true + } else { + cs.v4[subnetType] = true + } + } +} + +// checkForOverlaps checks if any of the subnets in cs overlap +func (cs *configSubnets) checkForOverlaps() error { + for i, si := range cs.subnets { + for j := 0; j < i; j++ { + sj := cs.subnets[j] + if si.subnet.Contains(sj.subnet.IP) || sj.subnet.Contains(si.subnet.IP) { + return fmt.Errorf("illegal network configuration: %s %q overlaps %s %q", + si.subnetType, si.subnet.String(), + sj.subnetType, sj.subnet.String()) + } + } + } + return nil +} + +func (cs *configSubnets) describeSubnetType(subnetType configSubnetType) string { + ipv4 := cs.v4[subnetType] + ipv6 := cs.v6[subnetType] + var familyType string + switch { + case ipv4 && !ipv6: + familyType = "IPv4" + case !ipv4 && ipv6: + familyType = "IPv6" + case ipv4 && ipv6: + familyType = "dual-stack" + default: + familyType = "unknown type" + } + return familyType + " " + string(subnetType) +} + +// checkIPFamilies determines if cs contains a valid single-stack IPv4 configuration, a +// valid single-stack IPv6 configuration, a valid dual-stack configuration, or none of the +// above. 
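checkForOverlaps above treats two configured subnets as overlapping whenever either one contains the other's base address. A stdlib-only sketch of that containment test (the example CIDRs are arbitrary):

    package main

    import (
        "fmt"
        "net"
    )

    // overlaps mirrors the test used by configSubnets.checkForOverlaps.
    func overlaps(a, b *net.IPNet) bool {
        return a.Contains(b.IP) || b.Contains(a.IP)
    }

    func main() {
        _, clusterNet, _ := net.ParseCIDR("10.128.0.0/14")
        _, serviceNet, _ := net.ParseCIDR("172.30.0.0/16")
        _, badNet, _ := net.ParseCIDR("10.129.0.0/24") // falls inside the cluster subnet
        fmt.Println(overlaps(clusterNet, serviceNet)) // false
        fmt.Println(overlaps(clusterNet, badNet))     // true
    }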
+func (cs *configSubnets) checkIPFamilies() (usingIPv4, usingIPv6 bool, err error) { + if len(cs.v6) == 0 { + // Single-stack IPv4 + return true, false, nil + } else if len(cs.v4) == 0 { + // Single-stack IPv6 + return false, true, nil + } else if reflect.DeepEqual(cs.v4, cs.v6) { + // Dual-stack + return true, true, nil + } + + netConfig := cs.describeSubnetType(configSubnetCluster) + netConfig += ", " + cs.describeSubnetType(configSubnetService) + if cs.v4[configSubnetHybrid] || cs.v6[configSubnetHybrid] { + netConfig += ", " + cs.describeSubnetType(configSubnetHybrid) + } + + return false, false, fmt.Errorf("illegal network configuration: %s", netConfig) +} + +func ContainsJoinIP(ip net.IP) bool { + var joinSubnetsConfig []string + if IPv4Mode { + joinSubnetsConfig = append(joinSubnetsConfig, Gateway.V4JoinSubnet) + } + if IPv6Mode { + joinSubnetsConfig = append(joinSubnetsConfig, Gateway.V6JoinSubnet) + } + + for _, subnet := range joinSubnetsConfig { + _, joinSubnet, _ := net.ParseCIDR(subnet) + if joinSubnet.Contains(ip) { + return true + } + } + return false +} + +// masqueradeIP represents the masqueradeIPs used by the masquerade subnets for host to service traffic +type MasqueradeIPsConfig struct { + V4OVNMasqueradeIP net.IP + V6OVNMasqueradeIP net.IP + V4HostMasqueradeIP net.IP + V6HostMasqueradeIP net.IP + V4HostETPLocalMasqueradeIP net.IP + V6HostETPLocalMasqueradeIP net.IP + V4DummyNextHopMasqueradeIP net.IP + V6DummyNextHopMasqueradeIP net.IP + V4OVNServiceHairpinMasqueradeIP net.IP + V6OVNServiceHairpinMasqueradeIP net.IP +} + +// allocateV4/6MasqueradeIPs allocates the masqueradeIPs based off of the passed in masqueradeSubnet (.0) +// it does this by cascading down from the initial ip down to the .5 currently (more masqueradeIps may be added in the future) + +func AllocateV4MasqueradeIPs(masqueradeSubnetNetworkAddress net.IP, masqueradeIPs *MasqueradeIPsConfig) error { + masqueradeIPs.V4OVNMasqueradeIP = iputils.NextIP(masqueradeSubnetNetworkAddress) + if masqueradeIPs.V4OVNMasqueradeIP == nil { + return fmt.Errorf("error setting V4OVNMasqueradeIP: %s", masqueradeSubnetNetworkAddress) + } + masqueradeIPs.V4HostMasqueradeIP = iputils.NextIP(masqueradeIPs.V4OVNMasqueradeIP) //using the last set ip we can cascade from the .0 down + if masqueradeIPs.V4HostMasqueradeIP == nil { + return fmt.Errorf("error setting V4HostMasqueradeIP: %s", masqueradeIPs.V4OVNMasqueradeIP) + } + masqueradeIPs.V4HostETPLocalMasqueradeIP = iputils.NextIP(masqueradeIPs.V4HostMasqueradeIP) + if masqueradeIPs.V4HostETPLocalMasqueradeIP == nil { + return fmt.Errorf("error setting V4HostETPLocalMasqueradeIP: %s", masqueradeIPs.V4HostMasqueradeIP) + } + masqueradeIPs.V4DummyNextHopMasqueradeIP = iputils.NextIP(masqueradeIPs.V4HostETPLocalMasqueradeIP) + if masqueradeIPs.V4DummyNextHopMasqueradeIP == nil { + return fmt.Errorf("error setting V4DummyNextHopMasqueradeIP: %s", masqueradeIPs.V4HostETPLocalMasqueradeIP) + } + masqueradeIPs.V4OVNServiceHairpinMasqueradeIP = iputils.NextIP(masqueradeIPs.V4DummyNextHopMasqueradeIP) + if masqueradeIPs.V4OVNServiceHairpinMasqueradeIP == nil { + return fmt.Errorf("error setting V4OVNServiceHairpinMasqueradeIP: %s", masqueradeIPs.V4DummyNextHopMasqueradeIP) + } + return nil +} + +func AllocateV6MasqueradeIPs(masqueradeSubnetNetworkAddress net.IP, masqueradeIPs *MasqueradeIPsConfig) error { + masqueradeIPs.V6OVNMasqueradeIP = iputils.NextIP(masqueradeSubnetNetworkAddress) + if masqueradeIPs.V6OVNMasqueradeIP == nil { + return fmt.Errorf("error setting V6OVNMasqueradeIP: 
%s", masqueradeSubnetNetworkAddress) + } + masqueradeIPs.V6HostMasqueradeIP = iputils.NextIP(masqueradeIPs.V6OVNMasqueradeIP) //using the last set ip we can cascade from the .0 down + if masqueradeIPs.V6HostMasqueradeIP == nil { + return fmt.Errorf("error setting V6HostMasqueradeIP: %s", masqueradeIPs.V6OVNMasqueradeIP) + } + masqueradeIPs.V6HostETPLocalMasqueradeIP = iputils.NextIP(masqueradeIPs.V6HostMasqueradeIP) + if masqueradeIPs.V6HostETPLocalMasqueradeIP == nil { + return fmt.Errorf("error setting V6HostETPLocalMasqueradeIP: %s", masqueradeIPs.V6HostMasqueradeIP) + } + masqueradeIPs.V6DummyNextHopMasqueradeIP = iputils.NextIP(masqueradeIPs.V6HostETPLocalMasqueradeIP) + if masqueradeIPs.V6DummyNextHopMasqueradeIP == nil { + return fmt.Errorf("error setting V6DummyNextHopMasqueradeIP: %s", masqueradeIPs.V6HostETPLocalMasqueradeIP) + } + masqueradeIPs.V6OVNServiceHairpinMasqueradeIP = iputils.NextIP(masqueradeIPs.V6DummyNextHopMasqueradeIP) + if masqueradeIPs.V6OVNServiceHairpinMasqueradeIP == nil { + return fmt.Errorf("error setting V6OVNServiceHairpinMasqueradeIP: %s", masqueradeIPs.V6DummyNextHopMasqueradeIP) + } + return nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go new file mode 100644 index 000000000..9924c9b1d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go @@ -0,0 +1,47 @@ +package cryptorand + +import ( + "crypto/rand" + "encoding/binary" + "k8s.io/klog/v2" + "math/big" +) + +func Intn(n int64) uint64 { + val := new(big.Int).SetInt64(n) + randNum, err := rand.Int(rand.Reader, val) + if err != nil { + klog.Errorf("Error generating random number using crypto/rand : %v", err) + return 0 + } + return randNum.Uint64() +} + +func Uint32() uint32 { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + klog.Errorf("Error reading bytes for random number generation using crypto/rand: %v", err) + return 0 + } + return binary.LittleEndian.Uint32(b) +} + +func Uint64() uint64 { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + klog.Errorf("Error reading bytes for random number generation using crypto/rand: %v", err) + return 0 + } + return binary.LittleEndian.Uint64(b) +} + +func Read(randBytes []byte) []byte { + _, err := rand.Read(randBytes) + if err != nil { + klog.Errorf("Error reading bytes using crypto/rand: %v", err) + return nil + } + return randBytes +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go new file mode 100644 index 000000000..3d6e81514 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go @@ -0,0 +1,181 @@ +package ops + +import ( + "context" + "fmt" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// GetACLName returns the ACL name if it has one otherwise returns +// an empty string. 
+func GetACLName(acl *nbdb.ACL) string { + if acl.Name != nil { + return *acl.Name + } + return "" +} + +func getACLMutableFields(acl *nbdb.ACL) []interface{} { + return []interface{}{&acl.Action, &acl.Direction, &acl.ExternalIDs, &acl.Log, &acl.Match, &acl.Meter, + &acl.Name, &acl.Options, &acl.Priority, &acl.Severity, &acl.Tier, &acl.SampleNew, &acl.SampleEst} +} + +type aclPredicate func(*nbdb.ACL) bool + +// FindACLsWithPredicate looks up ACLs from the cache based on a given predicate +func FindACLsWithPredicate(nbClient libovsdbclient.Client, p aclPredicate) ([]*nbdb.ACL, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + acls := []*nbdb.ACL{} + err := nbClient.WhereCache(p).List(ctx, &acls) + return acls, err +} + +func FindACLs(nbClient libovsdbclient.Client, acls []*nbdb.ACL) ([]*nbdb.ACL, error) { + opModels := make([]operationModel, 0, len(acls)) + foundACLs := make([]*nbdb.ACL, 0, len(acls)) + for i := range acls { + // can't use i in the predicate, for loop replaces it in-memory + acl := acls[i] + found := []*nbdb.ACL{} + opModel := operationModel{ + Model: acl, + ExistingResult: &found, + ErrNotFound: false, + BulkOp: false, + DoAfter: func() { + if len(found) > 0 { + foundACLs = append(foundACLs, found[0]) + } + }, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + err := modelClient.Lookup(opModels...) + return foundACLs, err +} + +// BuildACL builds an ACL with empty optional properties unset +func BuildACL(name string, direction nbdb.ACLDirection, priority int, match string, action nbdb.ACLAction, meter string, + severity nbdb.ACLSeverity, log bool, externalIds map[string]string, options map[string]string, tier int) *nbdb.ACL { + name = fmt.Sprintf("%.63s", name) + + var realName *string + var realMeter *string + var realSeverity *string + if len(name) != 0 { + realName = &name + } + if len(meter) != 0 { + realMeter = &meter + } + if len(severity) != 0 { + realSeverity = &severity + } + acl := &nbdb.ACL{ + Name: realName, + Direction: direction, + Match: match, + Action: action, + Priority: priority, + Severity: realSeverity, + Log: log, + Meter: realMeter, + ExternalIDs: externalIds, + Options: options, + Tier: tier, + } + + return acl +} + +func SetACLLogging(acl *nbdb.ACL, severity nbdb.ACLSeverity, log bool) { + var realSeverity *string + if len(severity) != 0 { + realSeverity = &severity + } + acl.Severity = realSeverity + acl.Log = log +} + +// CreateOrUpdateACLsOps creates or updates the provided ACLs returning the +// corresponding ops +func CreateOrUpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, samplingConfig *SamplingConfig, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(acls)) + for i := range acls { + // can't use i in the predicate, for loop replaces it in-memory + acl := acls[i] + // ensure names are truncated (let's cover our bases from snippets that don't call BuildACL and call this directly) + if acl.Name != nil { + // node ACLs won't have names set + *acl.Name = fmt.Sprintf("%.63s", *acl.Name) + } + opModels = addSample(samplingConfig, opModels, acl) + opModel := operationModel{ + Model: acl, + OnModelUpdates: getACLMutableFields(acl), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) 
+} + +func UpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(acls)) + for i := range acls { + // can't use i in the predicate, for loop replaces it in-memory + acl := acls[i] + opModel := operationModel{ + Model: acl, + OnModelUpdates: getACLMutableFields(acl), + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +// CreateOrUpdateACLs creates or updates the provided ACLs +func CreateOrUpdateACLs(nbClient libovsdbclient.Client, samplingConfig *SamplingConfig, acls ...*nbdb.ACL) error { + ops, err := CreateOrUpdateACLsOps(nbClient, nil, samplingConfig, acls...) + if err != nil { + return err + } + + _, err = TransactAndCheckAndSetUUIDs(nbClient, acls, ops) + return err +} + +// UpdateACLsLoggingOps updates the log and severity on the provided ACLs and +// returns the corresponding ops +func UpdateACLsLoggingOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(acls)) + for i := range acls { + // can't use i in the predicate, for loop replaces it in-memory + acl := acls[i] + opModel := operationModel{ + Model: acl, + OnModelUpdates: []interface{}{&acl.Severity, &acl.Log}, + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go new file mode 100644 index 000000000..920bde95f --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go @@ -0,0 +1,231 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type addressSetPredicate func(*nbdb.AddressSet) bool + +// getNonZeroAddressSetMutableFields builds a list of address set +// mutable fields with non zero values to be used as the list of fields to +// Update. +// The purpose is to prevent libovsdb interpreting non-nil empty maps/slices +// as default and thus being filtered out of the update. The intention is to +// use non-nil empty maps/slices to clear them out in the update. 
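+// For example (an illustrative sketch, not part of the vendored code): with
+//
+//	as := &nbdb.AddressSet{Name: "as1", ExternalIDs: map[string]string{}}
+//
+// the non-nil empty ExternalIDs map is kept in the update and clears the
+// column, whereas leaving ExternalIDs nil leaves the stored value untouched.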
+// See: https://github.com/ovn-org/libovsdb/issues/226 +func getNonZeroAddressSetMutableFields(as *nbdb.AddressSet) []interface{} { + fields := []interface{}{} + if as.Addresses != nil { + fields = append(fields, &as.Addresses) + } + if as.ExternalIDs != nil { + fields = append(fields, &as.ExternalIDs) + } + return fields +} + +// FindAddressSetsWithPredicate looks up address sets from the cache based on a +// given predicate +func FindAddressSetsWithPredicate(nbClient libovsdbclient.Client, p addressSetPredicate) ([]*nbdb.AddressSet, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.AddressSet{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// GetAddressSet looks up an address sets from the cache +func GetAddressSet(nbClient libovsdbclient.Client, as *nbdb.AddressSet) (*nbdb.AddressSet, error) { + found := []*nbdb.AddressSet{} + opModel := operationModel{ + Model: as, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// CreateAddressSetsOps creates the create-ops for the provided address sets +func CreateAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(addrSets)) + for i := range addrSets { + as := addrSets[i] + opModel := operationModel{ + Model: as, + OnModelUpdates: onModelUpdatesNone(), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// CreateAddressSets creates the provided address sets +func CreateAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error { + ops, err := CreateAddressSetsOps(nbClient, nil, addrSets...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +func CreateOrUpdateAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(addrSets)) + for i := range addrSets { + as := addrSets[i] + opModel := operationModel{ + Model: as, + OnModelUpdates: getNonZeroAddressSetMutableFields(as), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// CreateOrUpdateAddressSets creates or updates the provided address sets +func CreateOrUpdateAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error { + ops, err := CreateOrUpdateAddressSetsOps(nbClient, nil, addrSets...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// UpdateAddressSetsAddresses updates the Addresses on the provided address sets +func UpdateAddressSetsAddresses(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error { + opModels := make([]operationModel, 0, len(addrSets)) + for i := range addrSets { + as := addrSets[i] + opModel := operationModel{ + Model: as, + OnModelUpdates: []interface{}{&as.Addresses}, + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModels...) 
+ return err +} + +// AddAddressesToAddressSetOps adds the provided addresses to the provided address set and +// returns the corresponding ops +func AddAddressesToAddressSetOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, as *nbdb.AddressSet, addresses ...string) ([]libovsdb.Operation, error) { + originalAddresses := as.Addresses + as.Addresses = addresses + opModel := operationModel{ + Model: as, + OnModelMutations: []interface{}{&as.Addresses}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.CreateOrUpdateOps(ops, opModel) + as.Addresses = originalAddresses + return ops, err +} + +// DeleteAddressesFromAddressSetOps removes the provided addresses from the provided address +// set and returns the corresponding ops +func DeleteAddressesFromAddressSetOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, as *nbdb.AddressSet, addresses ...string) ([]libovsdb.Operation, error) { + originalAddresses := as.Addresses + as.Addresses = addresses + opModel := operationModel{ + Model: as, + OnModelMutations: []interface{}{&as.Addresses}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.DeleteOps(ops, opModel) + as.Addresses = originalAddresses + return ops, err +} + +func DeleteAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(addrSets)) + for i := range addrSets { + as := addrSets[i] + opModel := operationModel{ + Model: as, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +// DeleteAddressSets deletes the provided address sets +func DeleteAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error { + opModels := make([]operationModel, 0, len(addrSets)) + for i := range addrSets { + as := addrSets[i] + opModel := operationModel{ + Model: as, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.Delete(opModels...) 
+}
+
+// DeleteAddressSetsWithPredicateOps returns the ops to delete address sets based on a given predicate
+func DeleteAddressSetsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p addressSetPredicate) ([]libovsdb.Operation, error) {
+	deleted := []*nbdb.AddressSet{}
+	opModel := operationModel{
+		ModelPredicate: p,
+		ExistingResult: &deleted,
+		ErrNotFound:    false,
+		BulkOp:         true,
+	}
+
+	m := newModelClient(nbClient)
+	return m.DeleteOps(ops, opModel)
+}
+
+// DeleteAddressSetsWithPredicate looks up address sets from the cache based on
+// a given predicate and deletes them
+func DeleteAddressSetsWithPredicate(nbClient libovsdbclient.Client, p addressSetPredicate) error {
+	ops, err := DeleteAddressSetsWithPredicateOps(nbClient, nil, p)
+	if err != nil {
+		return err
+	}
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go
new file mode 100644
index 000000000..3cc17b64f
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go
@@ -0,0 +1,166 @@
+package ops
+
+import (
+	"context"
+
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
+)
+
+// ListChassis looks up all chassis from the cache
+func ListChassis(sbClient libovsdbclient.Client) ([]*sbdb.Chassis, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+	searchedChassis := []*sbdb.Chassis{}
+	err := sbClient.List(ctx, &searchedChassis)
+	return searchedChassis, err
+}
+
+// ListChassisPrivate looks up all chassis private models from the cache
+func ListChassisPrivate(sbClient libovsdbclient.Client) ([]*sbdb.ChassisPrivate, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+	found := []*sbdb.ChassisPrivate{}
+	err := sbClient.List(ctx, &found)
+	return found, err
+}
+
+// GetChassis looks up a chassis from the cache using the 'Name' column which is an indexed
+// column.
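+//
+// An illustrative usage sketch (the chassis name is hypothetical):
+//
+//	ch, err := GetChassis(sbClient, &sbdb.Chassis{Name: "node-1"})
+//
+// returns the cached record for chassis "node-1", or an error if no such
+// chassis exists.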
+func GetChassis(sbClient libovsdbclient.Client, chassis *sbdb.Chassis) (*sbdb.Chassis, error) { + found := []*sbdb.Chassis{} + opModel := operationModel{ + Model: chassis, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// DeleteChassis deletes the provided chassis and associated private chassis +func DeleteChassis(sbClient libovsdbclient.Client, chassis ...*sbdb.Chassis) error { + opModels := make([]operationModel, 0, len(chassis)) + for i := range chassis { + foundChassis := []*sbdb.Chassis{} + chassisPrivate := sbdb.ChassisPrivate{ + Name: chassis[i].Name, + } + chassisUUID := "" + opModel := []operationModel{ + { + Model: chassis[i], + ExistingResult: &foundChassis, + ErrNotFound: false, + BulkOp: false, + DoAfter: func() { + if len(foundChassis) > 0 { + chassisPrivate.Name = foundChassis[0].Name + chassisUUID = foundChassis[0].UUID + } + }, + }, + { + Model: &chassisPrivate, + ErrNotFound: false, + BulkOp: false, + }, + // IGMPGroup has a weak link to chassis, deleting multiple chassis may result in IGMP_Groups + // with identical values on columns "address", "datapath", and "chassis", when "chassis" goes empty + { + Model: &sbdb.IGMPGroup{}, + ModelPredicate: func(group *sbdb.IGMPGroup) bool { + return group.Chassis != nil && chassisUUID != "" && *group.Chassis == chassisUUID + }, + ErrNotFound: false, + BulkOp: true, + }, + } + opModels = append(opModels, opModel...) + } + + m := newModelClient(sbClient) + err := m.Delete(opModels...) + return err +} + +type chassisPredicate func(*sbdb.Chassis) bool + +// DeleteChassisWithPredicate looks up chassis from the cache based on a given +// predicate and deletes them as well as the associated private chassis +func DeleteChassisWithPredicate(sbClient libovsdbclient.Client, p chassisPredicate) error { + foundChassis := []*sbdb.Chassis{} + foundChassisNames := sets.NewString() + foundChassisUUIDS := sets.NewString() + opModels := []operationModel{ + { + Model: &sbdb.Chassis{}, + ModelPredicate: p, + ExistingResult: &foundChassis, + ErrNotFound: false, + BulkOp: true, + DoAfter: func() { + for _, chassis := range foundChassis { + foundChassisNames.Insert(chassis.Name) + foundChassisUUIDS.Insert(chassis.UUID) + } + }, + }, + { + Model: &sbdb.ChassisPrivate{}, + ModelPredicate: func(item *sbdb.ChassisPrivate) bool { return foundChassisNames.Has(item.Name) }, + ErrNotFound: false, + BulkOp: true, + }, + // IGMPGroup has a weak link to chassis, deleting multiple chassis may result in IGMP_Groups + // with identical values on columns "address", "datapath", and "chassis", when "chassis" goes empty + { + Model: &sbdb.IGMPGroup{}, + ModelPredicate: func(group *sbdb.IGMPGroup) bool { return group.Chassis != nil && foundChassisUUIDS.Has(*group.Chassis) }, + ErrNotFound: false, + BulkOp: true, + }, + } + m := newModelClient(sbClient) + err := m.Delete(opModels...) 
+ return err +} + +// CreateOrUpdateChassis creates or updates the chassis record along with the encap record +func CreateOrUpdateChassis(sbClient libovsdbclient.Client, chassis *sbdb.Chassis, encap *sbdb.Encap) error { + m := newModelClient(sbClient) + opModels := []operationModel{ + { + Model: encap, + DoAfter: func() { + chassis.Encaps = []string{encap.UUID} + }, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + }, + { + Model: chassis, + OnModelMutations: []interface{}{&chassis.OtherConfig}, + OnModelUpdates: []interface{}{&chassis.Encaps}, + ErrNotFound: false, + BulkOp: false, + }, + } + + if _, err := m.CreateOrUpdate(opModels...); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go new file mode 100644 index 000000000..dac95c5c0 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go @@ -0,0 +1,47 @@ +package ops + +import ( + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type coppPredicate func(*nbdb.Copp) bool + +// CreateOrUpdateCOPPsOps creates or updates the provided COPP returning the +// corresponding ops +func CreateOrUpdateCOPPsOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, copps ...*nbdb.Copp) ([]ovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(copps)) + for i := range copps { + // can't use i in the predicate, for loop replaces it in-memory + copp := copps[i] + opModel := operationModel{ + Model: copp, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +// DeleteCOPPsOps deletes the provided COPPs found using the predicate, returning the +// corresponding ops +func DeleteCOPPsWithPredicateOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, p coppPredicate) ([]ovsdb.Operation, error) { + copp := nbdb.Copp{} + opModels := []operationModel{ + { + Model: &copp, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + }, + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModels...) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go new file mode 100644 index 000000000..b242d7d3e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go @@ -0,0 +1,332 @@ +package ops + +import ( + "fmt" + "strings" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +type dbObjType int +type ownerType = string +type ExternalIDKey string + +func (key ExternalIDKey) String() string { + return string(key) +} + +// ObjectIDsType defines which ExternalIDs are used to indentify db objects. +// ExternalIDs are defined based on dbObjType and ownerType, e.g. default network controller creates address +// sets for namespaces and network policies, and needs to use different sets of ids for them. So it will +// create ObjectIDsType with the same dbObjType=addressSet, but different ownerTypes NamespaceOwnerType and +// NetworkPolicyOwnerType. 
Then it can define a set of ExternalIDs that will be used for each type. +// From the db perspective, dbObjType is identified based on the db table, and ownerType is used directly +// in the ExternalIDs with OwnerTypeKey key. +type ObjectIDsType struct { + dbTable dbObjType + ownerObjectType ownerType + // externalIDKeys is a slice, because primary id for given ObjectIDsType will be built from the + // ExternalIDKey values in given order + externalIDKeys []ExternalIDKey + externalIDsMap map[ExternalIDKey]bool +} + +func (it ObjectIDsType) GetExternalIDKeys() []ExternalIDKey { + return it.externalIDKeys +} + +func (it ObjectIDsType) HasKey(key ExternalIDKey) bool { + return it.externalIDsMap[key] +} + +func (it ObjectIDsType) IsSameType(it2 *ObjectIDsType) bool { + return it.ownerObjectType == it2.ownerObjectType && it.dbTable == it2.dbTable +} + +const ( + // ExternalIDs keys that will be a part of a client index. + // OwnerControllerKey and OwnerTypeKey define managing entity (a.k.a. owner) for given db object. + // All the other ids are object-related. + // PrimaryIDKey will be used a primary client index. + // A combination of OwnerControllerKey, OwnerTypeKey, and ObjectNameKey will be used a secondary client index. + // While owner-related keys together with PrimaryIDKey will always be present in the ExternalIDs, + // ObjectNameKey may or may not be used, based on ObjectIDsType. + OwnerControllerKey ExternalIDKey = types.OvnK8sPrefix + "/owner-controller" + OwnerTypeKey ExternalIDKey = types.OvnK8sPrefix + "/owner-type" + // ObjectNameKey is a part of a secondary index, together with OwnerControllerKey and OwnerTypeKey + // May be used by controllers to store e.g. namespace+name of the object. + ObjectNameKey ExternalIDKey = types.OvnK8sPrefix + "/name" + // PrimaryIDKey will be used as a primary index, that is unique for every db object, + // and can be built based on the combination of all the other ids. + PrimaryIDKey ExternalIDKey = types.PrimaryIDKey +) + +// ObjectNameKey may be used as a secondary ID in the future. To ensure easy filtering for namespaced +// objects, you can combine namespace and name in that key. To unify this process (and potential parsing of the key) +// the following 2 functions exist: +// - BuildNamespaceNameKey to combine namespace and name into one key +// - ParseNamespaceNameKey to split the key back into namespace and name + +func BuildNamespaceNameKey(namespace, name string) string { + return namespace + ":" + name +} + +func ParseNamespaceNameKey(key string) (namespace, name string, err error) { + s := strings.Split(key, ":") + if len(s) != 2 { + err = fmt.Errorf("failed to parse namespaced name key %v, expected format :", key) + return + } + return s[0], s[1], nil +} + +// dbIDsMap is used to make sure the same ownerType is not defined twice for the same dbObjType to avoid conflicts. 
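+// For example (sketch): registering newObjectIDsType(addressSet, NamespaceOwnerType, ...) a second
+// time for the same table and owner type panics instead of silently overwriting the first registration.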
+// It is filled in newObjectIDsType when registering new ObjectIDsType +var dbIDsMap = map[dbObjType]map[ownerType]bool{} + +func newObjectIDsType(dbTable dbObjType, ownerObjectType ownerType, keys []ExternalIDKey) *ObjectIDsType { + if dbIDsMap[dbTable][ownerObjectType] { + panic(fmt.Sprintf("ObjectIDsType for params %v %v is already registered", dbTable, ownerObjectType)) + } + if dbIDsMap[dbTable] == nil { + dbIDsMap[dbTable] = map[ownerType]bool{} + } + dbIDsMap[dbTable][ownerObjectType] = true + keysMap := map[ExternalIDKey]bool{} + for _, key := range keys { + keysMap[key] = true + } + return &ObjectIDsType{dbTable, ownerObjectType, keys, keysMap} +} + +// DbObjectIDs is a structure representing a set of db object ExternalIDs, used to identify +// an object in the db (as a primary/secondary index) or for a predicate search. +// DbObjectIDs consists of 3 parts: +// - idsType defines which IDs are used for a given object, as an ObjectIDsType, +// idsType.ownerObjectType will be written to ExternalIDs[OwnerTypeKey] +// - ownerControllerName defines who manages given object. It is required in case there are more than 1 controllers +// using the same idsType to make sure every controller only updates objects it owns. +// - objectIDs provide values for keys that are used by given idsType. To create a new object, all fields should be set. +// For predicate search, only some values that need to be matched may be set. +// +// dbIndex := NewDbObjectIDs(AddressSetEgressFirewallDNS, "DefaultController", +// map[ExternalIDKey]string{ +// ObjectNameKey: "dns.name", +// IPFamilyKey: "ipv4" +// }) +// +// uses AddressSetEgressFirewallDNS = newObjectIDsType(addressSet, EgressFirewallDNSOwnerType, []ExternalIDKey{ +// // dnsName +// ObjectNameKey, +// IPFamilyKey, +// }) +// +// its dbIndex will be mapped to the following ExternalIDs +// +// { +// "k8s.ovn.org/owner-controller": "DefaultController" +// "k8s.ovn.org/owner-type": "EgressFirewallDNS" (value of EgressFirewallDNSOwnerType) +// "k8s.ovn.org/name": "dns.name" +// "k8s.ovn.org/ipFamily": "ipv4" +// "k8s.ovn.org/id": "DefaultController:EgressFirewallDNS:dns.name:ipv4" +// } +type DbObjectIDs struct { + idsType *ObjectIDsType + // ownerControllerName specifies which controller owns the object. + // Controller should only change objects it owns, make sure to always set this field. + ownerControllerName string + // objectIDs store values for keys required by given ObjectIDsType, and may be different for different ObjectIDsType. + // These ids should uniquely identify db object with the same ownerControllerName and OwnerTypeKey. + objectIDs map[ExternalIDKey]string +} + +// NewDbObjectIDs is used to construct DbObjectIDs, idsType and controller are always required, +// objectIds may be empty, or half-filled for predicate search. +// objectIds keys that are not used by given idsType will cause panic. 
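+//
+// A minimal sketch (using the AddressSetNamespace ids type defined in
+// db_object_types.go; the controller name and values are hypothetical):
+//
+//	dbIDs := NewDbObjectIDs(AddressSetNamespace, "default-network-controller",
+//		map[ExternalIDKey]string{ObjectNameKey: "my-namespace", IPFamilyKey: "ip4"})
+//	externalIDs := dbIDs.GetExternalIDs()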
+func NewDbObjectIDs(idsType *ObjectIDsType, controller string, objectIds map[ExternalIDKey]string) *DbObjectIDs { + if controller == "" { + panic("NewDbObjectIDs failed: controller should not be empty") + } + externalIDKeys := idsType.GetExternalIDKeys() + if externalIDKeys == nil { + // can only happen if ObjectIDsType{} is passed + panic(fmt.Sprintf("NewDbObjectIDs failed: ObjectIDsType %v should not be empty", idsType)) + } + // only use values for keys from idsType + for key := range objectIds { + if !idsType.HasKey(key) { + panic(fmt.Sprintf("NewDbObjectIDs failed: key %v is unknown", key)) + } + } + if objectIds == nil { + objectIds = map[ExternalIDKey]string{} + } + objectIDs := &DbObjectIDs{ + idsType: idsType, + ownerControllerName: controller, + objectIDs: objectIds, + } + return objectIDs +} + +// AddIDs creates new DbObjectIDs with the additional extraObjectIds. +// If at least one of extraObjectIds keys is not used by the objectIDs.idsType it will cause panic. +func (objectIDs *DbObjectIDs) AddIDs(extraObjectIds map[ExternalIDKey]string) *DbObjectIDs { + ids := deepcopyMap(objectIDs.objectIDs) + for key, value := range extraObjectIds { + ids[key] = value + } + return &DbObjectIDs{objectIDs.idsType, objectIDs.ownerControllerName, ids} +} + +func (objectIDs *DbObjectIDs) RemoveIDs(idsToDelete ...ExternalIDKey) *DbObjectIDs { + ids := deepcopyMap(objectIDs.objectIDs) + for _, keyToDel := range idsToDelete { + delete(ids, keyToDel) + } + return &DbObjectIDs{objectIDs.idsType, objectIDs.ownerControllerName, ids} +} + +func (objectIDs *DbObjectIDs) HasSameOwner(ownerController string, objectIDsType *ObjectIDsType) bool { + return objectIDs.ownerControllerName == ownerController && objectIDs.idsType.IsSameType(objectIDsType) +} + +func (objectIDs *DbObjectIDs) GetUnsetKeys() []ExternalIDKey { + unsetKeys := []ExternalIDKey{} + for _, key := range objectIDs.idsType.GetExternalIDKeys() { + if _, ok := objectIDs.objectIDs[key]; !ok { + unsetKeys = append(unsetKeys, key) + } + } + return unsetKeys +} + +// GetObjectID returns value from objectIDs.objectIDs map, and empty string for not found values. +// Usually objectIDs.objectIDs doesn't include PrimaryIDKey, OwnerTypeKey, and OwnerControllerKey. +func (objectIDs *DbObjectIDs) GetObjectID(key ExternalIDKey) string { + return objectIDs.objectIDs[key] +} + +// GetExternalIDs should only be used to build ids before creating the new db object. +// If at least one of required by DbObjectIDs.idsType keys is not present in the DbObjectIDs.objectIDs it will panic. +// GetExternalIDs returns a map of ids, that always includes keys +// - OwnerControllerKey +// - OwnerTypeKey +// - PrimaryIDKey +// and also all keys that are preset in objectIDs.objectIDs. 
+// PrimaryIDKey value consists of the following values joined with ":" +// - objectIDs.ownerControllerName +// - objectIDs.idsType.ownerObjectType +// - values from DbObjectIDs.objectIDs are added in order set in ObjectIDsType.externalIDKeys +func (objectIDs *DbObjectIDs) GetExternalIDs() map[string]string { + return objectIDs.getExternalIDs(false) +} + +func (objectIDs *DbObjectIDs) getExternalIDs(allowEmptyKeys bool) map[string]string { + externalIDs := map[string]string{ + OwnerControllerKey.String(): objectIDs.ownerControllerName, + OwnerTypeKey.String(): objectIDs.idsType.ownerObjectType, + } + for key, value := range objectIDs.objectIDs { + externalIDs[key.String()] = value + } + primaryID, err := objectIDs.getUniqueID() + if err == nil { + // err == nil => primary id was properly built + externalIDs[PrimaryIDKey.String()] = primaryID + } else if !allowEmptyKeys { + panic(fmt.Sprintf("Failed to build Primary ID for %+v: %v", objectIDs, err)) + } + return externalIDs +} + +// String returns a string that is similar to PrimaryIDKey value, but if some required keys are not present +// in the DbObjectIDs.objectIDs, they will be replaced with empty strings. +// String returns the representation of all the information set in DbObjectIDs. +func (objectIDs *DbObjectIDs) String() string { + id := objectIDs.ownerControllerName + ":" + objectIDs.idsType.ownerObjectType + for _, key := range objectIDs.idsType.GetExternalIDKeys() { + id += ":" + objectIDs.objectIDs[key] + } + return id +} + +func (objectIDs *DbObjectIDs) GetIDsType() *ObjectIDsType { + return objectIDs.idsType +} + +// getUniqueID returns primary id that is build based on objectIDs values. +// If at least one required key is missing, an error will be returned. +func (objectIDs *DbObjectIDs) getUniqueID() (string, error) { + id := objectIDs.ownerControllerName + ":" + objectIDs.idsType.ownerObjectType + for _, key := range objectIDs.idsType.GetExternalIDKeys() { + value, ok := objectIDs.objectIDs[key] + if !ok { + return "", fmt.Errorf("key %v is required but not present", key) + } + id += ":" + value + } + return id, nil +} + +// NewDbObjectIDsFromExternalIDs is used to parse object ExternalIDs, it sets DbObjectIDs.ownerControllerName based +// on OwnerControllerKey key, and verifies OwnerControllerKey value matches given objectIDsType. +// All the other ids from objectIDsType will be set to DbObjectIDs.objectIDs. +func NewDbObjectIDsFromExternalIDs(objectIDsType *ObjectIDsType, externalIDs map[string]string) (*DbObjectIDs, error) { + if externalIDs[OwnerTypeKey.String()] != objectIDsType.ownerObjectType { + return nil, fmt.Errorf("expected ExternalID %s to equal %s, got %s", + OwnerTypeKey, objectIDsType.ownerObjectType, externalIDs[OwnerTypeKey.String()]) + } + if externalIDs[OwnerControllerKey.String()] == "" { + return nil, fmt.Errorf("required ExternalID %s is empty", OwnerControllerKey) + } + objIDs := map[ExternalIDKey]string{} + for key, value := range externalIDs { + if objectIDsType.HasKey(ExternalIDKey(key)) { + objIDs[ExternalIDKey(key)] = value + } + } + return NewDbObjectIDs(objectIDsType, externalIDs[OwnerControllerKey.String()], objIDs), nil +} + +// hasExternalIDs interface should only include types that use new ExternalIDs from DbObjectIDs. +type hasExternalIDs interface { + GetExternalIDs() map[string]string +} + +// GetNoOwnerPredicate should only be used on initial sync when switching to new ExternalIDs. +// Otherwise, use GetPredicate with the specific OwnerControllerKey id. 
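+//
+// An illustrative sync-time sketch (assuming the generated nbdb models
+// implement GetExternalIDs):
+//
+//	legacyACLs, err := FindACLsWithPredicate(nbClient, GetNoOwnerPredicate[*nbdb.ACL]())
+//
+// which matches only ACLs whose ExternalIDs carry no owner-controller key yet.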
+func GetNoOwnerPredicate[T hasExternalIDs]() func(item T) bool { + return func(item T) bool { + return item.GetExternalIDs()[OwnerControllerKey.String()] == "" + } +} + +// GetPredicate returns a predicate to search for db obj of type nbdbT. +// Only non-empty ids will be matched (that always includes DbObjectIDs.OwnerTypeKey and DbObjectIDs.ownerControllerName), +// but the other IDs may be empty and will be ignored in the filtering, additional filter function f may be passed, or set +// to nil. +func GetPredicate[nbdbT hasExternalIDs](objectIDs *DbObjectIDs, f func(item nbdbT) bool) func(item nbdbT) bool { + predicateIDs := objectIDs.getExternalIDs(true) + if primaryID, ok := predicateIDs[PrimaryIDKey.String()]; ok { + // when primary id is set, other ids are not required + predicateIDs = map[string]string{PrimaryIDKey.String(): primaryID} + } + return func(item nbdbT) bool { + dbExternalIDs := item.GetExternalIDs() + for predKey, predValue := range predicateIDs { + if dbExternalIDs[predKey] != predValue { + return false + } + } + return f == nil || f(item) + } +} + +func deepcopyMap(m map[ExternalIDKey]string) map[ExternalIDKey]string { + result := map[ExternalIDKey]string{} + for key, value := range m { + result[key] = value + } + return result +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go new file mode 100644 index 000000000..9f0e8dfe8 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go @@ -0,0 +1,328 @@ +package ops + +import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + +const ( + addressSet dbObjType = iota + acl + dhcpOptions + portGroup + logicalRouterPolicy + qos +) + +const ( + // owner types + EgressFirewallDNSOwnerType ownerType = "EgressFirewallDNS" + EgressFirewallOwnerType ownerType = "EgressFirewall" + EgressQoSOwnerType ownerType = "EgressQoS" + AdminNetworkPolicyOwnerType ownerType = "AdminNetworkPolicy" + BaselineAdminNetworkPolicyOwnerType ownerType = "BaselineAdminNetworkPolicy" + // NetworkPolicyOwnerType is deprecated for address sets, should only be used for sync. + // New owner of network policy address sets, is PodSelectorOwnerType. 
+ NetworkPolicyOwnerType ownerType = "NetworkPolicy" + NetpolDefaultOwnerType ownerType = "NetpolDefault" + PodSelectorOwnerType ownerType = "PodSelector" + NamespaceOwnerType ownerType = "Namespace" + // HybridNodeRouteOwnerType is transferred from egressgw to apbRoute controller with the same dbIDs + HybridNodeRouteOwnerType ownerType = "HybridNodeRoute" + EgressIPOwnerType ownerType = "EgressIP" + EgressServiceOwnerType ownerType = "EgressService" + MulticastNamespaceOwnerType ownerType = "MulticastNS" + MulticastClusterOwnerType ownerType = "MulticastCluster" + NetpolNodeOwnerType ownerType = "NetpolNode" + NetpolNamespaceOwnerType ownerType = "NetpolNamespace" + VirtualMachineOwnerType ownerType = "VirtualMachine" + // NetworkPolicyPortIndexOwnerType is the old version of NetworkPolicyOwnerType, kept for sync only + NetworkPolicyPortIndexOwnerType ownerType = "NetworkPolicyPortIndexOwnerType" + // ClusterOwnerType means the object is cluster-scoped and doesn't belong to any k8s objects + ClusterOwnerType ownerType = "Cluster" + // UDNIsolationOwnerType means the object is needed to implement UserDefinedNetwork isolation + UDNIsolationOwnerType ownerType = "UDNIsolation" + + // owner extra IDs, make sure to define only 1 ExternalIDKey for every string value + PriorityKey ExternalIDKey = "priority" + PolicyDirectionKey ExternalIDKey = "direction" + GressIdxKey ExternalIDKey = "gress-index" + IPFamilyKey ExternalIDKey = "ip-family" + TypeKey ExternalIDKey = "type" + IpKey ExternalIDKey = "ip" + PortPolicyIndexKey ExternalIDKey = "port-policy-index" + IpBlockIndexKey ExternalIDKey = "ip-block-index" + RuleIndex ExternalIDKey = "rule-index" + CIDRKey ExternalIDKey = types.OvnK8sPrefix + "/cidr" + PortPolicyProtocolKey ExternalIDKey = "port-policy-protocol" +) + +// ObjectIDsTypes should only be created here + +var AddressSetAdminNetworkPolicy = newObjectIDsType(addressSet, AdminNetworkPolicyOwnerType, []ExternalIDKey{ + // anp name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule's index + GressIdxKey, + IPFamilyKey, +}) + +var AddressSetBaselineAdminNetworkPolicy = newObjectIDsType(addressSet, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{ + // banp name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule's index + GressIdxKey, + IPFamilyKey, +}) + +var AddressSetEgressFirewallDNS = newObjectIDsType(addressSet, EgressFirewallDNSOwnerType, []ExternalIDKey{ + // dnsName + ObjectNameKey, + IPFamilyKey, +}) + +var AddressSetHybridNodeRoute = newObjectIDsType(addressSet, HybridNodeRouteOwnerType, []ExternalIDKey{ + // nodeName + ObjectNameKey, + IPFamilyKey, +}) + +var AddressSetEgressQoS = newObjectIDsType(addressSet, EgressQoSOwnerType, []ExternalIDKey{ + // namespace + ObjectNameKey, + // egress qos priority + PriorityKey, + IPFamilyKey, +}) + +var AddressSetPodSelector = newObjectIDsType(addressSet, PodSelectorOwnerType, []ExternalIDKey{ + // pod selector string representation + ObjectNameKey, + IPFamilyKey, +}) + +// deprecated, should only be used for sync +var AddressSetNetworkPolicy = newObjectIDsType(addressSet, NetworkPolicyOwnerType, []ExternalIDKey{ + // namespace_name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule index + GressIdxKey, + IPFamilyKey, +}) + +var AddressSetNamespace = newObjectIDsType(addressSet, NamespaceOwnerType, []ExternalIDKey{ + // namespace + ObjectNameKey, + IPFamilyKey, +}) + +var AddressSetEgressIP = newObjectIDsType(addressSet, EgressIPOwnerType, []ExternalIDKey{ + 
// cluster-wide address set name + ObjectNameKey, + IPFamilyKey, +}) + +var AddressSetEgressService = newObjectIDsType(addressSet, EgressServiceOwnerType, []ExternalIDKey{ + // cluster-wide address set name + ObjectNameKey, + IPFamilyKey, +}) + +var ACLAdminNetworkPolicy = newObjectIDsType(acl, AdminNetworkPolicyOwnerType, []ExternalIDKey{ + // anp name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule's index + GressIdxKey, + // gress rule's peer port's protocol index + PortPolicyProtocolKey, +}) + +var ACLBaselineAdminNetworkPolicy = newObjectIDsType(acl, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{ + // banp name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule's index + GressIdxKey, + // gress rule's peer port's protocol index + PortPolicyProtocolKey, +}) + +var ACLNetpolDefault = newObjectIDsType(acl, NetpolDefaultOwnerType, []ExternalIDKey{ + // for now there is only 1 acl of this type, but we use a name in case more types are needed in the future + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, +}) + +var ACLMulticastNamespace = newObjectIDsType(acl, MulticastNamespaceOwnerType, []ExternalIDKey{ + // namespace + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, +}) + +var ACLMulticastCluster = newObjectIDsType(acl, MulticastClusterOwnerType, []ExternalIDKey{ + // cluster-scoped multicast acls + // there are 2 possible TypeKey values for cluster default multicast acl: DefaultDeny and AllowInterNode + TypeKey, + // egress or ingress + PolicyDirectionKey, +}) + +var ACLNetpolNode = newObjectIDsType(acl, NetpolNodeOwnerType, []ExternalIDKey{ + // node name + ObjectNameKey, + // exact ip for management port, every node may have more than 1 management ip + IpKey, +}) + +// ACLNetworkPolicyPortIndex define a unique index for every network policy ACL. +// ingress/egress + NetworkPolicy[In/E]gressRule idx - defines given gressPolicy. +// ACLs are created for every gp.portPolicies: +// - for empty policy (no selectors and no ip blocks) - empty ACL (see allIPsMatch) +// OR +// - all selector-based peers ACL +// - for every IPBlock +1 ACL +// Therefore unique id for a given gressPolicy is portPolicy idx + IPBlock idx +// (empty policy and all selector-based peers ACLs will have idx=-1) +// Note: keep for backward compatibility only +// Deprecated, should only be used for sync +var ACLNetworkPolicyPortIndex = newObjectIDsType(acl, NetworkPolicyPortIndexOwnerType, []ExternalIDKey{ + // policy namespace+name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule index + GressIdxKey, + PortPolicyIndexKey, + IpBlockIndexKey, +}) + +// ACLNetworkPolicy define a unique index for every network policy ACL. +// ingress/egress + NetworkPolicy[In/E]gressRule idx - defines given gressPolicy. 
+// ACLs are created for gp.portPolicies which are grouped by protocol: +// - for empty policy (no selectors and no ip blocks) - empty ACL (see allIPsMatch) +// OR +// - all selector-based peers ACL +// - for every IPBlock +1 ACL +// Therefore unique id for a given gressPolicy is protocol name + IPBlock idx +// (protocol will be "None" if no port policy is defined, and empty policy and all +// selector-based peers ACLs will have idx=-1) +var ACLNetworkPolicy = newObjectIDsType(acl, NetworkPolicyOwnerType, []ExternalIDKey{ + // policy namespace+name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule index + GressIdxKey, + PortPolicyProtocolKey, + IpBlockIndexKey, +}) + +var ACLNetpolNamespace = newObjectIDsType(acl, NetpolNamespaceOwnerType, []ExternalIDKey{ + // namespace + ObjectNameKey, + // in the same namespace there can be 2 default deny port groups, egress and ingress + PolicyDirectionKey, + // every port group has default deny and arp allow acl. + TypeKey, +}) + +var ACLEgressFirewall = newObjectIDsType(acl, EgressFirewallOwnerType, []ExternalIDKey{ + // namespace + ObjectNameKey, + // there can only be 1 egress firewall object in every namespace, named "default" + // The only additional id we need is the index of the EgressFirewall.Spec.Egress rule. + RuleIndex, +}) + +var ACLUDN = newObjectIDsType(acl, UDNIsolationOwnerType, []ExternalIDKey{ + // name of a UDN-related ACL + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, +}) + +var VirtualMachineDHCPOptions = newObjectIDsType(dhcpOptions, VirtualMachineOwnerType, []ExternalIDKey{ + // We can have multiple VMs with same CIDR they may have different + // hostname. + // vm "namespace/name" + ObjectNameKey, + // CIDR field from DHCPOptions with ":" replaced by "." + CIDRKey, +}) + +var PortGroupNamespace = newObjectIDsType(portGroup, NamespaceOwnerType, []ExternalIDKey{ + // namespace name + ObjectNameKey, +}) + +// every namespace that has at least 1 network policy, has resources that are shared by all network policies +// in that namespace. 
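+// For instance (an illustrative sketch; the controller name and values are hypothetical), the
+// egress default-deny port group of namespace "ns1" could be identified with:
+//
+//	NewDbObjectIDs(PortGroupNetpolNamespace, "default-network-controller",
+//		map[ExternalIDKey]string{ObjectNameKey: "ns1", PolicyDirectionKey: "Egress"})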
+var PortGroupNetpolNamespace = newObjectIDsType(portGroup, NetpolNamespaceOwnerType, []ExternalIDKey{ + // namespace + ObjectNameKey, + // in the same namespace there can be 2 default deny port groups, egress and ingress + PolicyDirectionKey, +}) + +var PortGroupNetworkPolicy = newObjectIDsType(portGroup, NetworkPolicyOwnerType, []ExternalIDKey{ + // policy namespace+name + ObjectNameKey, +}) + +var PortGroupAdminNetworkPolicy = newObjectIDsType(portGroup, AdminNetworkPolicyOwnerType, []ExternalIDKey{ + // ANP name + ObjectNameKey, +}) + +var PortGroupBaselineAdminNetworkPolicy = newObjectIDsType(portGroup, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{ + // BANP name + ObjectNameKey, +}) + +var PortGroupCluster = newObjectIDsType(portGroup, ClusterOwnerType, []ExternalIDKey{ + // name of a global port group + // currently ClusterPortGroup and ClusterRtrPortGroup are present + ObjectNameKey, +}) + +var PortGroupUDN = newObjectIDsType(portGroup, UDNIsolationOwnerType, []ExternalIDKey{ + // name of a UDN port group + // currently uses: + // secondaryPods - on default network switch to distinguish non-primary pods + ObjectNameKey, +}) + +var LogicalRouterPolicyEgressIP = newObjectIDsType(logicalRouterPolicy, EgressIPOwnerType, []ExternalIDKey{ + // the priority of the LRP + PriorityKey, + // for the reroute policies it should be the "EIPName_Namespace/podName" + // for the no-reroute global policies it should be the unique global name + ObjectNameKey, + // the IP Family for this policy, ip4 or ip6 or ip(dualstack) + IPFamilyKey, +}) + +var QoSEgressQoS = newObjectIDsType(qos, EgressQoSOwnerType, []ExternalIDKey{ + // the priority of the QoSRule (OVN priority is the same as the rule index priority for this feature) + // this value will be unique in a given namespace + PriorityKey, + // namespace + ObjectNameKey, +}) + +var QoSRuleEgressIP = newObjectIDsType(qos, EgressIPOwnerType, []ExternalIDKey{ + // the priority of the QoSRule + PriorityKey, + // should be the unique global name + ObjectNameKey, + // the IP Family for this policy, ip4 or ip6 or ip(dualstack) + IPFamilyKey, +}) diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go new file mode 100644 index 000000000..8eb17c6df --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go @@ -0,0 +1,84 @@ +package ops + +import ( + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type DHCPOptionsPredicate func(*nbdb.DHCPOptions) bool + +// CreateOrUpdateDhcpOptionsOps will configure logical switch port DHCPv4Options and DHCPv6Options fields with +// options at dhcpv4Options and dhcpv6Options arguments and create/update DHCPOptions objects that matches the +// pv4 and pv6 predicates. The missing DHCP options will default to nil in the LSP attributes. 
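+//
+// An IPv4-only illustrative sketch (the CIDR and option values are hypothetical):
+// passing a populated dhcpIPv4Options together with a nil dhcpIPv6Options only
+// creates/updates the v4 options, and the LSP's Dhcpv6Options defaults to nil,
+// as noted above:
+//
+//	v4 := &nbdb.DHCPOptions{Cidr: "10.244.0.0/24", Options: map[string]string{"lease_time": "3600"}}
+//	ops, err := CreateOrUpdateDhcpOptionsOps(nbClient, nil, lsp, v4, nil)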
+func CreateOrUpdateDhcpOptionsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lsp *nbdb.LogicalSwitchPort, dhcpIPv4Options, dhcpIPv6Options *nbdb.DHCPOptions) ([]libovsdb.Operation, error) { + opModels := []operationModel{} + if dhcpIPv4Options != nil { + opModel := operationModel{ + Model: dhcpIPv4Options, + OnModelUpdates: onModelUpdatesAllNonDefault(), + DoAfter: func() { lsp.Dhcpv4Options = &dhcpIPv4Options.UUID }, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + if dhcpIPv6Options != nil { + opModel := operationModel{ + Model: dhcpIPv6Options, + OnModelUpdates: onModelUpdatesAllNonDefault(), + DoAfter: func() { lsp.Dhcpv6Options = &dhcpIPv6Options.UUID }, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + opModels = append(opModels, operationModel{ + Model: lsp, + OnModelUpdates: []interface{}{ + &lsp.Dhcpv4Options, + &lsp.Dhcpv6Options, + }, + ErrNotFound: true, + BulkOp: false, + }) + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +func CreateOrUpdateDhcpOptions(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort, dhcpIPv4Options, dhcpIPv6Options *nbdb.DHCPOptions) error { + ops, err := CreateOrUpdateDhcpOptionsOps(nbClient, nil, lsp, dhcpIPv4Options, dhcpIPv6Options) + if err != nil { + return err + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +func DeleteDHCPOptions(nbClient libovsdbclient.Client, dhcpOptions *nbdb.DHCPOptions) error { + opModels := []operationModel{} + opModel := operationModel{ + Model: dhcpOptions, + ErrNotFound: false, + BulkOp: true, + } + opModels = append(opModels, opModel) + m := newModelClient(nbClient) + return m.Delete(opModels...) + +} + +func DeleteDHCPOptionsWithPredicate(nbClient libovsdbclient.Client, p DHCPOptionsPredicate) error { + opModels := []operationModel{} + opModel := operationModel{ + Model: &nbdb.DHCPOptions{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + opModels = append(opModels, opModel) + m := newModelClient(nbClient) + return m.Delete(opModels...) 
+ +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go new file mode 100644 index 000000000..854c8f2b2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go @@ -0,0 +1,88 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/ovsdb" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// CreateOrUpdateLoadBalancerGroupOps returns the ops to create or update the +// provided load balancer group +func CreateOrUpdateLoadBalancerGroupOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, group *nbdb.LoadBalancerGroup) ([]ovsdb.Operation, error) { + // lb group has no fields other than name, safe to update just with non-default values + opModel := operationModel{ + Model: group, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.CreateOrUpdateOps(ops, opModel) + if err != nil { + return nil, err + } + return ops, nil +} + +// AddLoadBalancersToGroupOps adds the provided load balancers to the provided +// group and returns the corresponding ops +func AddLoadBalancersToGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, group *nbdb.LoadBalancerGroup, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + originalLBs := group.LoadBalancer + group.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + group.LoadBalancer = append(group.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: group, + ModelPredicate: func(item *nbdb.LoadBalancerGroup) bool { return item.Name == group.Name }, + OnModelMutations: []interface{}{&group.LoadBalancer}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.CreateOrUpdateOps(ops, opModel) + group.LoadBalancer = originalLBs + return ops, err +} + +// RemoveLoadBalancersFromGroupOps removes the provided load balancers from the +// provided group and returns the corresponding ops +func RemoveLoadBalancersFromGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, group *nbdb.LoadBalancerGroup, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + originalLBs := group.LoadBalancer + group.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + group.LoadBalancer = append(group.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: group, + ModelPredicate: func(item *nbdb.LoadBalancerGroup) bool { return item.Name == group.Name }, + OnModelMutations: []interface{}{&group.LoadBalancer}, + // if we want to delete loadbalancer from the port group that doesn't exist, that is noop + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.DeleteOps(ops, opModel) + group.LoadBalancer = originalLBs + return ops, err +} + +type loadBalancerGroupPredicate func(*nbdb.LoadBalancerGroup) bool + +// FindLoadBalancerGroupsWithPredicate looks up load balancer groups from the +// cache based on a given predicate +func FindLoadBalancerGroupsWithPredicate(nbClient libovsdbclient.Client, p loadBalancerGroupPredicate) ([]*nbdb.LoadBalancerGroup, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() 
+ groups := []*nbdb.LoadBalancerGroup{} + err := nbClient.WhereCache(p).List(ctx, &groups) + return groups, err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go new file mode 100644 index 000000000..097d37b4c --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go @@ -0,0 +1,151 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// getNonZeroLoadBalancerMutableFields builds a list of load balancer +// mutable fields with non zero values to be used as the list of fields to +// Update. +// The purpose is to prevent libovsdb interpreting non-nil empty maps/slices +// as default and thus being filtered out of the update. The intention is to +// use non-nil empty maps/slices to clear them out in the update. +// See: https://github.com/ovn-org/libovsdb/issues/226 +func getNonZeroLoadBalancerMutableFields(lb *nbdb.LoadBalancer) []interface{} { + fields := []interface{}{} + if lb.Name != "" { + fields = append(fields, &lb.Name) + } + if lb.ExternalIDs != nil { + fields = append(fields, &lb.ExternalIDs) + } + if lb.HealthCheck != nil { + fields = append(fields, &lb.HealthCheck) + } + if lb.IPPortMappings != nil { + fields = append(fields, &lb.IPPortMappings) + } + if lb.Options != nil { + fields = append(fields, &lb.Options) + } + if lb.Protocol != nil { + fields = append(fields, &lb.Protocol) + } + if lb.SelectionFields != nil { + fields = append(fields, &lb.SelectionFields) + } + if lb.Vips != nil { + fields = append(fields, &lb.Vips) + } + return fields +} + +// BuildLoadBalancer builds a load balancer +func BuildLoadBalancer(name string, protocol nbdb.LoadBalancerProtocol, selectionFields []nbdb.LoadBalancerSelectionFields, vips, options, externalIds map[string]string) *nbdb.LoadBalancer { + return &nbdb.LoadBalancer{ + Name: name, + Protocol: &protocol, + Vips: vips, + SelectionFields: selectionFields, + Options: options, + ExternalIDs: externalIds, + } +} + +// CreateOrUpdateLoadBalancersOps creates or updates the provided load balancers +// returning the corresponding ops +func CreateOrUpdateLoadBalancersOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(lbs)) + for i := range lbs { + // can't use i in the predicate, for loop replaces it in-memory + lb := lbs[i] + opModel := operationModel{ + Model: lb, + OnModelUpdates: getNonZeroLoadBalancerMutableFields(lb), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) 
+} + +// RemoveLoadBalancerVipsOps removes the provided VIPs from the provided load +// balancer set and returns the corresponding ops +func RemoveLoadBalancerVipsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lb *nbdb.LoadBalancer, vips ...string) ([]libovsdb.Operation, error) { + originalVips := lb.Vips + lb.Vips = make(map[string]string, len(vips)) + for _, vip := range vips { + lb.Vips[vip] = "" + } + opModel := operationModel{ + Model: lb, + OnModelMutations: []interface{}{&lb.Vips}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + ops, err := modelClient.DeleteOps(ops, opModel) + lb.Vips = originalVips + return ops, err +} + +// DeleteLoadBalancersOps deletes the provided load balancers and returns the +// corresponding ops +func DeleteLoadBalancersOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(lbs)) + for i := range lbs { + // can't use i in the predicate, for loop replaces it in-memory + lb := lbs[i] + opModel := operationModel{ + Model: lb, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModels...) +} + +// DeleteLoadBalancers deletes the provided load balancers +func DeleteLoadBalancers(nbClient libovsdbclient.Client, lbs []*nbdb.LoadBalancer) error { + ops, err := DeleteLoadBalancersOps(nbClient, nil, lbs...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// ListLoadBalancers looks up all load balancers from the cache +func ListLoadBalancers(nbClient libovsdbclient.Client) ([]*nbdb.LoadBalancer, error) { + lbs := []*nbdb.LoadBalancer{} + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + err := nbClient.List(ctx, &lbs) + return lbs, err +} + +type loadBalancerPredicate func(*nbdb.LoadBalancer) bool + +// FindLoadBalancersWithPredicate looks up loadbalancers from the cache +// based on a given predicate +func FindLoadBalancersWithPredicate(nbClient libovsdbclient.Client, p loadBalancerPredicate) ([]*nbdb.LoadBalancer, error) { + found := []*nbdb.LoadBalancer{} + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go new file mode 100644 index 000000000..1f7a76ba8 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go @@ -0,0 +1,61 @@ +package ops + +import ( + libovsdbclient "github.com/ovn-org/libovsdb/client" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// CreateOrUpdateStaticMacBinding creates or updates the provided static mac binding +func CreateOrUpdateStaticMacBinding(nbClient libovsdbclient.Client, smbs ...*nbdb.StaticMACBinding) error { + opModels := make([]operationModel, len(smbs)) + for i := range smbs { + opModel := operationModel{ + Model: smbs[i], + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + opModels[i] = opModel + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModels...) 
+ return err +} + +// DeleteStaticMacBindings deletes the provided static mac bindings +func DeleteStaticMacBindings(nbClient libovsdbclient.Client, smbs ...*nbdb.StaticMACBinding) error { + opModels := make([]operationModel, len(smbs)) + for i := range smbs { + opModel := operationModel{ + Model: smbs[i], + ErrNotFound: false, + BulkOp: false, + } + opModels[i] = opModel + } + + m := newModelClient(nbClient) + return m.Delete(opModels...) +} + +type staticMACBindingPredicate func(*nbdb.StaticMACBinding) bool + +// DeleteStaticMACBindingWithPredicate deletes a Static MAC entry for a logical port from the cache +func DeleteStaticMACBindingWithPredicate(nbClient libovsdbclient.Client, p staticMACBindingPredicate) error { + found := []*nbdb.StaticMACBinding{} + opModel := operationModel{ + ModelPredicate: p, + ExistingResult: &found, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Delete(opModel) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go new file mode 100644 index 000000000..8f1814968 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go @@ -0,0 +1,66 @@ +package ops + +import ( + "reflect" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +func equalsMeterBand(a, b *nbdb.MeterBand) bool { + return a.Action == b.Action && + a.BurstSize == b.BurstSize && + a.Rate == b.Rate && + reflect.DeepEqual(a.ExternalIDs, b.ExternalIDs) +} + +// CreateMeterBandOps creates the provided meter band if it does not exist +func CreateMeterBandOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, meterBand *nbdb.MeterBand) ([]ovsdb.Operation, error) { + bands := []*nbdb.MeterBand{} + opModel := operationModel{ + Model: meterBand, + ModelPredicate: func(item *nbdb.MeterBand) bool { return equalsMeterBand(item, meterBand) }, + OnModelUpdates: onModelUpdatesNone(), + ExistingResult: &bands, + DoAfter: func() { + // in case we have multiple equal bands, pick the first one for + // convergence, OVSDB will remove unreferenced ones + if len(bands) > 0 { + uuids := sets.NewString() + for _, band := range bands { + uuids.Insert(band.UUID) + } + meterBand.UUID = uuids.List()[0] + } + }, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} + +// CreateOrUpdateMeterOps creates or updates the provided meter associated to +// the provided meter bands and returns the corresponding ops +func CreateOrUpdateMeterOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, meter *nbdb.Meter, meterBands []*nbdb.MeterBand, fields ...interface{}) ([]ovsdb.Operation, error) { + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } + meter.Bands = make([]string, 0, len(meterBands)) + for _, band := range meterBands { + meter.Bands = append(meter.Bands, band.UUID) + } + opModel := operationModel{ + Model: meter, + OnModelUpdates: fields, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go new 
file mode 100644 index 000000000..b40dd4104 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go @@ -0,0 +1,516 @@ +package ops + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +func getUUID(model model.Model) string { + switch t := model.(type) { + case *nbdb.ACL: + return t.UUID + case *nbdb.AddressSet: + return t.UUID + case *nbdb.BFD: + return t.UUID + case *nbdb.Copp: + return t.UUID + case *nbdb.GatewayChassis: + return t.UUID + case *nbdb.LoadBalancer: + return t.UUID + case *nbdb.LoadBalancerGroup: + return t.UUID + case *nbdb.LogicalRouter: + return t.UUID + case *nbdb.LogicalRouterPolicy: + return t.UUID + case *nbdb.LogicalRouterPort: + return t.UUID + case *nbdb.LogicalRouterStaticRoute: + return t.UUID + case *nbdb.LogicalSwitch: + return t.UUID + case *nbdb.LogicalSwitchPort: + return t.UUID + case *nbdb.NAT: + return t.UUID + case *nbdb.PortGroup: + return t.UUID + case *nbdb.NBGlobal: + return t.UUID + case *nbdb.MeterBand: + return t.UUID + case *nbdb.Meter: + return t.UUID + case *nbdb.Sample: + return t.UUID + case *nbdb.SampleCollector: + return t.UUID + case *nbdb.SamplingApp: + return t.UUID + case *nbdb.StaticMACBinding: + return t.UUID + case *sbdb.Chassis: + return t.UUID + case *sbdb.ChassisPrivate: + return t.UUID + case *sbdb.IGMPGroup: + return t.UUID + case *sbdb.Encap: + return t.UUID + case *sbdb.PortBinding: + return t.UUID + case *sbdb.SBGlobal: + return t.UUID + case *nbdb.QoS: + return t.UUID + case *nbdb.ChassisTemplateVar: + return t.UUID + case *nbdb.DHCPOptions: + return t.UUID + default: + panic(fmt.Sprintf("getUUID: unknown model %T", t)) + } +} + +func setUUID(model model.Model, uuid string) { + switch t := model.(type) { + case *nbdb.ACL: + t.UUID = uuid + case *nbdb.AddressSet: + t.UUID = uuid + case *nbdb.BFD: + t.UUID = uuid + case *nbdb.Copp: + t.UUID = uuid + case *nbdb.GatewayChassis: + t.UUID = uuid + case *nbdb.LoadBalancer: + t.UUID = uuid + case *nbdb.LoadBalancerGroup: + t.UUID = uuid + case *nbdb.LogicalRouter: + t.UUID = uuid + case *nbdb.LogicalRouterPolicy: + t.UUID = uuid + case *nbdb.LogicalRouterPort: + t.UUID = uuid + case *nbdb.LogicalRouterStaticRoute: + t.UUID = uuid + case *nbdb.LogicalSwitch: + t.UUID = uuid + case *nbdb.LogicalSwitchPort: + t.UUID = uuid + case *nbdb.NAT: + t.UUID = uuid + case *nbdb.PortGroup: + t.UUID = uuid + case *nbdb.NBGlobal: + t.UUID = uuid + case *nbdb.MeterBand: + t.UUID = uuid + case *nbdb.Meter: + t.UUID = uuid + case *nbdb.Sample: + t.UUID = uuid + case *nbdb.SampleCollector: + t.UUID = uuid + case *nbdb.SamplingApp: + t.UUID = uuid + case *nbdb.StaticMACBinding: + t.UUID = uuid + case *sbdb.Chassis: + t.UUID = uuid + case *sbdb.ChassisPrivate: + t.UUID = uuid + case *sbdb.IGMPGroup: + t.UUID = uuid + case *sbdb.Encap: + t.UUID = uuid + case *sbdb.PortBinding: + t.UUID = uuid + case *sbdb.SBGlobal: + t.UUID = uuid + case *nbdb.QoS: + t.UUID = uuid + case *nbdb.ChassisTemplateVar: + t.UUID = uuid + case *nbdb.DHCPOptions: + t.UUID = uuid + default: + panic(fmt.Sprintf("setUUID: unknown model %T", t)) + } +} + +func copyIndexes(model model.Model) model.Model { + switch t := model.(type) { + case *nbdb.ACL: + return &nbdb.ACL{ + UUID: t.UUID, + ExternalIDs: 
map[string]string{ + types.PrimaryIDKey: t.ExternalIDs[types.PrimaryIDKey], + }, + } + case *nbdb.AddressSet: + return &nbdb.AddressSet{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.BFD: + return &nbdb.BFD{ + UUID: t.UUID, + LogicalPort: t.LogicalPort, + DstIP: t.DstIP, + } + case *nbdb.Copp: + return &nbdb.Copp{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.GatewayChassis: + return &nbdb.GatewayChassis{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LoadBalancer: + return &nbdb.LoadBalancer{ + UUID: t.UUID, + // client index + Name: t.Name, + } + case *nbdb.LoadBalancerGroup: + return &nbdb.LoadBalancerGroup{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LogicalRouter: + return &nbdb.LogicalRouter{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LogicalRouterPolicy: + return &nbdb.LogicalRouterPolicy{ + UUID: t.UUID, + } + case *nbdb.LogicalRouterPort: + return &nbdb.LogicalRouterPort{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LogicalRouterStaticRoute: + return &nbdb.LogicalRouterStaticRoute{ + UUID: t.UUID, + } + case *nbdb.LogicalSwitch: + return &nbdb.LogicalSwitch{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LogicalSwitchPort: + return &nbdb.LogicalSwitchPort{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.NAT: + return &nbdb.NAT{ + UUID: t.UUID, + } + case *nbdb.PortGroup: + return &nbdb.PortGroup{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.NBGlobal: + return &nbdb.NBGlobal{ + UUID: t.UUID, + } + case *nbdb.MeterBand: + return &nbdb.MeterBand{ + UUID: t.UUID, + } + case *nbdb.Meter: + return &nbdb.Meter{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.Sample: + return &nbdb.Sample{ + UUID: t.UUID, + Metadata: t.Metadata, + } + case *nbdb.SampleCollector: + return &nbdb.SampleCollector{ + UUID: t.UUID, + ID: t.ID, + } + case *nbdb.SamplingApp: + return &nbdb.SamplingApp{ + UUID: t.UUID, + Type: t.Type, + } + case *nbdb.StaticMACBinding: + return &nbdb.StaticMACBinding{ + UUID: t.UUID, + LogicalPort: t.LogicalPort, + IP: t.IP, + } + case *sbdb.Chassis: + return &sbdb.Chassis{ + UUID: t.UUID, + Name: t.Name, + } + case *sbdb.ChassisPrivate: + return &sbdb.ChassisPrivate{ + UUID: t.UUID, + Name: t.Name, + } + case *sbdb.IGMPGroup: + return &sbdb.IGMPGroup{ + UUID: t.UUID, + } + case *sbdb.Encap: + return &sbdb.Encap{ + UUID: t.UUID, + Type: t.Type, + IP: t.IP, + ChassisName: t.ChassisName, + } + case *sbdb.PortBinding: + return &sbdb.PortBinding{ + UUID: t.UUID, + LogicalPort: t.LogicalPort, + Datapath: t.Datapath, + TunnelKey: t.TunnelKey, + } + case *sbdb.SBGlobal: + return &sbdb.SBGlobal{ + UUID: t.UUID, + } + case *nbdb.QoS: + return &nbdb.QoS{ + UUID: t.UUID, + ExternalIDs: map[string]string{ + types.PrimaryIDKey: t.ExternalIDs[types.PrimaryIDKey], + }, + } + case *nbdb.ChassisTemplateVar: + return &nbdb.ChassisTemplateVar{ + UUID: t.UUID, + Chassis: t.Chassis, + } + case *nbdb.DHCPOptions: + return &nbdb.DHCPOptions{ + UUID: t.UUID, + ExternalIDs: copyExternalIDs(t.ExternalIDs, types.PrimaryIDKey), + } + default: + panic(fmt.Sprintf("copyIndexes: unknown model %T", t)) + } +} + +func getListFromModel(model model.Model) interface{} { + switch t := model.(type) { + case *nbdb.ACL: + return &[]*nbdb.ACL{} + case *nbdb.AddressSet: + return &[]*nbdb.AddressSet{} + case *nbdb.BFD: + return &[]*nbdb.BFD{} + case *nbdb.Copp: + return &[]*nbdb.Copp{} + case *nbdb.GatewayChassis: + return &[]*nbdb.GatewayChassis{} + case *nbdb.LoadBalancer: + return &[]*nbdb.LoadBalancer{} + case *nbdb.LoadBalancerGroup: + return &[]*nbdb.LoadBalancerGroup{} + case 
*nbdb.LogicalRouter: + return &[]*nbdb.LogicalRouter{} + case *nbdb.LogicalRouterPolicy: + return &[]*nbdb.LogicalRouterPolicy{} + case *nbdb.LogicalRouterPort: + return &[]*nbdb.LogicalRouterPort{} + case *nbdb.LogicalRouterStaticRoute: + return &[]*nbdb.LogicalRouterStaticRoute{} + case *nbdb.LogicalSwitch: + return &[]*nbdb.LogicalSwitch{} + case *nbdb.LogicalSwitchPort: + return &[]*nbdb.LogicalSwitchPort{} + case *nbdb.NAT: + return &[]*nbdb.NAT{} + case *nbdb.PortGroup: + return &[]*nbdb.PortGroup{} + case *nbdb.NBGlobal: + return &[]*nbdb.NBGlobal{} + case *nbdb.MeterBand: + return &[]*nbdb.MeterBand{} + case *nbdb.Meter: + return &[]*nbdb.Meter{} + case *nbdb.Sample: + return &[]*nbdb.Sample{} + case *nbdb.SampleCollector: + return &[]*nbdb.SampleCollector{} + case *nbdb.SamplingApp: + return &[]*nbdb.SamplingApp{} + case *nbdb.StaticMACBinding: + return &[]*nbdb.StaticMACBinding{} + case *sbdb.Chassis: + return &[]*sbdb.Chassis{} + case *sbdb.ChassisPrivate: + return &[]*sbdb.ChassisPrivate{} + case *sbdb.IGMPGroup: + return &[]*sbdb.IGMPGroup{} + case *sbdb.Encap: + return &[]*sbdb.Encap{} + case *sbdb.PortBinding: + return &[]*sbdb.PortBinding{} + case *nbdb.QoS: + return &[]nbdb.QoS{} + case *nbdb.ChassisTemplateVar: + return &[]*nbdb.ChassisTemplateVar{} + case *nbdb.DHCPOptions: + return &[]nbdb.DHCPOptions{} + default: + panic(fmt.Sprintf("getModelList: unknown model %T", t)) + } +} + +// onModels applies the provided function to a collection of +// models presented in different ways: +// - a single model (pointer to a struct) +// - a slice of models or pointer to slice of models +// - a slice of structs or pointer to a slice of structs +// If the provided function returns an error, iteration stops and +// that error is returned. +func onModels(models interface{}, do func(interface{}) error) error { + v := reflect.ValueOf(models) + if !v.IsValid() { + return nil + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return nil + } + v = v.Elem() + } + switch v.Kind() { + case reflect.Slice: + switch v.Type().Elem().Kind() { + case reflect.Struct: + for i := 0; i < v.Len(); i++ { + model := v.Index(i).Addr().Interface() + err := do(model) + if err != nil { + return err + } + } + case reflect.Interface: + fallthrough + case reflect.Ptr: + for i := 0; i < v.Len(); i++ { + model := v.Index(i).Interface() + err := do(model) + if err != nil { + return err + } + } + default: + panic(fmt.Sprintf("Expected slice of pointers or structs but got %s", v.Type().Elem().Kind())) + } + case reflect.Struct: + err := do(models) + if err != nil { + return err + } + default: + panic(fmt.Sprintf("Expected slice or struct but got %s", v.Kind())) + } + return nil +} + +// buildFailOnDuplicateOps builds a wait operation on a condition that will fail +// if a duplicate to the provided model is considered to be found. We use this +// to avoid duplicates on certain unknown scenarios that are still to be tracked +// down. See: https://bugzilla.redhat.com/show_bug.cgi?id=2042001. +// When no specific operation is required for the provided model, returns an empty +// array for convenience. +func buildFailOnDuplicateOps(c client.Client, m model.Model) ([]ovsdb.Operation, error) { + // Right now we mostly consider models with a "Name" field that is not an + // index for which we don't expect duplicate names. + // A duplicate Name field that is an index will fail without the + // need of this wait operation. 
+ // Some models that require a complex condition to detect duplicates are not + // considered for the time being due to the performance hit (e.g ACLs). + timeout := types.OVSDBWaitTimeout + var field interface{} + var value string + switch t := m.(type) { + case *nbdb.LogicalRouter: + field = &t.Name + value = t.Name + case *nbdb.LogicalSwitch: + field = &t.Name + value = t.Name + case *nbdb.LogicalRouterPolicy: + condPriority := model.Condition{ + Field: &t.Priority, + Function: ovsdb.ConditionEqual, + Value: t.Priority, + } + condMatch := model.Condition{ + Field: &t.Match, + Function: ovsdb.ConditionEqual, + Value: t.Match, + } + return c.WhereAll(t, condPriority, condMatch).Wait( + ovsdb.WaitConditionNotEqual, + &timeout, + t, + &t.Priority, + &t.Match, + ) + default: + return []ovsdb.Operation{}, nil + } + + cond := model.Condition{ + Field: field, + Function: ovsdb.ConditionEqual, + Value: value, + } + return c.WhereAny(m, cond).Wait(ovsdb.WaitConditionNotEqual, &timeout, m, field) +} + +// getAllUpdatableFields returns a list of all of the columns/fields that can be updated for a model +func getAllUpdatableFields(model model.Model) []interface{} { + switch t := model.(type) { + case *nbdb.LogicalSwitchPort: + return []interface{}{&t.Addresses, &t.Type, &t.TagRequest, &t.Options, &t.PortSecurity} + case *nbdb.PortGroup: + return []interface{}{&t.ACLs, &t.Ports, &t.ExternalIDs} + default: + panic(fmt.Sprintf("getAllUpdatableFields: unknown model %T", t)) + } +} + +func copyExternalIDs(externalIDs map[string]string, keys ...string) map[string]string { + var externalIDsCopy map[string]string + for _, key := range keys { + value, ok := externalIDs[key] + if ok { + if externalIDsCopy == nil { + externalIDsCopy = map[string]string{} + } + externalIDsCopy[key] = value + } + } + return externalIDsCopy +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go new file mode 100644 index 000000000..04f54f0e8 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go @@ -0,0 +1,511 @@ +package ops + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/klog/v2" +) + +var errMultipleResults = errors.New("unexpectedly found multiple results for provided predicate") +var errNoIndexes = errors.New("no indexes found for given model") + +type modelClient struct { + client client.Client +} + +func newModelClient(client client.Client) modelClient { + return modelClient{ + client: client, + } +} + +/* +extractUUIDsFromModels is a helper function which constructs a mutation +for the specified field and mutator extracting the UUIDs of the provided +models as the value for the mutation. +*/ +func extractUUIDsFromModels(models interface{}) []string { + ids := []string{} + _ = onModels(models, func(model interface{}) error { + uuid := getUUID(model) + if uuid != "" { + ids = append(ids, uuid) + } + return nil + }) + if len(ids) == 0 { + return nil + } + return ids +} + +// buildMutationsFromFields builds mutations that use the fields as values. 
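// As an editorial illustration (a sketch, not part of the vendored file): for a
// map column passed by pointer, a delete mutator treats an empty value as
// "remove this key regardless of its current value", while a non-empty value
// removes only that exact key/value pair. Assuming use inside this package:
//
//	opts := map[string]string{
//	    "foo": "",    // empty value: remove the key "foo" whatever it maps to
//	    "bar": "baz", // non-empty value: remove "bar" only if it currently maps to "baz"
//	}
//	mutations, err := buildMutationsFromFields([]interface{}{&opts}, ovsdb.MutateOperationDelete)
//	// on success, mutations holds one delete mutation whose value is the key set
//	// {"foo"} and one whose value is the pair {"bar": "baz"}; err reports
//	// unsupported field types.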
+func buildMutationsFromFields(fields []interface{}, mutator ovsdb.Mutator) ([]model.Mutation, error) { + mutations := []model.Mutation{} + for _, field := range fields { + switch v := field.(type) { + case *map[string]string: + if v == nil || len(*v) == 0 { + continue + } + if mutator == ovsdb.MutateOperationDelete { + // turn empty map values into a mutation to remove the key for + // delete mutations + removeKeys := make([]string, 0, len(*v)) + updateKeys := make(map[string]string, len(*v)) + for key, value := range *v { + if value == "" { + removeKeys = append(removeKeys, key) + } else { + updateKeys[key] = value + } + } + if len(removeKeys) > 0 { + mutation := model.Mutation{ + Field: field, + Mutator: mutator, + Value: removeKeys, + } + mutations = append(mutations, mutation) + } + if len(updateKeys) > 0 { + mutation := model.Mutation{ + Field: field, + Mutator: mutator, + Value: updateKeys, + } + mutations = append(mutations, mutation) + } + continue + } + // RFC 7047, section 5.1: a MutateOperationDelete is generated + // automatically for every updated key. + removeKeys := make([]string, 0, len(*v)) + for key := range *v { + removeKeys = append(removeKeys, key) + } + if len(removeKeys) > 0 { + mutation := model.Mutation{ + Field: field, + Mutator: ovsdb.MutateOperationDelete, + Value: removeKeys, + } + mutations = append(mutations, mutation) + } + mutation := model.Mutation{ + Field: field, + Mutator: mutator, + Value: *v, + } + mutations = append(mutations, mutation) + case *[]string: + if v == nil || len(*v) == 0 { + continue + } + if mutator == ovsdb.MutateOperationInsert { + // Most of string sets are UUIDs. The real server does not allow + // this to be empty but the test server does for now. On other + // types of sets most probably there is no need to have empty + // items. So catch this early. + for _, value := range *v { + if value == "" { + return nil, fmt.Errorf("unsupported mutation of set with empty values: %v", *v) + } + } + } + mutation := model.Mutation{ + Field: field, + Mutator: mutator, + Value: *v, + } + mutations = append(mutations, mutation) + default: + return nil, fmt.Errorf("mutation for type %T not implemented", v) + } + } + + return mutations, nil +} + +/* +operationModel is a struct which uses reflection to determine and perform +idempotent operations against OVS DB (NB DB by default). +*/ +type operationModel struct { + // Model specifies the model to be created, or to look up in the cache. + // The values in the fields of the Model are used for mutations and updates + // as well. If this Model is looked up or created, it will have its UUID set + // after the operation. + Model interface{} + // ModelPredicate specifies a predicate to look up models in the cache. + // If Model is provided with non-zero index column values, ModelPredicate + // will be ignored. + ModelPredicate interface{} + // ExistingResult is where the results of the look up are added to. + // Required when Model is not specified. + ExistingResult interface{} + // OnModelMutations specifies the fields from Model that will be used as + // the mutation value. + OnModelMutations []interface{} + // OnModelUpdates specifies the fields from Model that will be used as + // the update value. + // Note: while it is okay to have update and mutate operations on the same row, it + // is an undefined behavior if the same column is used in both update and mutate. + OnModelUpdates []interface{} + // ErrNotFound flags this operation to fail with ErrNotFound if a model is + // not found. 
+ ErrNotFound bool + // BulkOp flags this operation as a bulk operation capable of updating or + // mutating more than 1 model. + BulkOp bool + // DoAfter is invoked at the end of the operation and allows setting up a + // subsequent operation with values obtained from this one. + // If model lookup was successful, or a new db entry was created, + // Model will have UUID set, and it can be used in DoAfter. This only works + // if BulkOp is false and Model != nil. + DoAfter func() +} + +func onModelUpdatesNone() []interface{} { + return nil +} + +func onModelUpdatesAllNonDefault() []interface{} { + return []interface{}{} +} + +/* +CreateOrUpdate performs idempotent operations against libovsdb according to the +following logic: + +a) performs a lookup of the models in the cache by ModelPredicate if provided, +or by Model otherwise. If the models do not exist and ErrNotFound is set, +it returns ErrNotFound. + +b) if OnModelUpdates is specified; it performs a direct update of the model if +it exists. + +c) if OnModelMutations is specified; it performs a direct mutation (insert) of +the Model if it exists. + +d) if b) and c) are not true, but Model is provided, it creates the Model +if it does not exist. + +e) if none of the above are true, ErrNotFound is returned. + +If BulkOp is set, update or mutate can happen across multiple models found. +*/ +func (m *modelClient) CreateOrUpdate(opModels ...operationModel) ([]ovsdb.OperationResult, error) { + created, ops, err := m.createOrUpdateOps(nil, opModels...) + if err != nil { + return nil, err + } + return TransactAndCheckAndSetUUIDs(m.client, created, ops) +} + +func (m *modelClient) CreateOrUpdateOps(ops []ovsdb.Operation, opModels ...operationModel) ([]ovsdb.Operation, error) { + _, ops, err := m.createOrUpdateOps(ops, opModels...) + return ops, err +} + +func (m *modelClient) createOrUpdateOps(ops []ovsdb.Operation, opModels ...operationModel) (interface{}, []ovsdb.Operation, error) { + hasGuardOp := len(ops) > 0 && isGuardOp(&ops[0]) + guardOp := []ovsdb.Operation{} + doWhenFound := func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) { + // nil represents onModelUpdatesNone + if opModel.OnModelUpdates != nil { + o, err = m.update(model, opModel) + } + // Note: while it is okay to have update and mutate operations on the same row, it is + // an undefined behavior if the same exact column is used in both update and mutate. + if err == nil && opModel.OnModelMutations != nil { + var o2 []ovsdb.Operation + o2, err = m.mutate(model, opModel, ovsdb.MutateOperationInsert) + o = append(o, o2...) + } + return + } + doWhenNotFound := func(model interface{}, opModel *operationModel) ([]ovsdb.Operation, error) { + if !hasGuardOp { + // for the first insert of certain models, build a wait operation + // that checks for duplicates as a guard op to prevent against + // duplicate transactions + var err error + guardOp, err = buildFailOnDuplicateOps(m.client, opModel.Model) + if err != nil { + return nil, err + } + hasGuardOp = len(guardOp) > 0 + } + return m.create(opModel) + } + created, ops, err := m.buildOps(ops, doWhenFound, doWhenNotFound, opModels...) + if len(guardOp) > 0 { + // set the guard op as the first of the list + ops = append(guardOp, ops...) + } + return created, ops, err +} + +/* +Delete performs idempotent delete operations against libovsdb according to the +following logic: + +a) performs a lookup of the models in the cache by ModelPredicate if provided, +or by Model otherwise.
If the models do not exist and ErrNotFound is set, +it returns ErrNotFound. + +b) if OnModelMutations is specified; it performs a direct mutation (delete) of the +Model if it exists. + +c) if b) is not true; it performs a direct delete of the Model if it exists. + +If BulkOp is set, delete or mutate can happen across multiple models found. +*/ +func (m *modelClient) Delete(opModels ...operationModel) error { + ops, err := m.DeleteOps(nil, opModels...) + if err != nil { + return err + } + _, err = TransactAndCheck(m.client, ops) + return err +} + +func (m *modelClient) DeleteOps(ops []ovsdb.Operation, opModels ...operationModel) ([]ovsdb.Operation, error) { + doWhenFound := func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) { + if opModel.OnModelMutations != nil { + return m.mutate(model, opModel, ovsdb.MutateOperationDelete) + } else { + return m.delete(model, opModel) + } + } + _, ops, err := m.buildOps(ops, doWhenFound, nil, opModels...) + return ops, err +} + +type opModelToOpMapper func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) + +func (m *modelClient) buildOps(ops []ovsdb.Operation, doWhenFound opModelToOpMapper, doWhenNotFound opModelToOpMapper, opModels ...operationModel) (interface{}, []ovsdb.Operation, error) { + if ops == nil { + ops = []ovsdb.Operation{} + } + notfound := []interface{}{} + for _, opModel := range opModels { + // do lookup + err := m.lookup(&opModel) + if err != nil && !errors.Is(err, client.ErrNotFound) { + return nil, nil, fmt.Errorf("unable to lookup model %+v: %w", opModel, err) + } + + // do updates + var hadExistingResults bool + err = onModels(opModel.ExistingResult, func(model interface{}) error { + if hadExistingResults && !opModel.BulkOp { + return errMultipleResults + } + hadExistingResults = true + + if doWhenFound != nil { + o, err := doWhenFound(model, &opModel) + if err != nil { + return err + } + ops = append(ops, o...) + } + return nil + }) + if err != nil { + return nil, nil, err + } + + // otherwise act when not found + if !hadExistingResults { + // return ErrNotFound, + // - if caller explicitly requested it or + // - failed to provide a Model for us to apply the operation on + if opModel.ErrNotFound || (doWhenNotFound != nil && opModel.Model == nil) { + return nil, nil, client.ErrNotFound + } + if doWhenNotFound != nil && opModel.Model != nil { + o, err := doWhenNotFound(nil, &opModel) + if err != nil { + return nil, nil, err + } + ops = append(ops, o...) + notfound = append(notfound, opModel.Model) + } + } + + if opModel.DoAfter != nil { + opModel.DoAfter() + } + } + + return notfound, ops, nil +} + +/* +create does a bit more than just "create". create needs to set the generated +UUID (because if this function is called we know the item does not exist yet) +then create the item.
Generates an until clause and uses a wait operation to avoid +https://bugzilla.redhat.com/show_bug.cgi?id=2042001 +*/ +func (m *modelClient) create(opModel *operationModel) ([]ovsdb.Operation, error) { + uuid := getUUID(opModel.Model) + if uuid == "" { + setUUID(opModel.Model, buildNamedUUID()) + } + + ops, err := m.client.Create(opModel.Model) + if err != nil { + return nil, fmt.Errorf("unable to create model, err: %w", err) + } + + klog.V(5).Infof("Create operations generated as: %+v", ops) + return ops, nil +} + +func (m *modelClient) update(lookUpModel interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) { + o, err = m.client.Where(lookUpModel).Update(opModel.Model, opModel.OnModelUpdates...) + if err != nil { + return nil, fmt.Errorf("unable to update model, err: %w", err) + } + klog.V(5).Infof("Update operations generated as: %+v", o) + return o, nil +} + +func (m *modelClient) mutate(lookUpModel interface{}, opModel *operationModel, mutator ovsdb.Mutator) (o []ovsdb.Operation, err error) { + if opModel.OnModelMutations == nil { + return nil, nil + } + modelMutations, err := buildMutationsFromFields(opModel.OnModelMutations, mutator) + if len(modelMutations) == 0 || err != nil { + return nil, err + } + o, err = m.client.Where(lookUpModel).Mutate(opModel.Model, modelMutations...) + if err != nil { + return nil, fmt.Errorf("unable to mutate model, err: %w", err) + } + klog.V(5).Infof("Mutate operations generated as: %+v", o) + return o, nil +} + +func (m *modelClient) delete(lookUpModel interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) { + o, err = m.client.Where(lookUpModel).Delete() + if err != nil { + return nil, fmt.Errorf("unable to delete model, err: %w", err) + } + klog.V(5).Infof("Delete operations generated as: %+v", o) + return o, nil +} + +func (m *modelClient) Lookup(opModels ...operationModel) error { + _, _, err := m.buildOps(nil, nil, nil, opModels...) + return err +} + +// CreateOrUpdate, Delete and Lookup can be called to +// 1. create or update a single model +// Model should be set, bulkOp = false, errNotfound = false +// 2. update/delete/lookup 0..n models (create can't be done for multiple models at the same time) +// Model index or predicate should be set +// +// The allowed combination of operationModel fields is different for these cases. 
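// For illustration only (an editorial sketch, not vendored code; "node-a" and
// nbClient are placeholders for a real switch name and a connected NB client),
// the first case typically looks like:
//
//	sw := &nbdb.LogicalSwitch{
//	    Name:        "node-a",
//	    ExternalIDs: map[string]string{"example": "true"},
//	}
//	opModel := operationModel{
//	    Model:          sw,
//	    OnModelUpdates: onModelUpdatesAllNonDefault(), // update all non-default columns
//	    ErrNotFound:    false,
//	    BulkOp:         false,
//	}
//	_, err := newModelClient(nbClient).CreateOrUpdate(opModel)
//
// which creates the switch if the Name index does not match an existing row and
// updates it otherwise.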
+// Both Model db index, and ModelPredicate can only be empty for the first case +func lookupRequired(opModel *operationModel) bool { + // we know create is not supposed to be performed, if these fields are set + if opModel.BulkOp || opModel.ErrNotFound { + return true + } + return false +} + +// lookup the model in the cache prioritizing provided indexes over a +// predicate +// If lookup was successful, opModel.Model will have UUID set, +// so that further user operations with the same model are indexed by UUID +func (m *modelClient) lookup(opModel *operationModel) error { + if opModel.ExistingResult == nil && opModel.Model != nil { + opModel.ExistingResult = getListFromModel(opModel.Model) + } + + var err error + if opModel.Model != nil { + err = m.where(opModel) + if err != errNoIndexes { + // if index wasn't provided by the Model, try predicate search + // otherwise return where result + return err + } + } + // if index wasn't provided by the Model (errNoIndexes) or Model == nil, try predicate search + if opModel.ModelPredicate != nil { + return m.whereCache(opModel) + } + // the only operation that can be performed without a lookup (it can have no db indexes and no ModelPredicate set) + // is Create. + if lookupRequired(opModel) { + return fmt.Errorf("missing model indexes or predicate when a lookup was required") + } + return nil +} + +func (m *modelClient) where(opModel *operationModel) error { + copyModel := copyIndexes(opModel.Model) + if reflect.ValueOf(copyModel).Elem().IsZero() { + // no indexes available + return errNoIndexes + } + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + var err error + if err = m.client.Where(copyModel).List(ctx, opModel.ExistingResult); err != nil { + return err + } + if opModel.Model == nil || opModel.BulkOp { + return nil + } + // for non-bulk op cases, copy (the one) uuid found to model provided. + // so that further user operations with the same model are indexed by UUID + err = onModels(opModel.ExistingResult, func(model interface{}) error { + uuid := getUUID(model) + setUUID(opModel.Model, uuid) + return nil + }) + return err +} + +func (m *modelClient) whereCache(opModel *operationModel) error { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + var err error + if err = m.client.WhereCache(opModel.ModelPredicate).List(ctx, opModel.ExistingResult); err != nil { + return err + } + + if opModel.Model == nil || opModel.BulkOp { + return nil + } + + // for non-bulk op cases, copy (the one) uuid found to model provided. 
+ // so that further user operations with the same model are indexed by UUID + err = onModels(opModel.ExistingResult, func(model interface{}) error { + uuid := getUUID(model) + setUUID(opModel.Model, uuid) + return nil + }) + return err +} + +func isGuardOp(op *ovsdb.Operation) bool { + return op != nil && op.Op == ovsdb.OperationWait && op.Timeout != nil && *op.Timeout == types.OVSDBWaitTimeout +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go new file mode 100644 index 000000000..1860668a7 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go @@ -0,0 +1,28 @@ +package ops + +import ( + "fmt" + "sync/atomic" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand" +) + +const ( + namedUUIDPrefix = 'u' +) + +var ( + namedUUIDCounter = cryptorand.Uint32() +) + +// isNamedUUID checks if the passed id is a named-uuid built with +// buildNamedUUID +func isNamedUUID(id string) bool { + return id != "" && id[0] == namedUUIDPrefix +} + +// buildNamedUUID builds an id that can be used as a named-uuid +// as per OVSDB RFC 7047 section 5.1 +func buildNamedUUID() string { + return fmt.Sprintf("%c%010d", namedUUIDPrefix, atomic.AddUint32(&namedUUIDCounter, 1)) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go new file mode 100644 index 000000000..2bb46d3a8 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go @@ -0,0 +1,64 @@ +package ops + +import ( + libovsdbclient "github.com/ovn-org/libovsdb/client" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// GetNBGlobal looks up the NB Global entry from the cache +func GetNBGlobal(nbClient libovsdbclient.Client, nbGlobal *nbdb.NBGlobal) (*nbdb.NBGlobal, error) { + found := []*nbdb.NBGlobal{} + opModel := operationModel{ + Model: nbGlobal, + ModelPredicate: func(item *nbdb.NBGlobal) bool { return true }, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// UpdateNBGlobalSetOptions sets options on the NB Global entry adding any +// missing, removing the ones set to an empty value and updating existing +func UpdateNBGlobalSetOptions(nbClient libovsdbclient.Client, nbGlobal *nbdb.NBGlobal) error { + // find the nbGlobal table's UUID, we don't have any other way to reliably look up this table entry since it can + // only be indexed by UUID + updatedNbGlobal, err := GetNBGlobal(nbClient, nbGlobal) + if err != nil { + return err + } + + if updatedNbGlobal.Options == nil { + updatedNbGlobal.Options = map[string]string{} + } + + for k, v := range nbGlobal.Options { + if v == "" { + delete(updatedNbGlobal.Options, k) + } else { + updatedNbGlobal.Options[k] = v + } + } + + // Update the options column in the nbGlobal entry since we already performed a lookup + opModel := operationModel{ + Model: updatedNbGlobal, + OnModelUpdates: []interface{}{ + &updatedNbGlobal.Options, + }, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err = m.CreateOrUpdate(opModel) + return err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go
b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go new file mode 100644 index 000000000..861a63cb9 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go @@ -0,0 +1,53 @@ +package ops + +import ( + "fmt" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" +) + +// UpdatePortBindingSetChassis sets the chassis column of the 'portBinding' row so that OVN thinks that +// the port binding 'portBinding' is bound on the chassis. Ideally it's ovn-controller which claims/binds +// a port binding. But for a remote chassis, we have to bind it as we created the remote chassis +// record for the remote zone nodes. +// TODO (numans) remove this function once OVN supports binding a port binding for a remote +// chassis. +func UpdatePortBindingSetChassis(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding, chassis *sbdb.Chassis) error { + ch, err := GetChassis(sbClient, chassis) + if err != nil { + return fmt.Errorf("failed to get chassis id %s(%s), error: %v", chassis.Name, chassis.Hostname, err) + } + portBinding.Chassis = &ch.UUID + + opModel := operationModel{ + Model: portBinding, + OnModelUpdates: []interface{}{&portBinding.Chassis}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + _, err = m.CreateOrUpdate(opModel) + return err +} + +// GetPortBinding looks up a portBinding in SBDB +func GetPortBinding(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding) (*sbdb.PortBinding, error) { + found := []*sbdb.PortBinding{} + opModel := operationModel{ + Model: portBinding, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go new file mode 100644 index 000000000..c8045c09e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go @@ -0,0 +1,329 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type portGroupPredicate func(group *nbdb.PortGroup) bool + +// FindPortGroupsWithPredicate looks up port groups from the cache based on a +// given predicate +func FindPortGroupsWithPredicate(nbClient libovsdbclient.Client, p portGroupPredicate) ([]*nbdb.PortGroup, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.PortGroup{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdatePortGroupsOps creates or updates the provided port groups +// returning the corresponding ops +func CreateOrUpdatePortGroupsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, pgs ...*nbdb.PortGroup) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(pgs)) + for i := range pgs { + pg := pgs[i] + opModel := operationModel{ + Model: pg, + OnModelUpdates: getAllUpdatableFields(pg), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m :=
newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// CreateOrUpdatePortGroups creates or updates the provided port groups +func CreateOrUpdatePortGroups(nbClient libovsdbclient.Client, pgs ...*nbdb.PortGroup) error { + ops, err := CreateOrUpdatePortGroupsOps(nbClient, nil, pgs...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// CreatePortGroup creates the provided port group if it doesn't exist +func CreatePortGroup(nbClient libovsdbclient.Client, portGroup *nbdb.PortGroup) error { + opModel := operationModel{ + Model: portGroup, + OnModelUpdates: onModelUpdatesNone(), + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +// GetPortGroup looks up a port group from the cache +func GetPortGroup(nbClient libovsdbclient.Client, pg *nbdb.PortGroup) (*nbdb.PortGroup, error) { + found := []*nbdb.PortGroup{} + opModel := operationModel{ + Model: pg, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +func AddPortsToPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, ports ...string) ([]libovsdb.Operation, error) { + if len(ports) == 0 { + return ops, nil + } + + pg := nbdb.PortGroup{ + Name: name, + Ports: ports, + } + + opModel := operationModel{ + Model: &pg, + OnModelMutations: []interface{}{&pg.Ports}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} + +// AddPortsToPortGroup adds the provided ports to the provided port group +func AddPortsToPortGroup(nbClient libovsdbclient.Client, name string, ports ...string) error { + ops, err := AddPortsToPortGroupOps(nbClient, nil, name, ports...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeletePortsFromPortGroupOps removes the provided ports from the provided port +// group and returns the corresponding ops +func DeletePortsFromPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, ports ...string) ([]libovsdb.Operation, error) { + if len(ports) == 0 { + return ops, nil + } + + pg := nbdb.PortGroup{ + Name: name, + Ports: ports, + } + + opModel := operationModel{ + Model: &pg, + OnModelMutations: []interface{}{&pg.Ports}, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeletePortsFromPortGroup removes the provided ports from the provided port +// group +func DeletePortsFromPortGroup(nbClient libovsdbclient.Client, name string, ports ...string) error { + ops, err := DeletePortsFromPortGroupOps(nbClient, nil, name, ports...) 
+ if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// AddACLsToPortGroupOps adds the provided ACLs to the provided port group and +// returns the corresponding ops +func AddACLsToPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + if len(acls) == 0 { + return ops, nil + } + + pg := nbdb.PortGroup{ + Name: name, + ACLs: make([]string, 0, len(acls)), + } + + for _, acl := range acls { + pg.ACLs = append(pg.ACLs, acl.UUID) + } + + opModel := operationModel{ + Model: &pg, + OnModelMutations: []interface{}{&pg.ACLs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} + +// UpdatePortGroupSetACLsOps updates the provided ACLs on the provided port group and +// returns the corresponding ops. It entirely replaces the existing ACLs on the PG with +// the newly provided list +func UpdatePortGroupSetACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls []*nbdb.ACL) ([]libovsdb.Operation, error) { + pg := nbdb.PortGroup{ + Name: name, + ACLs: make([]string, 0, len(acls)), + } + for _, acl := range acls { + pg.ACLs = append(pg.ACLs, acl.UUID) + } + opModel := operationModel{ + Model: &pg, + OnModelUpdates: []interface{}{&pg.ACLs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} + +// DeleteACLsFromPortGroupOps removes the provided ACLs from the provided port +// group and returns the corresponding ops +func DeleteACLsFromPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + if len(acls) == 0 { + return ops, nil + } + + pg := nbdb.PortGroup{ + Name: name, + ACLs: make([]string, 0, len(acls)), + } + + for _, acl := range acls { + pg.ACLs = append(pg.ACLs, acl.UUID) + } + + opModel := operationModel{ + Model: &pg, + OnModelMutations: []interface{}{&pg.ACLs}, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +func DeleteACLsFromPortGroups(nbClient libovsdbclient.Client, names []string, acls ...*nbdb.ACL) error { + var err error + var ops []libovsdb.Operation + for _, pgName := range names { + ops, err = DeleteACLsFromPortGroupOps(nbClient, ops, pgName, acls...) 
+ if err != nil { + return err + } + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +func DeleteACLsFromAllPortGroups(nbClient libovsdbclient.Client, acls ...*nbdb.ACL) error { + if len(acls) == 0 { + return nil + } + + pg := nbdb.PortGroup{ + ACLs: make([]string, 0, len(acls)), + } + + for _, acl := range acls { + pg.ACLs = append(pg.ACLs, acl.UUID) + } + + opModel := operationModel{ + Model: &pg, + ModelPredicate: func(item *nbdb.PortGroup) bool { return true }, + OnModelMutations: []interface{}{&pg.ACLs}, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + ops, err := m.DeleteOps(nil, opModel) + if err != nil { + return err + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeletePortGroupsOps deletes the provided port groups and returns the +// corresponding ops +func DeletePortGroupsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, names ...string) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(names)) + for _, name := range names { + pg := nbdb.PortGroup{ + Name: name, + } + opModel := operationModel{ + Model: &pg, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +// DeletePortGroups deletes the provided port groups and returns the +// corresponding ops +func DeletePortGroups(nbClient libovsdbclient.Client, names ...string) error { + ops, err := DeletePortGroupsOps(nbClient, nil, names...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeletePortGroupsWithPredicateOps returns the corresponding ops to delete port groups based on +// a given predicate +func DeletePortGroupsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p portGroupPredicate) ([]libovsdb.Operation, error) { + deleted := []*nbdb.PortGroup{} + opModel := operationModel{ + ModelPredicate: p, + ExistingResult: &deleted, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeletePortGroupsWithPredicate deletes the port groups based on the provided predicate +func DeletePortGroupsWithPredicate(nbClient libovsdbclient.Client, p portGroupPredicate) error { + ops, err := DeletePortGroupsWithPredicateOps(nbClient, nil, p) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go new file mode 100644 index 000000000..a83a176df --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go @@ -0,0 +1,119 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type QoSPredicate func(*nbdb.QoS) bool + +// FindQoSesWithPredicate looks up QoSes from the cache based on a +// given predicate +func FindQoSesWithPredicate(nbClient libovsdbclient.Client, p QoSPredicate) ([]*nbdb.QoS, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.QoS{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdateQoSesOps 
returns the ops to create or update the provided QoSes. +func CreateOrUpdateQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(qoses)) + for i := range qoses { + // can't use i in the predicate, for loop replaces it in-memory + qos := qoses[i] + opModel := operationModel{ + Model: qos, + OnModelUpdates: []interface{}{}, // update all fields + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +func UpdateQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(qoses)) + for i := range qoses { + // can't use i in the predicate, for loop replaces it in-memory + qos := qoses[i] + opModel := operationModel{ + Model: qos, + OnModelUpdates: []interface{}{}, // update all fields + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +// AddQoSesToLogicalSwitchOps returns the ops to add the provided QoSes to the switch +func AddQoSesToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + sw := &nbdb.LogicalSwitch{ + Name: name, + QOSRules: make([]string, 0, len(qoses)), + } + for _, qos := range qoses { + sw.QOSRules = append(sw.QOSRules, qos.UUID) + } + + opModels := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.QOSRules}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels) +} + +// DeleteQoSesOps returns the ops to delete the provided QoSes. +func DeleteQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(qoses)) + for i := range qoses { + // can't use i in the predicate, for loop replaces it in-memory + qos := qoses[i] + opModel := operationModel{ + Model: qos, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModels...) +} + +// RemoveQoSesFromLogicalSwitchOps returns the ops to remove the provided QoSes from the provided switch. 
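// As a hedged usage sketch (editorial aside; the predicate, switch name,
// owner key and nbClient are placeholders, libovsdbops is an assumed import
// alias for this package, and error handling is elided), a caller typically
// pairs this helper with DeleteQoSesOps and TransactAndCheck so the detach and
// the row deletion land in a single transaction:
//
//	qoses, _ := libovsdbops.FindQoSesWithPredicate(nbClient, func(q *nbdb.QoS) bool {
//	    return q.ExternalIDs["k8s.ovn.org/owner"] == "example-controller"
//	})
//	ops, _ := libovsdbops.RemoveQoSesFromLogicalSwitchOps(nbClient, nil, "node-a-switch", qoses...)
//	ops, _ = libovsdbops.DeleteQoSesOps(nbClient, ops, qoses...)
//	_, _ = libovsdbops.TransactAndCheck(nbClient, ops)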
+func RemoveQoSesFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + sw := &nbdb.LogicalSwitch{ + Name: name, + QOSRules: make([]string, 0, len(qoses)), + } + for _, qos := range qoses { + sw.QOSRules = append(sw.QOSRules, qos.UUID) + } + + opModels := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.QOSRules}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModels) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go new file mode 100644 index 000000000..0eb8499ab --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go @@ -0,0 +1,1189 @@ +package ops + +import ( + "context" + "errors" + "fmt" + "net" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "k8s.io/apimachinery/pkg/util/sets" +) + +// ROUTER OPs + +type logicalRouterPredicate func(*nbdb.LogicalRouter) bool + +// GetLogicalRouter looks up a logical router from the cache +func GetLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) (*nbdb.LogicalRouter, error) { + found := []*nbdb.LogicalRouter{} + opModel := operationModel{ + Model: router, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// FindLogicalRoutersWithPredicate looks up logical routers from the cache based on a +// given predicate +func FindLogicalRoutersWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPredicate) ([]*nbdb.LogicalRouter, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.LogicalRouter{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdateLogicalRouter creates or updates the provided logical router +func CreateOrUpdateLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, fields ...interface{}) error { + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } + opModel := operationModel{ + Model: router, + OnModelUpdates: fields, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +// UpdateLogicalRouterSetExternalIDs sets external IDs on the provided logical +// router adding any missing, removing the ones set to an empty value and +// updating existing +func UpdateLogicalRouterSetExternalIDs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) error { + externalIds := router.ExternalIDs + router, err := GetLogicalRouter(nbClient, router) + if err != nil { + return err + } + + if router.ExternalIDs == nil { + router.ExternalIDs = map[string]string{} + } + + for k, v := range externalIds { + if v == "" { + delete(router.ExternalIDs, k) + } else { + router.ExternalIDs[k] = v + } + } + + opModel := operationModel{ + Model: router, + OnModelUpdates: []interface{}{&router.ExternalIDs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err = m.CreateOrUpdate(opModel) + return 
err +} + +// DeleteLogicalRoutersWithPredicateOps returns the operations to delete the logical routers matching the provided predicate +func DeleteLogicalRoutersWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + p logicalRouterPredicate) ([]libovsdb.Operation, error) { + opModel := operationModel{ + Model: &nbdb.LogicalRouter{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeleteLogicalRouterOps returns the operations to delete the provided logical router +func DeleteLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + router *nbdb.LogicalRouter) ([]libovsdb.Operation, error) { + opModel := operationModel{ + Model: router, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeleteLogicalRouter deletes the provided logical router +func DeleteLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) error { + ops, err := DeleteLogicalRouterOps(nbClient, nil, router) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// LOGICAL ROUTER PORT OPs + +type logicalRouterPortPredicate func(*nbdb.LogicalRouterPort) bool + +// FindLogicalRouterPortWithPredicate looks up logical router port from +// the cache based on a given predicate +func FindLogicalRouterPortWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPortPredicate) ([]*nbdb.LogicalRouterPort, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.LogicalRouterPort{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// GetLogicalRouterPort looks up a logical router port from the cache +func GetLogicalRouterPort(nbClient libovsdbclient.Client, lrp *nbdb.LogicalRouterPort) (*nbdb.LogicalRouterPort, error) { + found := []*nbdb.LogicalRouterPort{} + opModel := operationModel{ + Model: lrp, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// CreateOrUpdateLogicalRouterPort creates or updates the provided logical +// router port together with the gateway chassis (if not nil), and adds it to the provided logical router +func CreateOrUpdateLogicalRouterPort(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, + lrp *nbdb.LogicalRouterPort, chassis *nbdb.GatewayChassis, fields ...interface{}) error { + opModels := []operationModel{} + if chassis != nil { + opModels = append(opModels, operationModel{ + Model: chassis, + OnModelUpdates: onModelUpdatesAllNonDefault(), + DoAfter: func() { lrp.GatewayChassis = []string{chassis.UUID} }, + ErrNotFound: false, + BulkOp: false, + }) + } + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } else if chassis != nil { + fields = append(fields, &lrp.GatewayChassis) + } + originalPorts := router.Ports + router.Ports = []string{} + opModels = append(opModels, operationModel{ + Model: lrp, + OnModelUpdates: fields, + DoAfter: func() { router.Ports = append(router.Ports, lrp.UUID) }, + ErrNotFound: false, + BulkOp: false, + }) + opModels = append(opModels, operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Ports}, + ErrNotFound: true, + BulkOp: false, + }) + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModels...) 
+ router.Ports = originalPorts + return err +} + +// DeleteLogicalRouterPorts deletes the provided logical router ports and +// removes them from the provided logical router +func DeleteLogicalRouterPorts(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, lrps ...*nbdb.LogicalRouterPort) error { + originalPorts := router.Ports + router.Ports = make([]string, 0, len(lrps)) + opModels := make([]operationModel, 0, len(lrps)+1) + for i := range lrps { + lrp := lrps[i] + opModel := operationModel{ + Model: lrp, + DoAfter: func() { + if lrp.UUID != "" { + router.Ports = append(router.Ports, lrp.UUID) + } + }, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Ports}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + err := m.Delete(opModels...) + router.Ports = originalPorts + return err +} + +// LOGICAL ROUTER POLICY OPs + +type logicalRouterPolicyPredicate func(*nbdb.LogicalRouterPolicy) bool + +// FindLogicalRouterPoliciesWithPredicate looks up logical router policies from +// the cache based on a given predicate +func FindLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPolicyPredicate) ([]*nbdb.LogicalRouterPolicy, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.LogicalRouterPolicy{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// GetLogicalRouterPolicy looks up a logical router policy from the cache +func GetLogicalRouterPolicy(nbClient libovsdbclient.Client, policy *nbdb.LogicalRouterPolicy) (*nbdb.LogicalRouterPolicy, error) { + found := []*nbdb.LogicalRouterPolicy{} + opModel := operationModel{ + Model: policy, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// CreateOrUpdateLogicalRouterPolicyWithPredicate looks up a logical router +// policy from the cache based on a given predicate. If it does not exist, it +// creates the provided logical router policy. If it does, it updates it. The +// logical router policy is added to the provided logical router. +// fields determines which columns to update. Passing no fields assumes +// all fields need to be updated. Passing a single nil field indicates no fields should be updated. +// Otherwise a caller may pass as many individual fields as desired to specify which columns need updating. +func CreateOrUpdateLogicalRouterPolicyWithPredicate(nbClient libovsdbclient.Client, routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate, fields ...interface{}) error { + ops, err := CreateOrUpdateLogicalRouterPolicyWithPredicateOps(nbClient, nil, routerName, lrp, p, fields...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// CreateOrUpdateLogicalRouterPolicyWithPredicateOps looks up a logical +// router policy from the cache based on a given predicate. If it does not +// exist, it creates the provided logical router policy. If it does, it +// updates it. The logical router policy is added to the provided logical +// router.
Returns the corresponding ops +func CreateOrUpdateLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate, fields ...interface{}) ([]libovsdb.Operation, error) { + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } + router := &nbdb.LogicalRouter{ + Name: routerName, + } + + opModels := []operationModel{ + { + Model: lrp, + ModelPredicate: p, + OnModelUpdates: fields, + DoAfter: func() { router.Policies = []string{lrp.UUID} }, + ErrNotFound: false, + BulkOp: false, + }, + { + Model: router, + OnModelMutations: []interface{}{&router.Policies}, + ErrNotFound: true, + BulkOp: false, + }, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +func UpdateLogicalRouterPoliciesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + lrps ...*nbdb.LogicalRouterPolicy) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(lrps)) + for i := range lrps { + lrp := lrps[i] + opModel := []operationModel{ + { + Model: lrp, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: true, + BulkOp: false, + }, + } + opModels = append(opModels, opModel...) + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// DeleteLogicalRouterPolicyWithPredicateOps looks up a logical +// router policy from the cache based on a given predicate and returns the +// corresponding ops to delete it and remove it from the provided router. +func DeleteLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterPolicyPredicate) ([]libovsdb.Operation, error) { + router := &nbdb.LogicalRouter{ + Name: routerName, + } + + deleted := []*nbdb.LogicalRouterPolicy{} + opModels := []operationModel{ + { + ModelPredicate: p, + ExistingResult: &deleted, + DoAfter: func() { router.Policies = extractUUIDsFromModels(&deleted) }, + ErrNotFound: false, + BulkOp: true, + }, + { + Model: router, + OnModelMutations: []interface{}{&router.Policies}, + ErrNotFound: false, + BulkOp: false, + }, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +// DeleteLogicalRouterPoliciesWithPredicate looks up logical router policies +// from the cache based on a given predicate, deletes them and removes them from +// the provided logical router +func DeleteLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterPolicyPredicate) error { + ops, err := DeleteLogicalRouterPolicyWithPredicateOps(nbClient, nil, routerName, p) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// CreateOrAddNextHopsToLogicalRouterPolicyWithPredicateOps looks up a logical +// router policy from the cache based on a given predicate. If it doesn't find +// any, it creates the provided logical router policy. If it does, adds any +// missing Nexthops to the existing logical router policy. The logical router +// policy is added to the provided logical router. 
Returns the corresponding ops +func CreateOrAddNextHopsToLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate) ([]libovsdb.Operation, error) { + router := &nbdb.LogicalRouter{ + Name: routerName, + } + + opModels := []operationModel{ + { + Model: lrp, + ModelPredicate: p, + OnModelMutations: []interface{}{&lrp.Nexthops}, + DoAfter: func() { router.Policies = []string{lrp.UUID} }, + ErrNotFound: false, + BulkOp: false, + }, + { + Model: router, + OnModelMutations: []interface{}{&router.Policies}, + ErrNotFound: true, + BulkOp: false, + }, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// DeleteNextHopsFromLogicalRouterPolicyOps removes the provided Nexthops from the +// provided logical router policies. If a policy is left with no Nexthops, it is +// deleted and removed from the provided logical router. Returns the corresponding ops. +func DeleteNextHopsFromLogicalRouterPolicyOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrps []*nbdb.LogicalRouterPolicy, nextHops ...string) ([]libovsdb.Operation, error) { + nextHopSet := sets.NewString(nextHops...) + opModels := []operationModel{} + router := &nbdb.LogicalRouter{ + Name: routerName, + Policies: []string{}, + } + + for i := range lrps { + lrp := lrps[i] + if nextHopSet.HasAll(lrp.Nexthops...) { + // if no next-hops remain in the policy, remove it altogether + router.Policies = append(router.Policies, lrp.UUID) + opModel := operationModel{ + Model: lrp, + BulkOp: false, + ErrNotFound: false, + } + opModels = append(opModels, opModel) + } else { + // otherwise just remove the next-hops + lrp.Nexthops = nextHops + opModel := operationModel{ + Model: lrp, + OnModelMutations: []interface{}{&lrp.Nexthops}, + BulkOp: false, + ErrNotFound: false, + } + opModels = append(opModels, opModel) + } + } + + if len(router.Policies) > 0 { + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Policies}, + BulkOp: false, + ErrNotFound: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +// DeleteNextHopsFromLogicalRouterPolicies removes the Nexthops from the +// provided logical router policies. If a logical router policy ends up with no +// Nexthops, it is deleted and removed from the provided logical router. +func DeleteNextHopsFromLogicalRouterPolicies(nbClient libovsdbclient.Client, routerName string, lrps ...*nbdb.LogicalRouterPolicy) error { + ops := []libovsdb.Operation{} + for _, lrp := range lrps { + nextHops := lrp.Nexthops + lrp, err := GetLogicalRouterPolicy(nbClient, lrp) + if errors.Is(err, libovsdbclient.ErrNotFound) { + continue + } + if err != nil { + return err + } + + ops, err = DeleteNextHopsFromLogicalRouterPolicyOps(nbClient, ops, routerName, []*nbdb.LogicalRouterPolicy{lrp}, nextHops...) + if err != nil { + return err + } + } + + _, err := TransactAndCheck(nbClient, ops) + return err +} + +// DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps looks up a logical +// router policy from the cache based on a given predicate and removes the +// provided Nexthop from it. If the logical router policy ends up with no +// Nexthops, it is deleted and removed from the provided logical router.
Returns +// the corresponding ops +func DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterPolicyPredicate, nextHop string) ([]libovsdb.Operation, error) { + lrps, err := FindLogicalRouterPoliciesWithPredicate(nbClient, p) + if err != nil { + return nil, err + } + + return DeleteNextHopsFromLogicalRouterPolicyOps(nbClient, ops, routerName, lrps, nextHop) +} + +// DeleteNextHopFromLogicalRouterPoliciesWithPredicate looks up a logical router +// policy from the cache based on a given predicate and removes the provided +// Nexthop from it. If the logical router policy ends up with no Nexthops, it is +// deleted and removed from the provided logical router. +func DeleteNextHopFromLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterPolicyPredicate, nextHop string) error { + ops, err := DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(nbClient, nil, routerName, p, nextHop) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteLogicalRouterPolicies deletes the logical router policies and removes +// them from the provided logical router +func DeleteLogicalRouterPolicies(nbClient libovsdbclient.Client, routerName string, lrps ...*nbdb.LogicalRouterPolicy) error { + opModels := getDeleteOpModelsForLogicalRouterPolicies(routerName, lrps...) + + m := newModelClient(nbClient) + return m.Delete(opModels...) +} + +// DeleteLogicalRouterPoliciesOps builds and returns corresponding delete operations for Logical Router +// Policies from the provided logical router. +func DeleteLogicalRouterPoliciesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrps ...*nbdb.LogicalRouterPolicy) ([]libovsdb.Operation, error) { + opModels := getDeleteOpModelsForLogicalRouterPolicies(routerName, lrps...) + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +func getDeleteOpModelsForLogicalRouterPolicies(routerName string, lrps ...*nbdb.LogicalRouterPolicy) []operationModel { + router := &nbdb.LogicalRouter{ + Name: routerName, + Policies: make([]string, 0, len(lrps)), + } + + opModels := make([]operationModel, 0, len(lrps)+1) + for _, lrp := range lrps { + router.Policies = append(router.Policies, lrp.UUID) + opModel := operationModel{ + Model: lrp, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Policies}, + ErrNotFound: true, + BulkOp: false, + } + + return append(opModels, opModel) +} + +// LOGICAL ROUTER STATIC ROUTES + +type logicalRouterStaticRoutePredicate func(*nbdb.LogicalRouterStaticRoute) bool + +// FindLogicalRouterStaticRoutesWithPredicate looks up logical router static +// routes from the cache based on a given predicate +func FindLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client, p logicalRouterStaticRoutePredicate) ([]*nbdb.LogicalRouterStaticRoute, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.LogicalRouterStaticRoute{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps looks up a logical +// router static route from the cache based on a given predicate. If it does not +// exist, it creates the provided logical router static route. 
If it does, it +// updates it. The logical router static route is added to the provided logical +// router. Returns the corresponding ops +func CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + routerName string, lrsr *nbdb.LogicalRouterStaticRoute, p logicalRouterStaticRoutePredicate, fields ...interface{}) ([]libovsdb.Operation, error) { + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } + router := &nbdb.LogicalRouter{ + Name: routerName, + } + + opModels := []operationModel{ + { + Model: lrsr, + OnModelUpdates: fields, + DoAfter: func() { router.StaticRoutes = []string{lrsr.UUID} }, + ErrNotFound: false, + BulkOp: false, + }, + { + Model: router, + OnModelMutations: []interface{}{&router.StaticRoutes}, + ErrNotFound: true, + BulkOp: false, + }, + } + + if p != nil { + opModels[0].ModelPredicate = p + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// PolicyEqualPredicate determines if two static routes have the same routing policy (dst-ip or src-ip) +// If policy is nil, OVN considers that as dst-ip +func PolicyEqualPredicate(p1, p2 *nbdb.LogicalRouterStaticRoutePolicy) bool { + if p1 == nil { + return p2 == nil || (p2 != nil && *p2 == nbdb.LogicalRouterStaticRoutePolicyDstIP) + } + + if p2 == nil { + return *p1 == nbdb.LogicalRouterStaticRoutePolicyDstIP + } + + return *p1 == *p2 +} + +// CreateOrReplaceLogicalRouterStaticRouteWithPredicate looks up a logical +// router static route from the cache based on a given predicate. If it does not +// exist, it creates the provided logical router static route. If it does, it +// updates it. The logical router static route is added to the provided logical +// router. +// If more than one route matches the predicate on the router, the additional routes are removed. +func CreateOrReplaceLogicalRouterStaticRouteWithPredicate(nbClient libovsdbclient.Client, routerName string, + lrsr *nbdb.LogicalRouterStaticRoute, p logicalRouterStaticRoutePredicate, fields ...interface{}) error { + + lr := &nbdb.LogicalRouter{Name: routerName} + router, err := GetLogicalRouter(nbClient, lr) + if err != nil { + return err + } + newPredicate := func(item *nbdb.LogicalRouterStaticRoute) bool { + for _, routeUUID := range router.StaticRoutes { + if routeUUID == item.UUID && p(item) { + return true + } + } + return false + } + routes, err := FindLogicalRouterStaticRoutesWithPredicate(nbClient, newPredicate) + if err != nil { + return err + } + + var ops []libovsdb.Operation + m := newModelClient(nbClient) + + if len(routes) > 0 { + lrsr.UUID = routes[0].UUID + } + + if len(routes) > 1 { + // should only be a single route remove all except the first + routes = routes[1:] + opModels := make([]operationModel, 0, len(routes)+1) + router.StaticRoutes = []string{} + for _, route := range routes { + route := route + router.StaticRoutes = append(router.StaticRoutes, route.UUID) + opModel := operationModel{ + Model: route, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.StaticRoutes}, + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + + ops, err = m.DeleteOps(nil, opModels...) + if err != nil { + return err + } + } + + ops, err = CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps(nbClient, ops, routerName, lrsr, nil, fields...) 
+ if err != nil { + return err + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteLogicalRouterStaticRoutesWithPredicate looks up logical router static +// routes from the cache based on a given predicate, deletes them and removes +// them from the provided logical router +func DeleteLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterStaticRoutePredicate) error { + var ops []libovsdb.Operation + var err error + ops, err = DeleteLogicalRouterStaticRoutesWithPredicateOps(nbClient, ops, routerName, p) + if err != nil { + return err + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteLogicalRouterStaticRoutesWithPredicateOps looks up logical router static +// routes from the cache based on a given predicate, and returns the ops to delete +// them and remove them from the provided logical router +func DeleteLogicalRouterStaticRoutesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterStaticRoutePredicate) ([]libovsdb.Operation, error) { + router := &nbdb.LogicalRouter{ + Name: routerName, + } + + deleted := []*nbdb.LogicalRouterStaticRoute{} + opModels := []operationModel{ + { + ModelPredicate: p, + ExistingResult: &deleted, + DoAfter: func() { router.StaticRoutes = extractUUIDsFromModels(deleted) }, + ErrNotFound: false, + BulkOp: true, + }, + { + Model: router, + OnModelMutations: []interface{}{&router.StaticRoutes}, + ErrNotFound: false, + BulkOp: false, + }, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +// DeleteLogicalRouterStaticRoutes deletes the logical router static routes and +// removes them from the provided logical router +func DeleteLogicalRouterStaticRoutes(nbClient libovsdbclient.Client, routerName string, lrsrs ...*nbdb.LogicalRouterStaticRoute) error { + router := &nbdb.LogicalRouter{ + Name: routerName, + StaticRoutes: make([]string, 0, len(lrsrs)), + } + + opModels := make([]operationModel, 0, len(lrsrs)+1) + for _, lrsr := range lrsrs { + lrsr := lrsr + router.StaticRoutes = append(router.StaticRoutes, lrsr.UUID) + opModel := operationModel{ + Model: lrsr, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.StaticRoutes}, + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + return m.Delete(opModels...) +} + +// BFD ops + +// CreateOrUpdateBFDOps creates or updates the provided BFDs and returns +// the corresponding ops +func CreateOrUpdateBFDOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, bfds ...*nbdb.BFD) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(bfds)) + for i := range bfds { + bfd := bfds[i] + opModel := operationModel{ + Model: bfd, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// DeleteBFDs deletes the provided BFDs +func DeleteBFDs(nbClient libovsdbclient.Client, bfds ...*nbdb.BFD) error { + opModels := make([]operationModel, 0, len(bfds)) + for i := range bfds { + bfd := bfds[i] + opModel := operationModel{ + Model: bfd, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.Delete(opModels...) 
+} + +func LookupBFD(nbClient libovsdbclient.Client, bfd *nbdb.BFD) (*nbdb.BFD, error) { + found := []*nbdb.BFD{} + opModel := operationModel{ + Model: bfd, + ModelPredicate: func(item *nbdb.BFD) bool { return item.DstIP == bfd.DstIP && item.LogicalPort == bfd.LogicalPort }, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + return found[0], nil +} + +// LB OPs + +// AddLoadBalancersToLogicalRouterOps adds the provided load balancers to the +// provided logical router and returns the corresponding ops +func AddLoadBalancersToLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + originalLBs := router.LoadBalancer + router.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + router.LoadBalancer = append(router.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.LoadBalancer}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + ops, err := modelClient.CreateOrUpdateOps(ops, opModel) + router.LoadBalancer = originalLBs + return ops, err +} + +// RemoveLoadBalancersFromLogicalRouterOps removes the provided load balancers from the +// provided logical router and returns the corresponding ops +func RemoveLoadBalancersFromLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + originalLBs := router.LoadBalancer + router.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + router.LoadBalancer = append(router.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.LoadBalancer}, + // if we want to delete loadbalancer from the router that doesn't exist, that is noop + ErrNotFound: false, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + ops, err := modelClient.DeleteOps(ops, opModel) + router.LoadBalancer = originalLBs + return ops, err +} + +func buildNAT( + natType nbdb.NATType, + externalIP string, + logicalIP string, + logicalPort string, + externalMac string, + externalIDs map[string]string, +) *nbdb.NAT { + nat := &nbdb.NAT{ + Type: natType, + ExternalIP: externalIP, + LogicalIP: logicalIP, + Options: map[string]string{"stateless": "false"}, + ExternalIDs: externalIDs, + } + + if logicalPort != "" { + nat.LogicalPort = &logicalPort + } + + if externalMac != "" { + nat.ExternalMAC = &externalMac + } + + return nat +} + +// BuildSNAT builds a logical router SNAT +func BuildSNAT( + externalIP *net.IP, + logicalIP *net.IPNet, + logicalPort string, + externalIDs map[string]string, +) *nbdb.NAT { + externalIPStr := "" + if externalIP != nil { + externalIPStr = externalIP.String() + } + // Strip out mask of logicalIP only if it is a host mask + logicalIPMask, _ := logicalIP.Mask.Size() + logicalIPStr := logicalIP.IP.String() + if logicalIPMask != 32 && logicalIPMask != 128 { + logicalIPStr = logicalIP.String() + } + return buildNAT(nbdb.NATTypeSNAT, externalIPStr, logicalIPStr, logicalPort, "", externalIDs) +} + +// BuildDNATAndSNAT builds a logical router DNAT/SNAT +func BuildDNATAndSNAT( + externalIP *net.IP, + logicalIP *net.IPNet, + logicalPort string, + externalMac string, + externalIDs map[string]string, +) *nbdb.NAT { + externalIPStr := "" + if 
externalIP != nil { + externalIPStr = externalIP.String() + } + logicalIPStr := "" + if logicalIP != nil { + logicalIPStr = logicalIP.IP.String() + } + return buildNAT( + nbdb.NATTypeDNATAndSNAT, + externalIPStr, + logicalIPStr, + logicalPort, + externalMac, + externalIDs) +} + +// isEquivalentNAT returns true if the NATs have the same UUID. Otherwise, it checks that the types and relevant fields match: +// ExternalIP must be unique amongst non-SNATs; +// LogicalIP must be unique amongst SNATs; +// If provided, LogicalPort is expected to match. +func isEquivalentNAT(existing *nbdb.NAT, searched *nbdb.NAT) bool { + // Simple case: uuid was provided. + if searched.UUID != "" && existing.UUID == searched.UUID { + return true + } + + if searched.Type != existing.Type { + return false + } + + // Compare externalIP if it's not empty. + if searched.ExternalIP != "" && searched.ExternalIP != existing.ExternalIP { + return false + } + + // Compare logicalIP only for SNAT, since DNAT types must have unique ExternalIP. + if searched.Type == nbdb.NATTypeSNAT && searched.LogicalIP != existing.LogicalIP { + return false + } + + // When searching based on logicalPort, no need to go any further. + if searched.LogicalPort != nil && + (existing.LogicalPort == nil || *searched.LogicalPort != *existing.LogicalPort) { + return false + } + + // When the searched external IDs are populated, check if the provided key/value pairs exist in the existing row. + // A use case is when doing NAT operations where the external ID "name" is provided. + for externalIdKey, externalIdValue := range searched.ExternalIDs { + if foundValue, found := existing.ExternalIDs[externalIdKey]; !found || foundValue != externalIdValue { + return false + } + } + + return true +} + +type natPredicate func(*nbdb.NAT) bool + +// GetNAT looks up a NAT from the cache +func GetNAT(nbClient libovsdbclient.Client, nat *nbdb.NAT) (*nbdb.NAT, error) { + found := []*nbdb.NAT{} + opModel := operationModel{ + Model: nat, + ModelPredicate: func(item *nbdb.NAT) bool { return isEquivalentNAT(item, nat) }, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// FindNATsWithPredicate looks up NATs from the cache based on a given predicate +func FindNATsWithPredicate(nbClient libovsdbclient.Client, predicate natPredicate) ([]*nbdb.NAT, error) { + nats := []*nbdb.NAT{} + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + err := nbClient.WhereCache(predicate).List(ctx, &nats) + return nats, err +} + +// GetRouterNATs looks up NATs associated with the provided logical router from +// the cache +func GetRouterNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) ([]*nbdb.NAT, error) { + r, err := GetLogicalRouter(nbClient, router) + if err != nil { + return nil, fmt.Errorf("failed to get router: %s, error: %w", router.Name, err) + } + + nats := []*nbdb.NAT{} + for _, uuid := range r.Nat { + nat, err := GetNAT(nbClient, &nbdb.NAT{UUID: uuid}) + if errors.Is(err, libovsdbclient.ErrNotFound) { + continue + } + if err != nil { + return nil, fmt.Errorf("failed to lookup NAT entry with uuid: %s, error: %w", uuid, err) + } + nats = append(nats, nat) + } + + return nats, nil +} + +// CreateOrUpdateNATsOps creates or updates the provided NATs, adds them to +// the provided logical router and returns the corresponding ops +func CreateOrUpdateNATsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, nats
...*nbdb.NAT) ([]libovsdb.Operation, error) { + routerNats, err := GetRouterNATs(nbClient, router) + if err != nil { + return ops, fmt.Errorf("unable to get NAT entries for router %+v: %w", router, err) + } + + originalNats := router.Nat + router.Nat = make([]string, 0, len(nats)) + opModels := make([]operationModel, 0, len(nats)+1) + for i := range nats { + inputNat := nats[i] + for _, routerNat := range routerNats { + if isEquivalentNAT(routerNat, inputNat) { + inputNat.UUID = routerNat.UUID + break + } + } + opModel := operationModel{ + Model: inputNat, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + DoAfter: func() { router.Nat = append(router.Nat, inputNat.UUID) }, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Nat}, + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + ops, err = m.CreateOrUpdateOps(ops, opModels...) + router.Nat = originalNats + return ops, err +} + +// CreateOrUpdateNATs creates or updates the provided NATs and adds them to +// the provided logical router +func CreateOrUpdateNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) error { + ops, err := CreateOrUpdateNATsOps(nbClient, nil, router, nats...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteNATsOps deletes the provided NATs, removes them from the provided +// logical router and returns the corresponding ops +func DeleteNATsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) ([]libovsdb.Operation, error) { + routerNats, err := GetRouterNATs(nbClient, router) + if errors.Is(err, libovsdbclient.ErrNotFound) { + return ops, nil + } + if err != nil { + return ops, fmt.Errorf("unable to get NAT entries for router %+v: %w", router, err) + } + + originalNats := router.Nat + router.Nat = make([]string, 0, len(nats)) + opModels := make([]operationModel, 0, len(routerNats)+1) + for _, routerNat := range routerNats { + for _, inputNat := range nats { + if isEquivalentNAT(routerNat, inputNat) { + router.Nat = append(router.Nat, routerNat.UUID) + opModel := operationModel{ + Model: routerNat, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + break + } + } + } + if len(router.Nat) == 0 { + return ops, nil + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Nat}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + ops, err = m.DeleteOps(ops, opModels...) + router.Nat = originalNats + return ops, err +} + +// DeleteNATs deletes the provided NATs and removes them from the provided +// logical router +func DeleteNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) error { + ops, err := DeleteNATsOps(nbClient, nil, router, nats...) 
+ if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteNATsWithPredicateOps looks up NATs from the cache based on a given +// predicate, deletes them, removes them from associated logical routers and +// returns the corresponding ops +func DeleteNATsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p natPredicate) ([]libovsdb.Operation, error) { + deleted := []*nbdb.NAT{} + router := &nbdb.LogicalRouter{} + natUUIDs := sets.Set[string]{} + opModels := []operationModel{ + { + ModelPredicate: p, + ExistingResult: &deleted, + DoAfter: func() { + router.Nat = extractUUIDsFromModels(&deleted) + natUUIDs.Insert(router.Nat...) + }, + BulkOp: true, + ErrNotFound: false, + }, + { + Model: router, + ModelPredicate: func(lr *nbdb.LogicalRouter) bool { return natUUIDs.HasAny(lr.Nat...) }, + OnModelMutations: []interface{}{&router.Nat}, + ErrNotFound: false, + BulkOp: true, + }, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go new file mode 100644 index 000000000..7f4f527d1 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go @@ -0,0 +1,221 @@ +package ops + +import ( + "golang.org/x/net/context" + "hash/fnv" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +func CreateOrUpdateSampleCollector(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +func UpdateSampleCollectorExternalIDs(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + OnModelUpdates: []interface{}{&collector.ExternalIDs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +func DeleteSampleCollector(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + ErrNotFound: false, + BulkOp: false, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func DeleteSampleCollectorWithPredicate(nbClient libovsdbclient.Client, p func(collector *nbdb.SampleCollector) bool) error { + opModel := operationModel{ + Model: &nbdb.SampleCollector{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func FindSampleCollectorWithPredicate(nbClient libovsdbclient.Client, p func(*nbdb.SampleCollector) bool) ([]*nbdb.SampleCollector, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + collectors := []*nbdb.SampleCollector{} + err := nbClient.WhereCache(p).List(ctx, &collectors) + return collectors, err +} + +func ListSampleCollectors(nbClient libovsdbclient.Client) ([]*nbdb.SampleCollector, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer 
cancel() + collectors := []*nbdb.SampleCollector{} + err := nbClient.List(ctx, &collectors) + return collectors, err +} + +func CreateOrUpdateSamplingAppsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, samplingApps ...*nbdb.SamplingApp) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(samplingApps)) + for i := range samplingApps { + // can't use i in the predicate, for loop replaces it in-memory + samplingApp := samplingApps[i] + opModel := operationModel{ + Model: samplingApp, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +func DeleteSamplingAppsWithPredicate(nbClient libovsdbclient.Client, p func(collector *nbdb.SamplingApp) bool) error { + opModel := operationModel{ + Model: &nbdb.SamplingApp{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func FindSample(nbClient libovsdbclient.Client, sampleMetadata int) (*nbdb.Sample, error) { + sample := &nbdb.Sample{ + Metadata: sampleMetadata, + } + return GetSample(nbClient, sample) +} + +func GetSample(nbClient libovsdbclient.Client, sample *nbdb.Sample) (*nbdb.Sample, error) { + found := []*nbdb.Sample{} + opModel := operationModel{ + Model: sample, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + modelClient := newModelClient(nbClient) + err := modelClient.Lookup(opModel) + if err != nil { + return nil, err + } + return found[0], err +} + +type SampleFeature = string + +const ( + EgressFirewallSample SampleFeature = "EgressFirewall" + NetworkPolicySample SampleFeature = "NetworkPolicy" + AdminNetworkPolicySample SampleFeature = "AdminNetworkPolicy" + MulticastSample SampleFeature = "Multicast" + UDNIsolationSample SampleFeature = "UDNIsolation" +) + +// SamplingConfig is used to configure sampling for different db objects. +type SamplingConfig struct { + featureCollectors map[SampleFeature][]string +} + +func NewSamplingConfig(featureCollectors map[SampleFeature][]string) *SamplingConfig { + return &SamplingConfig{ + featureCollectors: featureCollectors, + } +} + +func addSample(c *SamplingConfig, opModels []operationModel, model model.Model) []operationModel { + switch t := model.(type) { + case *nbdb.ACL: + return createOrUpdateSampleForACL(opModels, c, t) + } + return opModels +} + +// createOrUpdateSampleForACL should be called before acl operationModel is appended to opModels. +func createOrUpdateSampleForACL(opModels []operationModel, c *SamplingConfig, acl *nbdb.ACL) []operationModel { + if c == nil { + acl.SampleEst = nil + acl.SampleNew = nil + return opModels + } + collectors := c.featureCollectors[getACLSampleFeature(acl)] + if len(collectors) == 0 { + acl.SampleEst = nil + acl.SampleNew = nil + return opModels + } + aclID := GetACLSampleID(acl) + sample := &nbdb.Sample{ + Collectors: collectors, + // 32 bits + Metadata: int(aclID), + } + opModel := operationModel{ + Model: sample, + DoAfter: func() { + acl.SampleEst = &sample.UUID + acl.SampleNew = &sample.UUID + }, + OnModelUpdates: []interface{}{&sample.Collectors}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + return opModels +} + +func GetACLSampleID(acl *nbdb.ACL) uint32 { + // primaryID is unique for each ACL, but established connections will keep sampleID that is set on + // connection creation. 
Here is the situation we want to avoid: + // 1. ACL1 is created with sampleID=1 (e.g. based on ANP namespace+name+...+rule index with action Allow) + // 2. connection A is established with sampleID=1, sample is decoded to say "Allowed by ANP namespace+name" + // 3. ACL1 is updated with sampleID=1 (e.g. now same rule in ANP says Deny, but PrimaryIDKey is the same) + // 4. connection A still generates samples with sampleID=1, but now it is "Denied by ANP namespace+name" + // In reality, connection A is still allowed, as existing connections are not affected by ANP updates. + // To avoid this, we encode Match and Action into the sampleID, to ensure a new sampleID is assigned on Match or Action change. + // In that case stale sampleIDs will just report messages like "sampling for this connection was updated or deleted". + primaryID := acl.ExternalIDs[PrimaryIDKey.String()] + acl.Match + acl.Action + h := fnv.New32a() + h.Write([]byte(primaryID)) + return h.Sum32() +} + +func getACLSampleFeature(acl *nbdb.ACL) SampleFeature { + switch acl.ExternalIDs[OwnerTypeKey.String()] { + case AdminNetworkPolicyOwnerType, BaselineAdminNetworkPolicyOwnerType: + return AdminNetworkPolicySample + case MulticastNamespaceOwnerType, MulticastClusterOwnerType: + return MulticastSample + case NetpolNodeOwnerType, NetworkPolicyOwnerType, NetpolNamespaceOwnerType: + return NetworkPolicySample + case EgressFirewallOwnerType: + return EgressFirewallSample + case UDNIsolationOwnerType: + return UDNIsolationSample + } + return "" +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go new file mode 100644 index 000000000..1c5dde339 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go @@ -0,0 +1,27 @@ +package ops + +import ( + libovsdbclient "github.com/ovn-org/libovsdb/client" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" +) + +// GetSBGlobal looks up the SB Global entry from the cache +func GetSBGlobal(sbClient libovsdbclient.Client, sbGlobal *sbdb.SBGlobal) (*sbdb.SBGlobal, error) { + found := []*sbdb.SBGlobal{} + opModel := operationModel{ + Model: sbGlobal, + ModelPredicate: func(item *sbdb.SBGlobal) bool { return true }, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go new file mode 100644 index 000000000..fbee34b55 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go @@ -0,0 +1,481 @@ +package ops + +import ( + "context" + "errors" + "fmt" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// LOGICAL_SWITCH OPs + +type switchPredicate func(*nbdb.LogicalSwitch) bool + +// FindLogicalSwitchesWithPredicate looks up logical switches from the cache +// based on a given predicate +func FindLogicalSwitchesWithPredicate(nbClient libovsdbclient.Client, p switchPredicate) ([]*nbdb.LogicalSwitch, error) { + found := []*nbdb.LogicalSwitch{} + ctx, cancel :=
context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// GetLogicalSwitch looks up a logical switch from the cache +func GetLogicalSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch) (*nbdb.LogicalSwitch, error) { + found := []*nbdb.LogicalSwitch{} + opModel := operationModel{ + Model: sw, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// CreateOrUpdateLogicalSwitch creates or updates the provided logical switch +func CreateOrUpdateLogicalSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, fields ...interface{}) error { + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } + opModel := operationModel{ + Model: sw, + OnModelUpdates: fields, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +// UpdateLogicalSwitchSetExternalIDs updates the external IDs on the provided logical +// switch. Empty values means the corresponding keys are to be deleted. +func UpdateLogicalSwitchSetExternalIDs(nbClient libovsdbclient.Client, logicalSwitch *nbdb.LogicalSwitch) error { + externalIds := logicalSwitch.ExternalIDs + logicalSwitch, err := GetLogicalSwitch(nbClient, logicalSwitch) + if err != nil { + return err + } + + if logicalSwitch.ExternalIDs == nil { + logicalSwitch.ExternalIDs = map[string]string{} + } + + for k, v := range externalIds { + if v == "" { + delete(logicalSwitch.ExternalIDs, k) + } else { + logicalSwitch.ExternalIDs[k] = v + } + } + + opModel := operationModel{ + Model: logicalSwitch, + OnModelUpdates: []interface{}{&logicalSwitch.ExternalIDs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err = m.CreateOrUpdate(opModel) + return err +} + +type logicalSwitchPredicate func(*nbdb.LogicalSwitch) bool + +// DeleteLogicalSwitchesWithPredicateOps returns the operations to delete the logical switches matching the provided predicate +func DeleteLogicalSwitchesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + p logicalSwitchPredicate) ([]libovsdb.Operation, error) { + opModel := operationModel{ + Model: &nbdb.LogicalSwitch{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeleteLogicalSwitchOps returns the operations to delete the provided logical switch +func DeleteLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + swName string) ([]libovsdb.Operation, error) { + opModel := operationModel{ + Model: &nbdb.LogicalSwitch{Name: swName}, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeleteLogicalSwitch deletes the provided logical switch +func DeleteLogicalSwitch(nbClient libovsdbclient.Client, swName string) error { + ops, err := DeleteLogicalSwitchOps(nbClient, nil, swName) + if err != nil { + return err + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// LB ops + +// AddLoadBalancersToLogicalSwitchOps adds the provided load balancers to the +// provided logical switch and returns the corresponding ops +func AddLoadBalancersToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lbs ...*nbdb.LoadBalancer) 
([]libovsdb.Operation, error) { + sw.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + sw.LoadBalancer = append(sw.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.LoadBalancer}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModel) +} + +// RemoveLoadBalancersFromLogicalSwitchOps removes the provided load balancers from the +// provided logical switch and returns the corresponding ops +func RemoveLoadBalancersFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + sw.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + sw.LoadBalancer = append(sw.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.LoadBalancer}, + // if we want to delete loadbalancer from the switch that doesn't exist, that is noop + ErrNotFound: false, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModel) +} + +// ACL ops + +// AddACLsToLogicalSwitchOps adds the provided ACLs to the provided logical +// switch and returns the corresponding ops +func AddACLsToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + sw := &nbdb.LogicalSwitch{ + Name: name, + ACLs: make([]string, 0, len(acls)), + } + for _, acl := range acls { + sw.ACLs = append(sw.ACLs, acl.UUID) + } + + opModels := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.ACLs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels) +} + +// RemoveACLsFromLogicalSwitchesWithPredicateOps looks up logical switches from the cache +// based on a given predicate, removes from them the provided ACLs, and returns the +// corresponding ops +func RemoveACLsFromLogicalSwitchesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + p switchPredicate, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + sw := nbdb.LogicalSwitch{ + ACLs: make([]string, 0, len(acls)), + } + for _, acl := range acls { + sw.ACLs = append(sw.ACLs, acl.UUID) + } + opModel := operationModel{ + Model: &sw, + ModelPredicate: p, + OnModelMutations: []interface{}{&sw.ACLs}, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// RemoveACLsFromLogicalSwitchesWithPredicate looks up logical switches from the cache +// based on a given predicate and removes from them the provided ACLs +func RemoveACLsFromLogicalSwitchesWithPredicate(nbClient libovsdbclient.Client, p switchPredicate, acls ...*nbdb.ACL) error { + ops, err := RemoveACLsFromLogicalSwitchesWithPredicateOps(nbClient, nil, p, acls...) 
+ if err != nil { + return err + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// UpdateLogicalSwitchSetOtherConfig sets other config on the provided logical +// switch adding any missing, removing the ones set to an empty value and +// updating existing +func UpdateLogicalSwitchSetOtherConfig(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch) error { + otherConfig := sw.OtherConfig + sw, err := GetLogicalSwitch(nbClient, sw) + if err != nil { + return err + } + + if sw.OtherConfig == nil { + sw.OtherConfig = map[string]string{} + } + + for k, v := range otherConfig { + if v == "" { + delete(sw.OtherConfig, k) + } else { + sw.OtherConfig[k] = v + } + } + + opModel := operationModel{ + Model: sw, + OnModelUpdates: []interface{}{&sw.OtherConfig}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err = m.CreateOrUpdate(opModel) + return err +} + +// LOGICAL SWITCH PORT OPs + +// GetLogicalSwitchPort looks up a logical switch port from the cache +func GetLogicalSwitchPort(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort) (*nbdb.LogicalSwitchPort, error) { + found := []*nbdb.LogicalSwitchPort{} + opModel := operationModel{ + Model: lsp, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +func createOrUpdateLogicalSwitchPortsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, createSwitch bool, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { + originalPorts := sw.Ports + sw.Ports = make([]string, 0, len(lsps)) + opModels := make([]operationModel, 0, len(lsps)+1) + for i := range lsps { + lsp := lsps[i] + opModel := operationModel{ + Model: lsp, + OnModelUpdates: getAllUpdatableFields(lsp), + DoAfter: func() { sw.Ports = append(sw.Ports, lsp.UUID) }, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.Ports}, + ErrNotFound: !createSwitch, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + ops, err := m.CreateOrUpdateOps(ops, opModels...) + sw.Ports = originalPorts + return ops, err +} + +func createOrUpdateLogicalSwitchPorts(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, createSwitch bool, lsps ...*nbdb.LogicalSwitchPort) error { + ops, err := createOrUpdateLogicalSwitchPortsOps(nbClient, nil, sw, createSwitch, lsps...) + if err != nil { + return err + } + + _, err = TransactAndCheckAndSetUUIDs(nbClient, lsps, ops) + return err +} + +// CreateOrUpdateLogicalSwitchPortsOnSwitchOps creates or updates the provided +// logical switch ports, adds them to the provided logical switch and returns +// the corresponding ops +func CreateOrUpdateLogicalSwitchPortsOnSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { + return createOrUpdateLogicalSwitchPortsOps(nbClient, ops, sw, false, lsps...) +} + +// CreateOrUpdateLogicalSwitchPortsOnSwitch creates or updates the provided +// logical switch ports and adds them to the provided logical switch +func CreateOrUpdateLogicalSwitchPortsOnSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error { + return createOrUpdateLogicalSwitchPorts(nbClient, sw, false, lsps...) 
+} + +// CreateOrUpdateLogicalSwitchPortsAndSwitch creates or updates the provided +// logical switch ports and adds them to the provided logical switch creating it +// if it does not exist +func CreateOrUpdateLogicalSwitchPortsAndSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error { + return createOrUpdateLogicalSwitchPorts(nbClient, sw, true, lsps...) +} + +// DeleteLogicalSwitchPortsOps deletes the provided logical switch ports, removes +// them from the provided logical switch and returns the corresponding ops +func DeleteLogicalSwitchPortsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { + originalPorts := sw.Ports + sw.Ports = make([]string, 0, len(lsps)) + opModels := make([]operationModel, 0, len(lsps)+1) + for i := range lsps { + lsp := lsps[i] + opModel := operationModel{ + Model: lsp, + DoAfter: func() { + if lsp.UUID != "" { + sw.Ports = append(sw.Ports, lsp.UUID) + } + }, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.Ports}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + ops, err := m.DeleteOps(ops, opModels...) + sw.Ports = originalPorts + return ops, err +} + +// DeleteLogicalSwitchPorts deletes the provided logical switch ports and +// removes them from the provided logical switch +func DeleteLogicalSwitchPorts(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error { + ops, err := DeleteLogicalSwitchPortsOps(nbClient, nil, sw, lsps...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +type logicalSwitchPortPredicate func(*nbdb.LogicalSwitchPort) bool + +// DeleteLogicalSwitchPortsWithPredicateOps looks up logical switch ports from +// the cache based on a given predicate and removes from them the provided +// logical switch +func DeleteLogicalSwitchPortsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, p logicalSwitchPortPredicate) ([]libovsdb.Operation, error) { + swName := sw.Name + sw, err := GetLogicalSwitch(nbClient, sw) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + return ops, nil + } + return nil, fmt.Errorf("error retrieving logical switch %s from libovsdb cache: %w", swName, err) + } + + var lsps []*nbdb.LogicalSwitchPort + for _, port := range sw.Ports { + lsp := &nbdb.LogicalSwitchPort{UUID: port} + lsp, err = GetLogicalSwitchPort(nbClient, lsp) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + continue + } + return nil, fmt.Errorf("error retrieving logical switch port with UUID %s associated with logical"+ + " switch %s from libovsdb cache: %w", port, swName, err) + } + if p(lsp) { + lsps = append(lsps, lsp) + } + } + + opModels := make([]operationModel, 0, len(lsps)+1) + sw.Ports = make([]string, 0, len(lsps)) + for _, lsp := range lsps { + sw.Ports = append(sw.Ports, lsp.UUID) + opModel := operationModel{ + Model: lsp, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.Ports}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) 
+} + +// UpdateLogicalSwitchPortSetOptions sets options on the provided logical switch +// port adding any missing, removing the ones set to an empty value and updating +// existing +func UpdateLogicalSwitchPortSetOptions(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort) error { + options := lsp.Options + lsp, err := GetLogicalSwitchPort(nbClient, lsp) + if err != nil { + return err + } + + if lsp.Options == nil { + lsp.Options = map[string]string{} + } + + for k, v := range options { + if v == "" { + delete(lsp.Options, k) + } else { + lsp.Options[k] = v + } + } + + opModel := operationModel{ + // For LSP's Name is a valid index, so no predicate is needed + Model: lsp, + OnModelUpdates: []interface{}{&lsp.Options}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err = m.CreateOrUpdate(opModel) + return err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go new file mode 100644 index 000000000..4672a5c0e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go @@ -0,0 +1,112 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type chassisTemplateVarPredicate func(*nbdb.ChassisTemplateVar) bool + +// ListTemplateVar looks up all chassis template variables. +func ListTemplateVar(nbClient libovsdbclient.Client) ([]*nbdb.ChassisTemplateVar, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + + templatesList := []*nbdb.ChassisTemplateVar{} + err := nbClient.List(ctx, &templatesList) + return templatesList, err +} + +// CreateOrUpdateChassisTemplateVarOps creates or updates the provided +// 'template' variable and returns the corresponding ops. +func CreateOrUpdateChassisTemplateVarOps(nbClient libovsdbclient.Client, + ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar) ([]libovsdb.Operation, error) { + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, operationModel{ + Model: template, + OnModelMutations: []interface{}{&template.Variables}, + ErrNotFound: false, + BulkOp: false, + }) +} + +// deleteChassisTemplateVarVariablesOps removes the variables listed as +// keys of 'template.Variables' and returns the corresponding ops. +// It applies the mutation to all records that are selected by 'predicate'. +func deleteChassisTemplateVarVariablesOps(nbClient libovsdbclient.Client, + ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar, + predicate chassisTemplateVarPredicate) ([]libovsdb.Operation, error) { + + deleteTemplate := &nbdb.ChassisTemplateVar{ + Chassis: template.Chassis, + Variables: map[string]string{}, + } + for name := range template.Variables { + deleteTemplate.Variables[name] = "" + } + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, operationModel{ + Model: deleteTemplate, + ModelPredicate: predicate, + OnModelMutations: []interface{}{&deleteTemplate.Variables}, + ErrNotFound: false, + BulkOp: true, + }) +} + +// DeleteChassisTemplateVarVariablesOps removes all variables listed as +// keys of 'template.Variables' from the record matching the same chassis +// as 'template'. 
It returns the corresponding ops. +func DeleteChassisTemplateVarVariablesOps(nbClient libovsdbclient.Client, + ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar) ([]libovsdb.Operation, error) { + + return deleteChassisTemplateVarVariablesOps(nbClient, ops, template, nil) +} + +// DeleteAllChassisTemplateVarVariables removes the variables listed as +// in 'varNames' and commits the transaction to the database. It applies +// the mutation to all records that contain these variable names. +func DeleteAllChassisTemplateVarVariables(nbClient libovsdbclient.Client, varNames []string) error { + deleteTemplateVar := &nbdb.ChassisTemplateVar{ + Variables: make(map[string]string, len(varNames)), + } + for _, name := range varNames { + deleteTemplateVar.Variables[name] = "" + } + ops, err := deleteChassisTemplateVarVariablesOps(nbClient, nil, deleteTemplateVar, + func(item *nbdb.ChassisTemplateVar) bool { + for _, name := range varNames { + if _, found := item.Variables[name]; found { + return true + } + } + return false + }) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteChassisTemplateVar deletes all complete Chassis_Template_Var +// records matching 'templates'. +func DeleteChassisTemplateVar(nbClient libovsdbclient.Client, templates ...*nbdb.ChassisTemplateVar) error { + opModels := make([]operationModel, 0, len(templates)) + for i := range templates { + template := templates[i] + opModels = append(opModels, operationModel{ + Model: template, + ErrNotFound: false, + BulkOp: false, + }) + } + m := newModelClient(nbClient) + return m.Delete(opModels...) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go new file mode 100644 index 000000000..51fd09cce --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go @@ -0,0 +1,98 @@ +package ops + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" +) + +// TransactWithRetry will attempt a transaction several times if it receives an error indicating that the client +// was not connected when the transaction occurred. +func TransactWithRetry(ctx context.Context, c client.Client, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) { + var results []ovsdb.OperationResult + resultErr := wait.PollUntilContextCancel(ctx, 200*time.Millisecond, true, func(ctx context.Context) (bool, error) { + var err error + results, err = c.Transact(ctx, ops...) + if err == nil { + return true, nil + } + if err != nil && errors.Is(err, client.ErrNotConnected) { + klog.V(5).Infof("Unable to execute transaction: %+v. 
Client is disconnected, will retry...", ops) + return false, nil + } + return false, err + }) + return results, resultErr +} + +func TransactAndCheck(c client.Client, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) { + if len(ops) <= 0 { + return []ovsdb.OperationResult{{}}, nil + } + + klog.V(5).Infof("Configuring OVN: %+v", ops) + + ctx, cancel := context.WithTimeout(context.TODO(), config.Default.OVSDBTxnTimeout) + defer cancel() + + results, err := TransactWithRetry(ctx, c, ops) + if err != nil { + return nil, fmt.Errorf("error in transact with ops %+v: %v", ops, err) + } + + opErrors, err := ovsdb.CheckOperationResults(results, ops) + if err != nil { + return nil, fmt.Errorf("error in transact with ops %+v results %+v and errors %+v: %v", ops, results, opErrors, err) + } + + return results, nil +} + +// TransactAndCheckAndSetUUIDs transacts the given ops against client and returns +// results if no error occurred or an error otherwise. It sets the real uuids for +// the passed models if they were inserted and have a named-uuid (as built by +// BuildNamedUUID) +func TransactAndCheckAndSetUUIDs(client client.Client, models interface{}, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) { + results, err := TransactAndCheck(client, ops) + if err != nil { + return nil, err + } + + namedModelMap := map[string]model.Model{} + _ = onModels(models, func(model interface{}) error { + uuid := getUUID(model) + if isNamedUUID(uuid) { + namedModelMap[uuid] = model + } + return nil + }) + + if len(namedModelMap) == 0 { + return results, nil + } + + for i, op := range ops { + if op.Op != ovsdb.OperationInsert { + continue + } + + if !isNamedUUID(op.UUIDName) { + continue + } + + if model, ok := namedModelMap[op.UUIDName]; ok { + setUUID(model, results[i].UUID.GoUUID) + } + } + + return results, nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore new file mode 100644 index 000000000..734ba1eff --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go new file mode 100644 index 000000000..0c2840c17 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go @@ -0,0 +1,303 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
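For context, the ops helpers vendored above are designed to be composed: the *Ops builders (such as CreateOrUpdateChassisTemplateVarOps) accumulate OVSDB operations, and TransactAndCheck commits and validates them in a single checked transaction. The following is a minimal usage sketch, not part of the vendored code; the chassis name and the variable key/value are hypothetical.

package example // illustrative sketch, assumes an established libovsdb client

import (
	libovsdbclient "github.com/ovn-org/libovsdb/client"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

// setTemplateVar builds the operations first, then commits them once.
func setTemplateVar(nbClient libovsdbclient.Client) error {
	template := &nbdb.ChassisTemplateVar{
		Chassis:   "chassis-1",                              // hypothetical chassis name
		Variables: map[string]string{"NODEPORT_MIN": "30000"}, // hypothetical variable
	}
	opsList, err := ops.CreateOrUpdateChassisTemplateVarOps(nbClient, nil, template)
	if err != nil {
		return err
	}
	// TransactAndCheck wraps TransactWithRetry (retrying while the client is
	// disconnected) and validates every operation result before returning.
	_, err = ops.TransactAndCheck(nbClient, opsList)
	return err
}

Callers that insert rows built with named UUIDs can use TransactAndCheckAndSetUUIDs instead, which additionally writes the real UUIDs from the transaction results back into the passed models.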
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const ACLTable = "ACL" + +type ( + ACLAction = string + ACLDirection = string + ACLSeverity = string +) + +var ( + ACLActionAllow ACLAction = "allow" + ACLActionAllowRelated ACLAction = "allow-related" + ACLActionAllowStateless ACLAction = "allow-stateless" + ACLActionDrop ACLAction = "drop" + ACLActionReject ACLAction = "reject" + ACLActionPass ACLAction = "pass" + ACLDirectionFromLport ACLDirection = "from-lport" + ACLDirectionToLport ACLDirection = "to-lport" + ACLSeverityAlert ACLSeverity = "alert" + ACLSeverityWarning ACLSeverity = "warning" + ACLSeverityNotice ACLSeverity = "notice" + ACLSeverityInfo ACLSeverity = "info" + ACLSeverityDebug ACLSeverity = "debug" +) + +// ACL defines an object in ACL table +type ACL struct { + UUID string `ovsdb:"_uuid"` + Action ACLAction `ovsdb:"action"` + Direction ACLDirection `ovsdb:"direction"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Label int `ovsdb:"label"` + Log bool `ovsdb:"log"` + Match string `ovsdb:"match"` + Meter *string `ovsdb:"meter"` + Name *string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` + SampleEst *string `ovsdb:"sample_est"` + SampleNew *string `ovsdb:"sample_new"` + Severity *ACLSeverity `ovsdb:"severity"` + Tier int `ovsdb:"tier"` +} + +func (a *ACL) GetUUID() string { + return a.UUID +} + +func (a *ACL) GetAction() ACLAction { + return a.Action +} + +func (a *ACL) GetDirection() ACLDirection { + return a.Direction +} + +func (a *ACL) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyACLExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalACLExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ACL) GetLabel() int { + return a.Label +} + +func (a *ACL) GetLog() bool { + return a.Log +} + +func (a *ACL) GetMatch() string { + return a.Match +} + +func (a *ACL) GetMeter() *string { + return a.Meter +} + +func copyACLMeter(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLMeter(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetName() *string { + return a.Name +} + +func copyACLName(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLName(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetOptions() map[string]string { + return a.Options +} + +func copyACLOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalACLOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ACL) GetPriority() int { + return a.Priority +} + +func (a *ACL) GetSampleEst() *string { + return a.SampleEst +} + +func copyACLSampleEst(a *string) *string { + if a == nil { + return nil + } + b := 
*a + return &b +} + +func equalACLSampleEst(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetSampleNew() *string { + return a.SampleNew +} + +func copyACLSampleNew(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLSampleNew(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetSeverity() *ACLSeverity { + return a.Severity +} + +func copyACLSeverity(a *ACLSeverity) *ACLSeverity { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLSeverity(a, b *ACLSeverity) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetTier() int { + return a.Tier +} + +func (a *ACL) DeepCopyInto(b *ACL) { + *b = *a + b.ExternalIDs = copyACLExternalIDs(a.ExternalIDs) + b.Meter = copyACLMeter(a.Meter) + b.Name = copyACLName(a.Name) + b.Options = copyACLOptions(a.Options) + b.SampleEst = copyACLSampleEst(a.SampleEst) + b.SampleNew = copyACLSampleNew(a.SampleNew) + b.Severity = copyACLSeverity(a.Severity) +} + +func (a *ACL) DeepCopy() *ACL { + b := new(ACL) + a.DeepCopyInto(b) + return b +} + +func (a *ACL) CloneModelInto(b model.Model) { + c := b.(*ACL) + a.DeepCopyInto(c) +} + +func (a *ACL) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ACL) Equals(b *ACL) bool { + return a.UUID == b.UUID && + a.Action == b.Action && + a.Direction == b.Direction && + equalACLExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Label == b.Label && + a.Log == b.Log && + a.Match == b.Match && + equalACLMeter(a.Meter, b.Meter) && + equalACLName(a.Name, b.Name) && + equalACLOptions(a.Options, b.Options) && + a.Priority == b.Priority && + equalACLSampleEst(a.SampleEst, b.SampleEst) && + equalACLSampleNew(a.SampleNew, b.SampleNew) && + equalACLSeverity(a.Severity, b.Severity) && + a.Tier == b.Tier +} + +func (a *ACL) EqualsModel(b model.Model) bool { + c := b.(*ACL) + return a.Equals(c) +} + +var _ model.CloneableModel = &ACL{} +var _ model.ComparableModel = &ACL{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go new file mode 100644 index 000000000..e8a836e2d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go @@ -0,0 +1,118 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
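All of the modelgen-generated nbdb types in this patch follow the same contract: scalar fields are compared directly, while pointer, map, and slice fields go through the per-field copy/equal helpers, so a DeepCopy never aliases its source. The sketch below illustrates that contract with the ACL model; it is not part of the vendored code, and the match, priority, and name values are hypothetical.

package main // illustrative sketch of the generated model helpers

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

func main() {
	name := "allow-web" // hypothetical ACL name
	acl := &nbdb.ACL{
		Action:      nbdb.ACLActionAllowRelated,
		Direction:   nbdb.ACLDirectionToLport,
		Match:       "tcp.dst == 80",
		Priority:    1001,
		Name:        &name,
		ExternalIDs: map[string]string{"owner": "example"},
	}

	clone := acl.DeepCopy()
	fmt.Println(acl.Equals(clone)) // true: every field compares equal

	// Mutating the clone's map does not touch the original, because
	// DeepCopyInto copies ExternalIDs rather than sharing the map.
	clone.ExternalIDs["owner"] = "someone-else"
	fmt.Println(acl.Equals(clone)) // false
}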
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const AddressSetTable = "Address_Set" + +// AddressSet defines an object in Address_Set table +type AddressSet struct { + UUID string `ovsdb:"_uuid"` + Addresses []string `ovsdb:"addresses"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` +} + +func (a *AddressSet) GetUUID() string { + return a.UUID +} + +func (a *AddressSet) GetAddresses() []string { + return a.Addresses +} + +func copyAddressSetAddresses(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalAddressSetAddresses(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *AddressSet) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyAddressSetExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalAddressSetExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *AddressSet) GetName() string { + return a.Name +} + +func (a *AddressSet) DeepCopyInto(b *AddressSet) { + *b = *a + b.Addresses = copyAddressSetAddresses(a.Addresses) + b.ExternalIDs = copyAddressSetExternalIDs(a.ExternalIDs) +} + +func (a *AddressSet) DeepCopy() *AddressSet { + b := new(AddressSet) + a.DeepCopyInto(b) + return b +} + +func (a *AddressSet) CloneModelInto(b model.Model) { + c := b.(*AddressSet) + a.DeepCopyInto(c) +} + +func (a *AddressSet) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *AddressSet) Equals(b *AddressSet) bool { + return a.UUID == b.UUID && + equalAddressSetAddresses(a.Addresses, b.Addresses) && + equalAddressSetExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name +} + +func (a *AddressSet) EqualsModel(b model.Model) bool { + c := b.(*AddressSet) + return a.Equals(c) +} + +var _ model.CloneableModel = &AddressSet{} +var _ model.ComparableModel = &AddressSet{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go new file mode 100644 index 000000000..46646e81a --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go @@ -0,0 +1,237 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const BFDTable = "BFD" + +type ( + BFDStatus = string +) + +var ( + BFDStatusDown BFDStatus = "down" + BFDStatusInit BFDStatus = "init" + BFDStatusUp BFDStatus = "up" + BFDStatusAdminDown BFDStatus = "admin_down" +) + +// BFD defines an object in BFD table +type BFD struct { + UUID string `ovsdb:"_uuid"` + DetectMult *int `ovsdb:"detect_mult"` + DstIP string `ovsdb:"dst_ip"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LogicalPort string `ovsdb:"logical_port"` + MinRx *int `ovsdb:"min_rx"` + MinTx *int `ovsdb:"min_tx"` + Options map[string]string `ovsdb:"options"` + Status *BFDStatus `ovsdb:"status"` +} + +func (a *BFD) GetUUID() string { + return a.UUID +} + +func (a *BFD) GetDetectMult() *int { + return a.DetectMult +} + +func copyBFDDetectMult(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBFDDetectMult(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *BFD) GetDstIP() string { + return a.DstIP +} + +func (a *BFD) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyBFDExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBFDExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *BFD) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *BFD) GetMinRx() *int { + return a.MinRx +} + +func copyBFDMinRx(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBFDMinRx(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *BFD) GetMinTx() *int { + return a.MinTx +} + +func copyBFDMinTx(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBFDMinTx(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *BFD) GetOptions() map[string]string { + return a.Options +} + +func copyBFDOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBFDOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *BFD) GetStatus() *BFDStatus { + return a.Status +} + +func copyBFDStatus(a *BFDStatus) *BFDStatus { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBFDStatus(a, b *BFDStatus) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *BFD) DeepCopyInto(b *BFD) { + *b = *a + b.DetectMult = copyBFDDetectMult(a.DetectMult) + b.ExternalIDs = copyBFDExternalIDs(a.ExternalIDs) + b.MinRx = copyBFDMinRx(a.MinRx) + b.MinTx = copyBFDMinTx(a.MinTx) + b.Options = copyBFDOptions(a.Options) + b.Status = copyBFDStatus(a.Status) +} + +func (a *BFD) DeepCopy() *BFD { + b := new(BFD) + a.DeepCopyInto(b) + return b +} + +func (a *BFD) CloneModelInto(b 
model.Model) { + c := b.(*BFD) + a.DeepCopyInto(c) +} + +func (a *BFD) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *BFD) Equals(b *BFD) bool { + return a.UUID == b.UUID && + equalBFDDetectMult(a.DetectMult, b.DetectMult) && + a.DstIP == b.DstIP && + equalBFDExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.LogicalPort == b.LogicalPort && + equalBFDMinRx(a.MinRx, b.MinRx) && + equalBFDMinTx(a.MinTx, b.MinTx) && + equalBFDOptions(a.Options, b.Options) && + equalBFDStatus(a.Status, b.Status) +} + +func (a *BFD) EqualsModel(b model.Model) bool { + c := b.(*BFD) + return a.Equals(c) +} + +var _ model.CloneableModel = &BFD{} +var _ model.ComparableModel = &BFD{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go new file mode 100644 index 000000000..602c3f522 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go @@ -0,0 +1,120 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const ChassisTemplateVarTable = "Chassis_Template_Var" + +// ChassisTemplateVar defines an object in Chassis_Template_Var table +type ChassisTemplateVar struct { + UUID string `ovsdb:"_uuid"` + Chassis string `ovsdb:"chassis"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Variables map[string]string `ovsdb:"variables"` +} + +func (a *ChassisTemplateVar) GetUUID() string { + return a.UUID +} + +func (a *ChassisTemplateVar) GetChassis() string { + return a.Chassis +} + +func (a *ChassisTemplateVar) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyChassisTemplateVarExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisTemplateVarExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ChassisTemplateVar) GetVariables() map[string]string { + return a.Variables +} + +func copyChassisTemplateVarVariables(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisTemplateVarVariables(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ChassisTemplateVar) DeepCopyInto(b *ChassisTemplateVar) { + *b = *a + b.ExternalIDs = copyChassisTemplateVarExternalIDs(a.ExternalIDs) + b.Variables = copyChassisTemplateVarVariables(a.Variables) +} + +func (a *ChassisTemplateVar) DeepCopy() *ChassisTemplateVar { + b := new(ChassisTemplateVar) + a.DeepCopyInto(b) + return b +} + +func (a *ChassisTemplateVar) CloneModelInto(b model.Model) { + c := b.(*ChassisTemplateVar) + a.DeepCopyInto(c) +} + +func (a *ChassisTemplateVar) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ChassisTemplateVar) Equals(b *ChassisTemplateVar) bool { + return a.UUID == b.UUID && + a.Chassis == b.Chassis && + equalChassisTemplateVarExternalIDs(a.ExternalIDs, b.ExternalIDs) && + 
equalChassisTemplateVarVariables(a.Variables, b.Variables) +} + +func (a *ChassisTemplateVar) EqualsModel(b model.Model) bool { + c := b.(*ChassisTemplateVar) + return a.Equals(c) +} + +var _ model.CloneableModel = &ChassisTemplateVar{} +var _ model.ComparableModel = &ChassisTemplateVar{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go new file mode 100644 index 000000000..baf6da344 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go @@ -0,0 +1,209 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const ConnectionTable = "Connection" + +// Connection defines an object in Connection table +type Connection struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + InactivityProbe *int `ovsdb:"inactivity_probe"` + IsConnected bool `ovsdb:"is_connected"` + MaxBackoff *int `ovsdb:"max_backoff"` + OtherConfig map[string]string `ovsdb:"other_config"` + Status map[string]string `ovsdb:"status"` + Target string `ovsdb:"target"` +} + +func (a *Connection) GetUUID() string { + return a.UUID +} + +func (a *Connection) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyConnectionExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetInactivityProbe() *int { + return a.InactivityProbe +} + +func copyConnectionInactivityProbe(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalConnectionInactivityProbe(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Connection) GetIsConnected() bool { + return a.IsConnected +} + +func (a *Connection) GetMaxBackoff() *int { + return a.MaxBackoff +} + +func copyConnectionMaxBackoff(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalConnectionMaxBackoff(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Connection) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyConnectionOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetStatus() map[string]string { + return a.Status +} + +func copyConnectionStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return 
false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetTarget() string { + return a.Target +} + +func (a *Connection) DeepCopyInto(b *Connection) { + *b = *a + b.ExternalIDs = copyConnectionExternalIDs(a.ExternalIDs) + b.InactivityProbe = copyConnectionInactivityProbe(a.InactivityProbe) + b.MaxBackoff = copyConnectionMaxBackoff(a.MaxBackoff) + b.OtherConfig = copyConnectionOtherConfig(a.OtherConfig) + b.Status = copyConnectionStatus(a.Status) +} + +func (a *Connection) DeepCopy() *Connection { + b := new(Connection) + a.DeepCopyInto(b) + return b +} + +func (a *Connection) CloneModelInto(b model.Model) { + c := b.(*Connection) + a.DeepCopyInto(c) +} + +func (a *Connection) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Connection) Equals(b *Connection) bool { + return a.UUID == b.UUID && + equalConnectionExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalConnectionInactivityProbe(a.InactivityProbe, b.InactivityProbe) && + a.IsConnected == b.IsConnected && + equalConnectionMaxBackoff(a.MaxBackoff, b.MaxBackoff) && + equalConnectionOtherConfig(a.OtherConfig, b.OtherConfig) && + equalConnectionStatus(a.Status, b.Status) && + a.Target == b.Target +} + +func (a *Connection) EqualsModel(b model.Model) bool { + c := b.(*Connection) + return a.Equals(c) +} + +var _ model.CloneableModel = &Connection{} +var _ model.ComparableModel = &Connection{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go new file mode 100644 index 000000000..1e146b657 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go @@ -0,0 +1,120 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const CoppTable = "Copp" + +// Copp defines an object in Copp table +type Copp struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Meters map[string]string `ovsdb:"meters"` + Name string `ovsdb:"name"` +} + +func (a *Copp) GetUUID() string { + return a.UUID +} + +func (a *Copp) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyCoppExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalCoppExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Copp) GetMeters() map[string]string { + return a.Meters +} + +func copyCoppMeters(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalCoppMeters(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Copp) GetName() string { + return a.Name +} + +func (a *Copp) DeepCopyInto(b *Copp) { + *b = *a + b.ExternalIDs = copyCoppExternalIDs(a.ExternalIDs) + b.Meters = copyCoppMeters(a.Meters) +} + +func (a *Copp) DeepCopy() *Copp { + b := new(Copp) + a.DeepCopyInto(b) + return b +} + +func (a *Copp) CloneModelInto(b model.Model) { + c := b.(*Copp) + a.DeepCopyInto(c) +} + +func (a *Copp) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Copp) Equals(b *Copp) bool { + return a.UUID == b.UUID && + equalCoppExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalCoppMeters(a.Meters, b.Meters) && + a.Name == b.Name +} + +func (a *Copp) EqualsModel(b model.Model) bool { + c := b.(*Copp) + return a.Equals(c) +} + +var _ model.CloneableModel = &Copp{} +var _ model.ComparableModel = &Copp{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go new file mode 100644 index 000000000..fd68ebee2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go @@ -0,0 +1,120 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPOptionsTable = "DHCP_Options" + +// DHCPOptions defines an object in DHCP_Options table +type DHCPOptions struct { + UUID string `ovsdb:"_uuid"` + Cidr string `ovsdb:"cidr"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` +} + +func (a *DHCPOptions) GetUUID() string { + return a.UUID +} + +func (a *DHCPOptions) GetCidr() string { + return a.Cidr +} + +func (a *DHCPOptions) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDHCPOptionsExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPOptionsExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPOptions) GetOptions() map[string]string { + return a.Options +} + +func copyDHCPOptionsOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPOptionsOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPOptions) DeepCopyInto(b *DHCPOptions) { + *b = *a + b.ExternalIDs = copyDHCPOptionsExternalIDs(a.ExternalIDs) + b.Options = copyDHCPOptionsOptions(a.Options) +} + +func (a *DHCPOptions) DeepCopy() *DHCPOptions { + b := new(DHCPOptions) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPOptions) CloneModelInto(b model.Model) { + c := b.(*DHCPOptions) + a.DeepCopyInto(c) +} + +func (a *DHCPOptions) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPOptions) Equals(b *DHCPOptions) bool { + return a.UUID == b.UUID && + a.Cidr == b.Cidr && + equalDHCPOptionsExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalDHCPOptionsOptions(a.Options, b.Options) +} + +func (a *DHCPOptions) EqualsModel(b model.Model) bool { + c := b.(*DHCPOptions) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPOptions{} +var _ model.ComparableModel = &DHCPOptions{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go new file mode 100644 index 000000000..f0e973ab7 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go @@ -0,0 +1,145 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPRelayTable = "DHCP_Relay" + +// DHCPRelay defines an object in DHCP_Relay table +type DHCPRelay struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Servers *string `ovsdb:"servers"` +} + +func (a *DHCPRelay) GetUUID() string { + return a.UUID +} + +func (a *DHCPRelay) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDHCPRelayExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPRelayExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPRelay) GetName() string { + return a.Name +} + +func (a *DHCPRelay) GetOptions() map[string]string { + return a.Options +} + +func copyDHCPRelayOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPRelayOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPRelay) GetServers() *string { + return a.Servers +} + +func copyDHCPRelayServers(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDHCPRelayServers(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *DHCPRelay) DeepCopyInto(b *DHCPRelay) { + *b = *a + b.ExternalIDs = copyDHCPRelayExternalIDs(a.ExternalIDs) + b.Options = copyDHCPRelayOptions(a.Options) + b.Servers = copyDHCPRelayServers(a.Servers) +} + +func (a *DHCPRelay) DeepCopy() *DHCPRelay { + b := new(DHCPRelay) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPRelay) CloneModelInto(b model.Model) { + c := b.(*DHCPRelay) + a.DeepCopyInto(c) +} + +func (a *DHCPRelay) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPRelay) Equals(b *DHCPRelay) bool { + return a.UUID == b.UUID && + equalDHCPRelayExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalDHCPRelayOptions(a.Options, b.Options) && + equalDHCPRelayServers(a.Servers, b.Servers) +} + +func (a *DHCPRelay) EqualsModel(b model.Model) bool { + c := b.(*DHCPRelay) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPRelay{} +var _ model.ComparableModel = &DHCPRelay{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go new file mode 100644 index 000000000..285d5df28 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go @@ -0,0 +1,147 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const DNSTable = "DNS" + +// DNS defines an object in DNS table +type DNS struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` + Records map[string]string `ovsdb:"records"` +} + +func (a *DNS) GetUUID() string { + return a.UUID +} + +func (a *DNS) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDNSExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) GetOptions() map[string]string { + return a.Options +} + +func copyDNSOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) GetRecords() map[string]string { + return a.Records +} + +func copyDNSRecords(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSRecords(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) DeepCopyInto(b *DNS) { + *b = *a + b.ExternalIDs = copyDNSExternalIDs(a.ExternalIDs) + b.Options = copyDNSOptions(a.Options) + b.Records = copyDNSRecords(a.Records) +} + +func (a *DNS) DeepCopy() *DNS { + b := new(DNS) + a.DeepCopyInto(b) + return b +} + +func (a *DNS) CloneModelInto(b model.Model) { + c := b.(*DNS) + a.DeepCopyInto(c) +} + +func (a *DNS) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DNS) Equals(b *DNS) bool { + return a.UUID == b.UUID && + equalDNSExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalDNSOptions(a.Options, b.Options) && + equalDNSRecords(a.Records, b.Records) +} + +func (a *DNS) EqualsModel(b model.Model) bool { + c := b.(*DNS) + return a.Equals(c) +} + +var _ model.CloneableModel = &DNS{} +var _ model.ComparableModel = &DNS{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go new file mode 100644 index 000000000..1a0657559 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go @@ -0,0 +1,136 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const ForwardingGroupTable = "Forwarding_Group" + +// ForwardingGroup defines an object in Forwarding_Group table +type ForwardingGroup struct { + UUID string `ovsdb:"_uuid"` + ChildPort []string `ovsdb:"child_port"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Liveness bool `ovsdb:"liveness"` + Name string `ovsdb:"name"` + Vip string `ovsdb:"vip"` + Vmac string `ovsdb:"vmac"` +} + +func (a *ForwardingGroup) GetUUID() string { + return a.UUID +} + +func (a *ForwardingGroup) GetChildPort() []string { + return a.ChildPort +} + +func copyForwardingGroupChildPort(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalForwardingGroupChildPort(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *ForwardingGroup) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyForwardingGroupExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalForwardingGroupExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ForwardingGroup) GetLiveness() bool { + return a.Liveness +} + +func (a *ForwardingGroup) GetName() string { + return a.Name +} + +func (a *ForwardingGroup) GetVip() string { + return a.Vip +} + +func (a *ForwardingGroup) GetVmac() string { + return a.Vmac +} + +func (a *ForwardingGroup) DeepCopyInto(b *ForwardingGroup) { + *b = *a + b.ChildPort = copyForwardingGroupChildPort(a.ChildPort) + b.ExternalIDs = copyForwardingGroupExternalIDs(a.ExternalIDs) +} + +func (a *ForwardingGroup) DeepCopy() *ForwardingGroup { + b := new(ForwardingGroup) + a.DeepCopyInto(b) + return b +} + +func (a *ForwardingGroup) CloneModelInto(b model.Model) { + c := b.(*ForwardingGroup) + a.DeepCopyInto(c) +} + +func (a *ForwardingGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ForwardingGroup) Equals(b *ForwardingGroup) bool { + return a.UUID == b.UUID && + equalForwardingGroupChildPort(a.ChildPort, b.ChildPort) && + equalForwardingGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Liveness == b.Liveness && + a.Name == b.Name && + a.Vip == b.Vip && + a.Vmac == b.Vmac +} + +func (a *ForwardingGroup) EqualsModel(b model.Model) bool { + c := b.(*ForwardingGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &ForwardingGroup{} +var _ model.ComparableModel = &ForwardingGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go new file mode 100644 index 000000000..15935847b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go @@ -0,0 +1,132 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const GatewayChassisTable = "Gateway_Chassis" + +// GatewayChassis defines an object in Gateway_Chassis table +type GatewayChassis struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` +} + +func (a *GatewayChassis) GetUUID() string { + return a.UUID +} + +func (a *GatewayChassis) GetChassisName() string { + return a.ChassisName +} + +func (a *GatewayChassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyGatewayChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalGatewayChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *GatewayChassis) GetName() string { + return a.Name +} + +func (a *GatewayChassis) GetOptions() map[string]string { + return a.Options +} + +func copyGatewayChassisOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalGatewayChassisOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *GatewayChassis) GetPriority() int { + return a.Priority +} + +func (a *GatewayChassis) DeepCopyInto(b *GatewayChassis) { + *b = *a + b.ExternalIDs = copyGatewayChassisExternalIDs(a.ExternalIDs) + b.Options = copyGatewayChassisOptions(a.Options) +} + +func (a *GatewayChassis) DeepCopy() *GatewayChassis { + b := new(GatewayChassis) + a.DeepCopyInto(b) + return b +} + +func (a *GatewayChassis) CloneModelInto(b model.Model) { + c := b.(*GatewayChassis) + a.DeepCopyInto(c) +} + +func (a *GatewayChassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *GatewayChassis) Equals(b *GatewayChassis) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + equalGatewayChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalGatewayChassisOptions(a.Options, b.Options) && + a.Priority == b.Priority +} + +func (a *GatewayChassis) EqualsModel(b model.Model) bool { + c := b.(*GatewayChassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &GatewayChassis{} +var _ model.ComparableModel = &GatewayChassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go new file mode 100644 index 000000000..67f8a84f1 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go @@ -0,0 +1,3 @@ +package nbdb + +//go:generate modelgen --extended -p nbdb -o . 
ovn-nb.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go new file mode 100644 index 000000000..dc09d1ec9 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go @@ -0,0 +1,93 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const HAChassisTable = "HA_Chassis" + +// HAChassis defines an object in HA_Chassis table +type HAChassis struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Priority int `ovsdb:"priority"` +} + +func (a *HAChassis) GetUUID() string { + return a.UUID +} + +func (a *HAChassis) GetChassisName() string { + return a.ChassisName +} + +func (a *HAChassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyHAChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalHAChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *HAChassis) GetPriority() int { + return a.Priority +} + +func (a *HAChassis) DeepCopyInto(b *HAChassis) { + *b = *a + b.ExternalIDs = copyHAChassisExternalIDs(a.ExternalIDs) +} + +func (a *HAChassis) DeepCopy() *HAChassis { + b := new(HAChassis) + a.DeepCopyInto(b) + return b +} + +func (a *HAChassis) CloneModelInto(b model.Model) { + c := b.(*HAChassis) + a.DeepCopyInto(c) +} + +func (a *HAChassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *HAChassis) Equals(b *HAChassis) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + equalHAChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Priority == b.Priority +} + +func (a *HAChassis) EqualsModel(b model.Model) bool { + c := b.(*HAChassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &HAChassis{} +var _ model.ComparableModel = &HAChassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go new file mode 100644 index 000000000..bdda95aaf --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go @@ -0,0 +1,118 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const HAChassisGroupTable = "HA_Chassis_Group" + +// HAChassisGroup defines an object in HA_Chassis_Group table +type HAChassisGroup struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HaChassis []string `ovsdb:"ha_chassis"` + Name string `ovsdb:"name"` +} + +func (a *HAChassisGroup) GetUUID() string { + return a.UUID +} + +func (a *HAChassisGroup) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyHAChassisGroupExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalHAChassisGroupExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *HAChassisGroup) GetHaChassis() []string { + return a.HaChassis +} + +func copyHAChassisGroupHaChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalHAChassisGroupHaChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *HAChassisGroup) GetName() string { + return a.Name +} + +func (a *HAChassisGroup) DeepCopyInto(b *HAChassisGroup) { + *b = *a + b.ExternalIDs = copyHAChassisGroupExternalIDs(a.ExternalIDs) + b.HaChassis = copyHAChassisGroupHaChassis(a.HaChassis) +} + +func (a *HAChassisGroup) DeepCopy() *HAChassisGroup { + b := new(HAChassisGroup) + a.DeepCopyInto(b) + return b +} + +func (a *HAChassisGroup) CloneModelInto(b model.Model) { + c := b.(*HAChassisGroup) + a.DeepCopyInto(c) +} + +func (a *HAChassisGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *HAChassisGroup) Equals(b *HAChassisGroup) bool { + return a.UUID == b.UUID && + equalHAChassisGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalHAChassisGroupHaChassis(a.HaChassis, b.HaChassis) && + a.Name == b.Name +} + +func (a *HAChassisGroup) EqualsModel(b model.Model) bool { + c := b.(*HAChassisGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &HAChassisGroup{} +var _ model.ComparableModel = &HAChassisGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go new file mode 100644 index 000000000..03bcd7601 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go @@ -0,0 +1,290 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LoadBalancerTable = "Load_Balancer" + +type ( + LoadBalancerProtocol = string + LoadBalancerSelectionFields = string +) + +var ( + LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp" + LoadBalancerProtocolUDP LoadBalancerProtocol = "udp" + LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp" + LoadBalancerSelectionFieldsEthSrc LoadBalancerSelectionFields = "eth_src" + LoadBalancerSelectionFieldsEthDst LoadBalancerSelectionFields = "eth_dst" + LoadBalancerSelectionFieldsIPSrc LoadBalancerSelectionFields = "ip_src" + LoadBalancerSelectionFieldsIPDst LoadBalancerSelectionFields = "ip_dst" + LoadBalancerSelectionFieldsTpSrc LoadBalancerSelectionFields = "tp_src" + LoadBalancerSelectionFieldsTpDst LoadBalancerSelectionFields = "tp_dst" +) + +// LoadBalancer defines an object in Load_Balancer table +type LoadBalancer struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HealthCheck []string `ovsdb:"health_check"` + IPPortMappings map[string]string `ovsdb:"ip_port_mappings"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Protocol *LoadBalancerProtocol `ovsdb:"protocol"` + SelectionFields []LoadBalancerSelectionFields `ovsdb:"selection_fields"` + Vips map[string]string `ovsdb:"vips"` +} + +func (a *LoadBalancer) GetUUID() string { + return a.UUID +} + +func (a *LoadBalancer) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLoadBalancerExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) GetHealthCheck() []string { + return a.HealthCheck +} + +func copyLoadBalancerHealthCheck(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLoadBalancerHealthCheck(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LoadBalancer) GetIPPortMappings() map[string]string { + return a.IPPortMappings +} + +func copyLoadBalancerIPPortMappings(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerIPPortMappings(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) GetName() string { + return a.Name +} + +func (a *LoadBalancer) GetOptions() map[string]string { + return a.Options +} + +func copyLoadBalancerOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + 
return false + } + } + return true +} + +func (a *LoadBalancer) GetProtocol() *LoadBalancerProtocol { + return a.Protocol +} + +func copyLoadBalancerProtocol(a *LoadBalancerProtocol) *LoadBalancerProtocol { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerProtocol(a, b *LoadBalancerProtocol) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetSelectionFields() []LoadBalancerSelectionFields { + return a.SelectionFields +} + +func copyLoadBalancerSelectionFields(a []LoadBalancerSelectionFields) []LoadBalancerSelectionFields { + if a == nil { + return nil + } + b := make([]LoadBalancerSelectionFields, len(a)) + copy(b, a) + return b +} + +func equalLoadBalancerSelectionFields(a, b []LoadBalancerSelectionFields) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LoadBalancer) GetVips() map[string]string { + return a.Vips +} + +func copyLoadBalancerVips(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerVips(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) DeepCopyInto(b *LoadBalancer) { + *b = *a + b.ExternalIDs = copyLoadBalancerExternalIDs(a.ExternalIDs) + b.HealthCheck = copyLoadBalancerHealthCheck(a.HealthCheck) + b.IPPortMappings = copyLoadBalancerIPPortMappings(a.IPPortMappings) + b.Options = copyLoadBalancerOptions(a.Options) + b.Protocol = copyLoadBalancerProtocol(a.Protocol) + b.SelectionFields = copyLoadBalancerSelectionFields(a.SelectionFields) + b.Vips = copyLoadBalancerVips(a.Vips) +} + +func (a *LoadBalancer) DeepCopy() *LoadBalancer { + b := new(LoadBalancer) + a.DeepCopyInto(b) + return b +} + +func (a *LoadBalancer) CloneModelInto(b model.Model) { + c := b.(*LoadBalancer) + a.DeepCopyInto(c) +} + +func (a *LoadBalancer) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LoadBalancer) Equals(b *LoadBalancer) bool { + return a.UUID == b.UUID && + equalLoadBalancerExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLoadBalancerHealthCheck(a.HealthCheck, b.HealthCheck) && + equalLoadBalancerIPPortMappings(a.IPPortMappings, b.IPPortMappings) && + a.Name == b.Name && + equalLoadBalancerOptions(a.Options, b.Options) && + equalLoadBalancerProtocol(a.Protocol, b.Protocol) && + equalLoadBalancerSelectionFields(a.SelectionFields, b.SelectionFields) && + equalLoadBalancerVips(a.Vips, b.Vips) +} + +func (a *LoadBalancer) EqualsModel(b model.Model) bool { + c := b.(*LoadBalancer) + return a.Equals(c) +} + +var _ model.CloneableModel = &LoadBalancer{} +var _ model.ComparableModel = &LoadBalancer{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go new file mode 100644 index 000000000..775924967 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LoadBalancerGroupTable = "Load_Balancer_Group" + +// LoadBalancerGroup defines an object in Load_Balancer_Group table +type LoadBalancerGroup struct { + UUID string `ovsdb:"_uuid"` + LoadBalancer []string `ovsdb:"load_balancer"` + Name string `ovsdb:"name"` +} + +func (a *LoadBalancerGroup) GetUUID() string { + return a.UUID +} + +func (a *LoadBalancerGroup) GetLoadBalancer() []string { + return a.LoadBalancer +} + +func copyLoadBalancerGroupLoadBalancer(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLoadBalancerGroupLoadBalancer(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LoadBalancerGroup) GetName() string { + return a.Name +} + +func (a *LoadBalancerGroup) DeepCopyInto(b *LoadBalancerGroup) { + *b = *a + b.LoadBalancer = copyLoadBalancerGroupLoadBalancer(a.LoadBalancer) +} + +func (a *LoadBalancerGroup) DeepCopy() *LoadBalancerGroup { + b := new(LoadBalancerGroup) + a.DeepCopyInto(b) + return b +} + +func (a *LoadBalancerGroup) CloneModelInto(b model.Model) { + c := b.(*LoadBalancerGroup) + a.DeepCopyInto(c) +} + +func (a *LoadBalancerGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LoadBalancerGroup) Equals(b *LoadBalancerGroup) bool { + return a.UUID == b.UUID && + equalLoadBalancerGroupLoadBalancer(a.LoadBalancer, b.LoadBalancer) && + a.Name == b.Name +} + +func (a *LoadBalancerGroup) EqualsModel(b model.Model) bool { + c := b.(*LoadBalancerGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &LoadBalancerGroup{} +var _ model.ComparableModel = &LoadBalancerGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go new file mode 100644 index 000000000..c8163fa00 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go @@ -0,0 +1,120 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LoadBalancerHealthCheckTable = "Load_Balancer_Health_Check" + +// LoadBalancerHealthCheck defines an object in Load_Balancer_Health_Check table +type LoadBalancerHealthCheck struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` + Vip string `ovsdb:"vip"` +} + +func (a *LoadBalancerHealthCheck) GetUUID() string { + return a.UUID +} + +func (a *LoadBalancerHealthCheck) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLoadBalancerHealthCheckExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerHealthCheckExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancerHealthCheck) GetOptions() map[string]string { + return a.Options +} + +func copyLoadBalancerHealthCheckOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerHealthCheckOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancerHealthCheck) GetVip() string { + return a.Vip +} + +func (a *LoadBalancerHealthCheck) DeepCopyInto(b *LoadBalancerHealthCheck) { + *b = *a + b.ExternalIDs = copyLoadBalancerHealthCheckExternalIDs(a.ExternalIDs) + b.Options = copyLoadBalancerHealthCheckOptions(a.Options) +} + +func (a *LoadBalancerHealthCheck) DeepCopy() *LoadBalancerHealthCheck { + b := new(LoadBalancerHealthCheck) + a.DeepCopyInto(b) + return b +} + +func (a *LoadBalancerHealthCheck) CloneModelInto(b model.Model) { + c := b.(*LoadBalancerHealthCheck) + a.DeepCopyInto(c) +} + +func (a *LoadBalancerHealthCheck) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LoadBalancerHealthCheck) Equals(b *LoadBalancerHealthCheck) bool { + return a.UUID == b.UUID && + equalLoadBalancerHealthCheckExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLoadBalancerHealthCheckOptions(a.Options, b.Options) && + a.Vip == b.Vip +} + +func (a *LoadBalancerHealthCheck) EqualsModel(b model.Model) bool { + c := b.(*LoadBalancerHealthCheck) + return a.Equals(c) +} + +var _ model.CloneableModel = &LoadBalancerHealthCheck{} +var _ model.ComparableModel = &LoadBalancerHealthCheck{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go new file mode 100644 index 000000000..81c5efaf9 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go @@ -0,0 +1,356 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalRouterTable = "Logical_Router" + +// LogicalRouter defines an object in Logical_Router table +type LogicalRouter struct { + UUID string `ovsdb:"_uuid"` + Copp *string `ovsdb:"copp"` + Enabled *bool `ovsdb:"enabled"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LoadBalancer []string `ovsdb:"load_balancer"` + LoadBalancerGroup []string `ovsdb:"load_balancer_group"` + Name string `ovsdb:"name"` + Nat []string `ovsdb:"nat"` + Options map[string]string `ovsdb:"options"` + Policies []string `ovsdb:"policies"` + Ports []string `ovsdb:"ports"` + StaticRoutes []string `ovsdb:"static_routes"` +} + +func (a *LogicalRouter) GetUUID() string { + return a.UUID +} + +func (a *LogicalRouter) GetCopp() *string { + return a.Copp +} + +func copyLogicalRouterCopp(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterCopp(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouter) GetEnabled() *bool { + return a.Enabled +} + +func copyLogicalRouterEnabled(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterEnabled(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouter) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalRouterExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouter) GetLoadBalancer() []string { + return a.LoadBalancer +} + +func copyLogicalRouterLoadBalancer(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterLoadBalancer(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetLoadBalancerGroup() []string { + return a.LoadBalancerGroup +} + +func copyLogicalRouterLoadBalancerGroup(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterLoadBalancerGroup(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetName() string { + return a.Name +} + +func (a *LogicalRouter) GetNat() []string { + return a.Nat +} + +func copyLogicalRouterNat(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterNat(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetOptions() map[string]string { + return a.Options +} + +func copyLogicalRouterOptions(a 
map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouter) GetPolicies() []string { + return a.Policies +} + +func copyLogicalRouterPolicies(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPolicies(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetPorts() []string { + return a.Ports +} + +func copyLogicalRouterPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetStaticRoutes() []string { + return a.StaticRoutes +} + +func copyLogicalRouterStaticRoutes(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterStaticRoutes(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) DeepCopyInto(b *LogicalRouter) { + *b = *a + b.Copp = copyLogicalRouterCopp(a.Copp) + b.Enabled = copyLogicalRouterEnabled(a.Enabled) + b.ExternalIDs = copyLogicalRouterExternalIDs(a.ExternalIDs) + b.LoadBalancer = copyLogicalRouterLoadBalancer(a.LoadBalancer) + b.LoadBalancerGroup = copyLogicalRouterLoadBalancerGroup(a.LoadBalancerGroup) + b.Nat = copyLogicalRouterNat(a.Nat) + b.Options = copyLogicalRouterOptions(a.Options) + b.Policies = copyLogicalRouterPolicies(a.Policies) + b.Ports = copyLogicalRouterPorts(a.Ports) + b.StaticRoutes = copyLogicalRouterStaticRoutes(a.StaticRoutes) +} + +func (a *LogicalRouter) DeepCopy() *LogicalRouter { + b := new(LogicalRouter) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalRouter) CloneModelInto(b model.Model) { + c := b.(*LogicalRouter) + a.DeepCopyInto(c) +} + +func (a *LogicalRouter) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalRouter) Equals(b *LogicalRouter) bool { + return a.UUID == b.UUID && + equalLogicalRouterCopp(a.Copp, b.Copp) && + equalLogicalRouterEnabled(a.Enabled, b.Enabled) && + equalLogicalRouterExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalRouterLoadBalancer(a.LoadBalancer, b.LoadBalancer) && + equalLogicalRouterLoadBalancerGroup(a.LoadBalancerGroup, b.LoadBalancerGroup) && + a.Name == b.Name && + equalLogicalRouterNat(a.Nat, b.Nat) && + equalLogicalRouterOptions(a.Options, b.Options) && + equalLogicalRouterPolicies(a.Policies, b.Policies) && + equalLogicalRouterPorts(a.Ports, b.Ports) && + equalLogicalRouterStaticRoutes(a.StaticRoutes, b.StaticRoutes) +} + +func (a *LogicalRouter) EqualsModel(b model.Model) bool { + c := b.(*LogicalRouter) + return a.Equals(c) +} + +var _ model.CloneableModel = 
&LogicalRouter{} +var _ model.ComparableModel = &LogicalRouter{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go new file mode 100644 index 000000000..7272dbb8a --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go @@ -0,0 +1,229 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalRouterPolicyTable = "Logical_Router_Policy" + +type ( + LogicalRouterPolicyAction = string +) + +var ( + LogicalRouterPolicyActionAllow LogicalRouterPolicyAction = "allow" + LogicalRouterPolicyActionDrop LogicalRouterPolicyAction = "drop" + LogicalRouterPolicyActionReroute LogicalRouterPolicyAction = "reroute" +) + +// LogicalRouterPolicy defines an object in Logical_Router_Policy table +type LogicalRouterPolicy struct { + UUID string `ovsdb:"_uuid"` + Action LogicalRouterPolicyAction `ovsdb:"action"` + BFDSessions []string `ovsdb:"bfd_sessions"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Match string `ovsdb:"match"` + Nexthop *string `ovsdb:"nexthop"` + Nexthops []string `ovsdb:"nexthops"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` +} + +func (a *LogicalRouterPolicy) GetUUID() string { + return a.UUID +} + +func (a *LogicalRouterPolicy) GetAction() LogicalRouterPolicyAction { + return a.Action +} + +func (a *LogicalRouterPolicy) GetBFDSessions() []string { + return a.BFDSessions +} + +func copyLogicalRouterPolicyBFDSessions(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPolicyBFDSessions(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPolicy) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalRouterPolicyExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPolicyExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPolicy) GetMatch() string { + return a.Match +} + +func (a *LogicalRouterPolicy) GetNexthop() *string { + return a.Nexthop +} + +func copyLogicalRouterPolicyNexthop(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPolicyNexthop(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPolicy) GetNexthops() []string { + return a.Nexthops +} + +func copyLogicalRouterPolicyNexthops(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPolicyNexthops(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPolicy) GetOptions() 
map[string]string { + return a.Options +} + +func copyLogicalRouterPolicyOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPolicyOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPolicy) GetPriority() int { + return a.Priority +} + +func (a *LogicalRouterPolicy) DeepCopyInto(b *LogicalRouterPolicy) { + *b = *a + b.BFDSessions = copyLogicalRouterPolicyBFDSessions(a.BFDSessions) + b.ExternalIDs = copyLogicalRouterPolicyExternalIDs(a.ExternalIDs) + b.Nexthop = copyLogicalRouterPolicyNexthop(a.Nexthop) + b.Nexthops = copyLogicalRouterPolicyNexthops(a.Nexthops) + b.Options = copyLogicalRouterPolicyOptions(a.Options) +} + +func (a *LogicalRouterPolicy) DeepCopy() *LogicalRouterPolicy { + b := new(LogicalRouterPolicy) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalRouterPolicy) CloneModelInto(b model.Model) { + c := b.(*LogicalRouterPolicy) + a.DeepCopyInto(c) +} + +func (a *LogicalRouterPolicy) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalRouterPolicy) Equals(b *LogicalRouterPolicy) bool { + return a.UUID == b.UUID && + a.Action == b.Action && + equalLogicalRouterPolicyBFDSessions(a.BFDSessions, b.BFDSessions) && + equalLogicalRouterPolicyExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Match == b.Match && + equalLogicalRouterPolicyNexthop(a.Nexthop, b.Nexthop) && + equalLogicalRouterPolicyNexthops(a.Nexthops, b.Nexthops) && + equalLogicalRouterPolicyOptions(a.Options, b.Options) && + a.Priority == b.Priority +} + +func (a *LogicalRouterPolicy) EqualsModel(b model.Model) bool { + c := b.(*LogicalRouterPolicy) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalRouterPolicy{} +var _ model.ComparableModel = &LogicalRouterPolicy{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go new file mode 100644 index 000000000..d39fe0db4 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go @@ -0,0 +1,385 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalRouterPortTable = "Logical_Router_Port" + +// LogicalRouterPort defines an object in Logical_Router_Port table +type LogicalRouterPort struct { + UUID string `ovsdb:"_uuid"` + DhcpRelay *string `ovsdb:"dhcp_relay"` + Enabled *bool `ovsdb:"enabled"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + GatewayChassis []string `ovsdb:"gateway_chassis"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + Ipv6Prefix []string `ovsdb:"ipv6_prefix"` + Ipv6RaConfigs map[string]string `ovsdb:"ipv6_ra_configs"` + MAC string `ovsdb:"mac"` + Name string `ovsdb:"name"` + Networks []string `ovsdb:"networks"` + Options map[string]string `ovsdb:"options"` + Peer *string `ovsdb:"peer"` + Status map[string]string `ovsdb:"status"` +} + +func (a *LogicalRouterPort) GetUUID() string { + return a.UUID +} + +func (a *LogicalRouterPort) GetDhcpRelay() *string { + return a.DhcpRelay +} + +func copyLogicalRouterPortDhcpRelay(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortDhcpRelay(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPort) GetEnabled() *bool { + return a.Enabled +} + +func copyLogicalRouterPortEnabled(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortEnabled(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPort) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalRouterPortExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPortExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetGatewayChassis() []string { + return a.GatewayChassis +} + +func copyLogicalRouterPortGatewayChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPortGatewayChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetHaChassisGroup() *string { + return a.HaChassisGroup +} + +func copyLogicalRouterPortHaChassisGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortHaChassisGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPort) GetIpv6Prefix() []string { + return a.Ipv6Prefix +} + +func copyLogicalRouterPortIpv6Prefix(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPortIpv6Prefix(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetIpv6RaConfigs() 
map[string]string { + return a.Ipv6RaConfigs +} + +func copyLogicalRouterPortIpv6RaConfigs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPortIpv6RaConfigs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetMAC() string { + return a.MAC +} + +func (a *LogicalRouterPort) GetName() string { + return a.Name +} + +func (a *LogicalRouterPort) GetNetworks() []string { + return a.Networks +} + +func copyLogicalRouterPortNetworks(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPortNetworks(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetOptions() map[string]string { + return a.Options +} + +func copyLogicalRouterPortOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPortOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetPeer() *string { + return a.Peer +} + +func copyLogicalRouterPortPeer(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortPeer(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPort) GetStatus() map[string]string { + return a.Status +} + +func copyLogicalRouterPortStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPortStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPort) DeepCopyInto(b *LogicalRouterPort) { + *b = *a + b.DhcpRelay = copyLogicalRouterPortDhcpRelay(a.DhcpRelay) + b.Enabled = copyLogicalRouterPortEnabled(a.Enabled) + b.ExternalIDs = copyLogicalRouterPortExternalIDs(a.ExternalIDs) + b.GatewayChassis = copyLogicalRouterPortGatewayChassis(a.GatewayChassis) + b.HaChassisGroup = copyLogicalRouterPortHaChassisGroup(a.HaChassisGroup) + b.Ipv6Prefix = copyLogicalRouterPortIpv6Prefix(a.Ipv6Prefix) + b.Ipv6RaConfigs = copyLogicalRouterPortIpv6RaConfigs(a.Ipv6RaConfigs) + b.Networks = copyLogicalRouterPortNetworks(a.Networks) + b.Options = copyLogicalRouterPortOptions(a.Options) + b.Peer = copyLogicalRouterPortPeer(a.Peer) + b.Status = copyLogicalRouterPortStatus(a.Status) +} + +func (a *LogicalRouterPort) DeepCopy() *LogicalRouterPort { + b := new(LogicalRouterPort) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalRouterPort) CloneModelInto(b model.Model) { + c := b.(*LogicalRouterPort) + 
a.DeepCopyInto(c) +} + +func (a *LogicalRouterPort) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalRouterPort) Equals(b *LogicalRouterPort) bool { + return a.UUID == b.UUID && + equalLogicalRouterPortDhcpRelay(a.DhcpRelay, b.DhcpRelay) && + equalLogicalRouterPortEnabled(a.Enabled, b.Enabled) && + equalLogicalRouterPortExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalRouterPortGatewayChassis(a.GatewayChassis, b.GatewayChassis) && + equalLogicalRouterPortHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) && + equalLogicalRouterPortIpv6Prefix(a.Ipv6Prefix, b.Ipv6Prefix) && + equalLogicalRouterPortIpv6RaConfigs(a.Ipv6RaConfigs, b.Ipv6RaConfigs) && + a.MAC == b.MAC && + a.Name == b.Name && + equalLogicalRouterPortNetworks(a.Networks, b.Networks) && + equalLogicalRouterPortOptions(a.Options, b.Options) && + equalLogicalRouterPortPeer(a.Peer, b.Peer) && + equalLogicalRouterPortStatus(a.Status, b.Status) +} + +func (a *LogicalRouterPort) EqualsModel(b model.Model) bool { + c := b.(*LogicalRouterPort) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalRouterPort{} +var _ model.ComparableModel = &LogicalRouterPort{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go new file mode 100644 index 000000000..ce966e570 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go @@ -0,0 +1,216 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalRouterStaticRouteTable = "Logical_Router_Static_Route" + +type ( + LogicalRouterStaticRoutePolicy = string +) + +var ( + LogicalRouterStaticRoutePolicySrcIP LogicalRouterStaticRoutePolicy = "src-ip" + LogicalRouterStaticRoutePolicyDstIP LogicalRouterStaticRoutePolicy = "dst-ip" +) + +// LogicalRouterStaticRoute defines an object in Logical_Router_Static_Route table +type LogicalRouterStaticRoute struct { + UUID string `ovsdb:"_uuid"` + BFD *string `ovsdb:"bfd"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IPPrefix string `ovsdb:"ip_prefix"` + Nexthop string `ovsdb:"nexthop"` + Options map[string]string `ovsdb:"options"` + OutputPort *string `ovsdb:"output_port"` + Policy *LogicalRouterStaticRoutePolicy `ovsdb:"policy"` + RouteTable string `ovsdb:"route_table"` +} + +func (a *LogicalRouterStaticRoute) GetUUID() string { + return a.UUID +} + +func (a *LogicalRouterStaticRoute) GetBFD() *string { + return a.BFD +} + +func copyLogicalRouterStaticRouteBFD(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterStaticRouteBFD(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterStaticRoute) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalRouterStaticRouteExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterStaticRouteExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterStaticRoute) 
GetIPPrefix() string { + return a.IPPrefix +} + +func (a *LogicalRouterStaticRoute) GetNexthop() string { + return a.Nexthop +} + +func (a *LogicalRouterStaticRoute) GetOptions() map[string]string { + return a.Options +} + +func copyLogicalRouterStaticRouteOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterStaticRouteOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterStaticRoute) GetOutputPort() *string { + return a.OutputPort +} + +func copyLogicalRouterStaticRouteOutputPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterStaticRouteOutputPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterStaticRoute) GetPolicy() *LogicalRouterStaticRoutePolicy { + return a.Policy +} + +func copyLogicalRouterStaticRoutePolicy(a *LogicalRouterStaticRoutePolicy) *LogicalRouterStaticRoutePolicy { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterStaticRoutePolicy(a, b *LogicalRouterStaticRoutePolicy) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterStaticRoute) GetRouteTable() string { + return a.RouteTable +} + +func (a *LogicalRouterStaticRoute) DeepCopyInto(b *LogicalRouterStaticRoute) { + *b = *a + b.BFD = copyLogicalRouterStaticRouteBFD(a.BFD) + b.ExternalIDs = copyLogicalRouterStaticRouteExternalIDs(a.ExternalIDs) + b.Options = copyLogicalRouterStaticRouteOptions(a.Options) + b.OutputPort = copyLogicalRouterStaticRouteOutputPort(a.OutputPort) + b.Policy = copyLogicalRouterStaticRoutePolicy(a.Policy) +} + +func (a *LogicalRouterStaticRoute) DeepCopy() *LogicalRouterStaticRoute { + b := new(LogicalRouterStaticRoute) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalRouterStaticRoute) CloneModelInto(b model.Model) { + c := b.(*LogicalRouterStaticRoute) + a.DeepCopyInto(c) +} + +func (a *LogicalRouterStaticRoute) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalRouterStaticRoute) Equals(b *LogicalRouterStaticRoute) bool { + return a.UUID == b.UUID && + equalLogicalRouterStaticRouteBFD(a.BFD, b.BFD) && + equalLogicalRouterStaticRouteExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.IPPrefix == b.IPPrefix && + a.Nexthop == b.Nexthop && + equalLogicalRouterStaticRouteOptions(a.Options, b.Options) && + equalLogicalRouterStaticRouteOutputPort(a.OutputPort, b.OutputPort) && + equalLogicalRouterStaticRoutePolicy(a.Policy, b.Policy) && + a.RouteTable == b.RouteTable +} + +func (a *LogicalRouterStaticRoute) EqualsModel(b model.Model) bool { + c := b.(*LogicalRouterStaticRoute) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalRouterStaticRoute{} +var _ model.ComparableModel = &LogicalRouterStaticRoute{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go new file mode 100644 index 000000000..50b8214ad --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go @@ -0,0 +1,362 @@ +// Code 
generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalSwitchTable = "Logical_Switch" + +// LogicalSwitch defines an object in Logical_Switch table +type LogicalSwitch struct { + UUID string `ovsdb:"_uuid"` + ACLs []string `ovsdb:"acls"` + Copp *string `ovsdb:"copp"` + DNSRecords []string `ovsdb:"dns_records"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ForwardingGroups []string `ovsdb:"forwarding_groups"` + LoadBalancer []string `ovsdb:"load_balancer"` + LoadBalancerGroup []string `ovsdb:"load_balancer_group"` + Name string `ovsdb:"name"` + OtherConfig map[string]string `ovsdb:"other_config"` + Ports []string `ovsdb:"ports"` + QOSRules []string `ovsdb:"qos_rules"` +} + +func (a *LogicalSwitch) GetUUID() string { + return a.UUID +} + +func (a *LogicalSwitch) GetACLs() []string { + return a.ACLs +} + +func copyLogicalSwitchACLs(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchACLs(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetCopp() *string { + return a.Copp +} + +func copyLogicalSwitchCopp(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchCopp(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitch) GetDNSRecords() []string { + return a.DNSRecords +} + +func copyLogicalSwitchDNSRecords(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchDNSRecords(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalSwitchExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalSwitchExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetForwardingGroups() []string { + return a.ForwardingGroups +} + +func copyLogicalSwitchForwardingGroups(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchForwardingGroups(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetLoadBalancer() []string { + return a.LoadBalancer +} + +func copyLogicalSwitchLoadBalancer(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchLoadBalancer(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } 
+ return true +} + +func (a *LogicalSwitch) GetLoadBalancerGroup() []string { + return a.LoadBalancerGroup +} + +func copyLogicalSwitchLoadBalancerGroup(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchLoadBalancerGroup(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetName() string { + return a.Name +} + +func (a *LogicalSwitch) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyLogicalSwitchOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalSwitchOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetPorts() []string { + return a.Ports +} + +func copyLogicalSwitchPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetQOSRules() []string { + return a.QOSRules +} + +func copyLogicalSwitchQOSRules(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchQOSRules(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) DeepCopyInto(b *LogicalSwitch) { + *b = *a + b.ACLs = copyLogicalSwitchACLs(a.ACLs) + b.Copp = copyLogicalSwitchCopp(a.Copp) + b.DNSRecords = copyLogicalSwitchDNSRecords(a.DNSRecords) + b.ExternalIDs = copyLogicalSwitchExternalIDs(a.ExternalIDs) + b.ForwardingGroups = copyLogicalSwitchForwardingGroups(a.ForwardingGroups) + b.LoadBalancer = copyLogicalSwitchLoadBalancer(a.LoadBalancer) + b.LoadBalancerGroup = copyLogicalSwitchLoadBalancerGroup(a.LoadBalancerGroup) + b.OtherConfig = copyLogicalSwitchOtherConfig(a.OtherConfig) + b.Ports = copyLogicalSwitchPorts(a.Ports) + b.QOSRules = copyLogicalSwitchQOSRules(a.QOSRules) +} + +func (a *LogicalSwitch) DeepCopy() *LogicalSwitch { + b := new(LogicalSwitch) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalSwitch) CloneModelInto(b model.Model) { + c := b.(*LogicalSwitch) + a.DeepCopyInto(c) +} + +func (a *LogicalSwitch) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalSwitch) Equals(b *LogicalSwitch) bool { + return a.UUID == b.UUID && + equalLogicalSwitchACLs(a.ACLs, b.ACLs) && + equalLogicalSwitchCopp(a.Copp, b.Copp) && + equalLogicalSwitchDNSRecords(a.DNSRecords, b.DNSRecords) && + equalLogicalSwitchExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalSwitchForwardingGroups(a.ForwardingGroups, b.ForwardingGroups) && + equalLogicalSwitchLoadBalancer(a.LoadBalancer, b.LoadBalancer) && + equalLogicalSwitchLoadBalancerGroup(a.LoadBalancerGroup, b.LoadBalancerGroup) && + a.Name == b.Name && + 
equalLogicalSwitchOtherConfig(a.OtherConfig, b.OtherConfig) && + equalLogicalSwitchPorts(a.Ports, b.Ports) && + equalLogicalSwitchQOSRules(a.QOSRules, b.QOSRules) +} + +func (a *LogicalSwitch) EqualsModel(b model.Model) bool { + c := b.(*LogicalSwitch) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalSwitch{} +var _ model.ComparableModel = &LogicalSwitch{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go new file mode 100644 index 000000000..c048f7654 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go @@ -0,0 +1,444 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalSwitchPortTable = "Logical_Switch_Port" + +// LogicalSwitchPort defines an object in Logical_Switch_Port table +type LogicalSwitchPort struct { + UUID string `ovsdb:"_uuid"` + Addresses []string `ovsdb:"addresses"` + Dhcpv4Options *string `ovsdb:"dhcpv4_options"` + Dhcpv6Options *string `ovsdb:"dhcpv6_options"` + DynamicAddresses *string `ovsdb:"dynamic_addresses"` + Enabled *bool `ovsdb:"enabled"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + MirrorRules []string `ovsdb:"mirror_rules"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + ParentName *string `ovsdb:"parent_name"` + PortSecurity []string `ovsdb:"port_security"` + Tag *int `ovsdb:"tag"` + TagRequest *int `ovsdb:"tag_request"` + Type string `ovsdb:"type"` + Up *bool `ovsdb:"up"` +} + +func (a *LogicalSwitchPort) GetUUID() string { + return a.UUID +} + +func (a *LogicalSwitchPort) GetAddresses() []string { + return a.Addresses +} + +func copyLogicalSwitchPortAddresses(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchPortAddresses(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetDhcpv4Options() *string { + return a.Dhcpv4Options +} + +func copyLogicalSwitchPortDhcpv4Options(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortDhcpv4Options(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetDhcpv6Options() *string { + return a.Dhcpv6Options +} + +func copyLogicalSwitchPortDhcpv6Options(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortDhcpv6Options(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetDynamicAddresses() *string { + return a.DynamicAddresses +} + +func copyLogicalSwitchPortDynamicAddresses(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortDynamicAddresses(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetEnabled() *bool { + return a.Enabled +} + +func copyLogicalSwitchPortEnabled(a *bool) *bool { + if a == nil { + 
return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortEnabled(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalSwitchPortExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalSwitchPortExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetHaChassisGroup() *string { + return a.HaChassisGroup +} + +func copyLogicalSwitchPortHaChassisGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortHaChassisGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetMirrorRules() []string { + return a.MirrorRules +} + +func copyLogicalSwitchPortMirrorRules(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchPortMirrorRules(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetName() string { + return a.Name +} + +func (a *LogicalSwitchPort) GetOptions() map[string]string { + return a.Options +} + +func copyLogicalSwitchPortOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalSwitchPortOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetParentName() *string { + return a.ParentName +} + +func copyLogicalSwitchPortParentName(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortParentName(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetPortSecurity() []string { + return a.PortSecurity +} + +func copyLogicalSwitchPortPortSecurity(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchPortPortSecurity(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetTag() *int { + return a.Tag +} + +func copyLogicalSwitchPortTag(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortTag(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetTagRequest() *int { + return a.TagRequest +} + +func copyLogicalSwitchPortTagRequest(a *int) 
*int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortTagRequest(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetType() string { + return a.Type +} + +func (a *LogicalSwitchPort) GetUp() *bool { + return a.Up +} + +func copyLogicalSwitchPortUp(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortUp(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) DeepCopyInto(b *LogicalSwitchPort) { + *b = *a + b.Addresses = copyLogicalSwitchPortAddresses(a.Addresses) + b.Dhcpv4Options = copyLogicalSwitchPortDhcpv4Options(a.Dhcpv4Options) + b.Dhcpv6Options = copyLogicalSwitchPortDhcpv6Options(a.Dhcpv6Options) + b.DynamicAddresses = copyLogicalSwitchPortDynamicAddresses(a.DynamicAddresses) + b.Enabled = copyLogicalSwitchPortEnabled(a.Enabled) + b.ExternalIDs = copyLogicalSwitchPortExternalIDs(a.ExternalIDs) + b.HaChassisGroup = copyLogicalSwitchPortHaChassisGroup(a.HaChassisGroup) + b.MirrorRules = copyLogicalSwitchPortMirrorRules(a.MirrorRules) + b.Options = copyLogicalSwitchPortOptions(a.Options) + b.ParentName = copyLogicalSwitchPortParentName(a.ParentName) + b.PortSecurity = copyLogicalSwitchPortPortSecurity(a.PortSecurity) + b.Tag = copyLogicalSwitchPortTag(a.Tag) + b.TagRequest = copyLogicalSwitchPortTagRequest(a.TagRequest) + b.Up = copyLogicalSwitchPortUp(a.Up) +} + +func (a *LogicalSwitchPort) DeepCopy() *LogicalSwitchPort { + b := new(LogicalSwitchPort) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalSwitchPort) CloneModelInto(b model.Model) { + c := b.(*LogicalSwitchPort) + a.DeepCopyInto(c) +} + +func (a *LogicalSwitchPort) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalSwitchPort) Equals(b *LogicalSwitchPort) bool { + return a.UUID == b.UUID && + equalLogicalSwitchPortAddresses(a.Addresses, b.Addresses) && + equalLogicalSwitchPortDhcpv4Options(a.Dhcpv4Options, b.Dhcpv4Options) && + equalLogicalSwitchPortDhcpv6Options(a.Dhcpv6Options, b.Dhcpv6Options) && + equalLogicalSwitchPortDynamicAddresses(a.DynamicAddresses, b.DynamicAddresses) && + equalLogicalSwitchPortEnabled(a.Enabled, b.Enabled) && + equalLogicalSwitchPortExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalSwitchPortHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) && + equalLogicalSwitchPortMirrorRules(a.MirrorRules, b.MirrorRules) && + a.Name == b.Name && + equalLogicalSwitchPortOptions(a.Options, b.Options) && + equalLogicalSwitchPortParentName(a.ParentName, b.ParentName) && + equalLogicalSwitchPortPortSecurity(a.PortSecurity, b.PortSecurity) && + equalLogicalSwitchPortTag(a.Tag, b.Tag) && + equalLogicalSwitchPortTagRequest(a.TagRequest, b.TagRequest) && + a.Type == b.Type && + equalLogicalSwitchPortUp(a.Up, b.Up) +} + +func (a *LogicalSwitchPort) EqualsModel(b model.Model) bool { + c := b.(*LogicalSwitchPort) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalSwitchPort{} +var _ model.ComparableModel = &LogicalSwitchPort{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go new file mode 100644 index 000000000..09b7e9e6a --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go @@ -0,0 +1,158 @@ +// Code generated by "libovsdb.modelgen" +// 
DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const MeterTable = "Meter" + +type ( + MeterUnit = string +) + +var ( + MeterUnitKbps MeterUnit = "kbps" + MeterUnitPktps MeterUnit = "pktps" +) + +// Meter defines an object in Meter table +type Meter struct { + UUID string `ovsdb:"_uuid"` + Bands []string `ovsdb:"bands"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Fair *bool `ovsdb:"fair"` + Name string `ovsdb:"name"` + Unit MeterUnit `ovsdb:"unit"` +} + +func (a *Meter) GetUUID() string { + return a.UUID +} + +func (a *Meter) GetBands() []string { + return a.Bands +} + +func copyMeterBands(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalMeterBands(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Meter) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMeterExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMeterExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Meter) GetFair() *bool { + return a.Fair +} + +func copyMeterFair(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalMeterFair(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Meter) GetName() string { + return a.Name +} + +func (a *Meter) GetUnit() MeterUnit { + return a.Unit +} + +func (a *Meter) DeepCopyInto(b *Meter) { + *b = *a + b.Bands = copyMeterBands(a.Bands) + b.ExternalIDs = copyMeterExternalIDs(a.ExternalIDs) + b.Fair = copyMeterFair(a.Fair) +} + +func (a *Meter) DeepCopy() *Meter { + b := new(Meter) + a.DeepCopyInto(b) + return b +} + +func (a *Meter) CloneModelInto(b model.Model) { + c := b.(*Meter) + a.DeepCopyInto(c) +} + +func (a *Meter) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Meter) Equals(b *Meter) bool { + return a.UUID == b.UUID && + equalMeterBands(a.Bands, b.Bands) && + equalMeterExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalMeterFair(a.Fair, b.Fair) && + a.Name == b.Name && + a.Unit == b.Unit +} + +func (a *Meter) EqualsModel(b model.Model) bool { + c := b.(*Meter) + return a.Equals(c) +} + +var _ model.CloneableModel = &Meter{} +var _ model.ComparableModel = &Meter{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go new file mode 100644 index 000000000..4ef0d901a --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go @@ -0,0 +1,107 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const MeterBandTable = "Meter_Band" + +type ( + MeterBandAction = string +) + +var ( + MeterBandActionDrop MeterBandAction = "drop" +) + +// MeterBand defines an object in Meter_Band table +type MeterBand struct { + UUID string `ovsdb:"_uuid"` + Action MeterBandAction `ovsdb:"action"` + BurstSize int `ovsdb:"burst_size"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Rate int `ovsdb:"rate"` +} + +func (a *MeterBand) GetUUID() string { + return a.UUID +} + +func (a *MeterBand) GetAction() MeterBandAction { + return a.Action +} + +func (a *MeterBand) GetBurstSize() int { + return a.BurstSize +} + +func (a *MeterBand) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMeterBandExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMeterBandExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *MeterBand) GetRate() int { + return a.Rate +} + +func (a *MeterBand) DeepCopyInto(b *MeterBand) { + *b = *a + b.ExternalIDs = copyMeterBandExternalIDs(a.ExternalIDs) +} + +func (a *MeterBand) DeepCopy() *MeterBand { + b := new(MeterBand) + a.DeepCopyInto(b) + return b +} + +func (a *MeterBand) CloneModelInto(b model.Model) { + c := b.(*MeterBand) + a.DeepCopyInto(c) +} + +func (a *MeterBand) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *MeterBand) Equals(b *MeterBand) bool { + return a.UUID == b.UUID && + a.Action == b.Action && + a.BurstSize == b.BurstSize && + equalMeterBandExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Rate == b.Rate +} + +func (a *MeterBand) EqualsModel(b model.Model) bool { + c := b.(*MeterBand) + return a.Equals(c) +} + +var _ model.CloneableModel = &MeterBand{} +var _ model.ComparableModel = &MeterBand{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go new file mode 100644 index 000000000..57e3b01f6 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go @@ -0,0 +1,125 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const MirrorTable = "Mirror" + +type ( + MirrorFilter = string + MirrorType = string +) + +var ( + MirrorFilterFromLport MirrorFilter = "from-lport" + MirrorFilterToLport MirrorFilter = "to-lport" + MirrorFilterBoth MirrorFilter = "both" + MirrorTypeGre MirrorType = "gre" + MirrorTypeErspan MirrorType = "erspan" + MirrorTypeLocal MirrorType = "local" +) + +// Mirror defines an object in Mirror table +type Mirror struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Filter MirrorFilter `ovsdb:"filter"` + Index int `ovsdb:"index"` + Name string `ovsdb:"name"` + Sink string `ovsdb:"sink"` + Type MirrorType `ovsdb:"type"` +} + +func (a *Mirror) GetUUID() string { + return a.UUID +} + +func (a *Mirror) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMirrorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMirrorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Mirror) GetFilter() MirrorFilter { + return a.Filter +} + +func (a *Mirror) GetIndex() int { + return a.Index +} + +func (a *Mirror) GetName() string { + return a.Name +} + +func (a *Mirror) GetSink() string { + return a.Sink +} + +func (a *Mirror) GetType() MirrorType { + return a.Type +} + +func (a *Mirror) DeepCopyInto(b *Mirror) { + *b = *a + b.ExternalIDs = copyMirrorExternalIDs(a.ExternalIDs) +} + +func (a *Mirror) DeepCopy() *Mirror { + b := new(Mirror) + a.DeepCopyInto(b) + return b +} + +func (a *Mirror) CloneModelInto(b model.Model) { + c := b.(*Mirror) + a.DeepCopyInto(c) +} + +func (a *Mirror) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Mirror) Equals(b *Mirror) bool { + return a.UUID == b.UUID && + equalMirrorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Filter == b.Filter && + a.Index == b.Index && + a.Name == b.Name && + a.Sink == b.Sink && + a.Type == b.Type +} + +func (a *Mirror) EqualsModel(b model.Model) bool { + c := b.(*Mirror) + return a.Equals(c) +} + +var _ model.CloneableModel = &Mirror{} +var _ model.ComparableModel = &Mirror{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go new file mode 100644 index 000000000..daabac453 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go @@ -0,0 +1,2262 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import ( + "encoding/json" + + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb +func FullDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("OVN_Northbound", map[string]model.Model{ + "ACL": &ACL{}, + "Address_Set": &AddressSet{}, + "BFD": &BFD{}, + "Chassis_Template_Var": &ChassisTemplateVar{}, + "Connection": &Connection{}, + "Copp": &Copp{}, + "DHCP_Options": &DHCPOptions{}, + "DHCP_Relay": &DHCPRelay{}, + "DNS": &DNS{}, + "Forwarding_Group": &ForwardingGroup{}, + "Gateway_Chassis": &GatewayChassis{}, + "HA_Chassis": &HAChassis{}, + "HA_Chassis_Group": &HAChassisGroup{}, + "Load_Balancer": &LoadBalancer{}, + "Load_Balancer_Group": &LoadBalancerGroup{}, + "Load_Balancer_Health_Check": &LoadBalancerHealthCheck{}, + "Logical_Router": &LogicalRouter{}, + "Logical_Router_Policy": &LogicalRouterPolicy{}, + "Logical_Router_Port": &LogicalRouterPort{}, + "Logical_Router_Static_Route": &LogicalRouterStaticRoute{}, + "Logical_Switch": &LogicalSwitch{}, + "Logical_Switch_Port": &LogicalSwitchPort{}, + "Meter": &Meter{}, + "Meter_Band": &MeterBand{}, + "Mirror": &Mirror{}, + "NAT": &NAT{}, + "NB_Global": &NBGlobal{}, + "Port_Group": &PortGroup{}, + "QoS": &QoS{}, + "SSL": &SSL{}, + "Sample": &Sample{}, + "Sample_Collector": &SampleCollector{}, + "Sampling_App": &SamplingApp{}, + "Static_MAC_Binding": &StaticMACBinding{}, + }) +} + +var schema = `{ + "name": "OVN_Northbound", + "version": "7.6.0", + "tables": { + "ACL": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "allow", + "allow-related", + "allow-stateless", + "drop", + "reject", + "pass" + ] + ] + } + } + }, + "direction": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "from-lport", + "to-lport" + ] + ] + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "label": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + } + } + }, + "log": { + "type": "boolean" + }, + "match": { + "type": "string" + }, + "meter": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "name": { + "type": { + "key": { + "type": "string", + "minLength": 63, + "maxLength": 63 + }, + "min": 0, + "max": 1 + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + }, + "sample_est": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "sample_new": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "severity": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "alert", + "warning", + "notice", + "info", + "debug" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "tier": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 3 + } + } + } + } + }, + "Address_Set": { + "columns": { + "addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": 
"string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "BFD": { + "columns": { + "detect_mult": { + "type": { + "key": { + "type": "integer", + "minInteger": 1 + }, + "min": 0, + "max": 1 + } + }, + "dst_ip": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "logical_port": { + "type": "string" + }, + "min_rx": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "min_tx": { + "type": { + "key": { + "type": "integer", + "minInteger": 1 + }, + "min": 0, + "max": 1 + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "status": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "down", + "init", + "up", + "admin_down" + ] + ] + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "logical_port", + "dst_ip" + ] + ], + "isRoot": true + }, + "Chassis_Template_Var": { + "columns": { + "chassis": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "variables": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "chassis" + ] + ], + "isRoot": true + }, + "Connection": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "inactivity_probe": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "is_connected": { + "type": "boolean", + "ephemeral": true + }, + "max_backoff": { + "type": { + "key": { + "type": "integer", + "minInteger": 1000 + }, + "min": 0, + "max": 1 + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "target": { + "type": "string" + } + }, + "indexes": [ + [ + "target" + ] + ] + }, + "Copp": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "meters": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "DHCP_Options": { + "columns": { + "cidr": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "DHCP_Relay": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + 
"max": "unlimited" + } + }, + "servers": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + }, + "DNS": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "records": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Forwarding_Group": { + "columns": { + "child_port": { + "type": { + "key": { + "type": "string" + }, + "min": 1, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "liveness": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "vip": { + "type": "string" + }, + "vmac": { + "type": "string" + } + } + }, + "Gateway_Chassis": { + "columns": { + "chassis_name": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "HA_Chassis": { + "columns": { + "chassis_name": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + } + }, + "HA_Chassis_Group": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Load_Balancer": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "health_check": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer_Health_Check", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip_port_mappings": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "protocol": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "tcp", + "udp", + "sctp" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "selection_fields": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "eth_src", + "eth_dst", + "ip_src", + "ip_dst", + "tp_src", + "tp_dst" + ] + ] + }, + "min": 0, + "max": "unlimited" + } + }, + "vips": { + "type": { + "key": { + "type": "string" + }, + "value": 
{ + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Load_Balancer_Group": { + "columns": { + "load_balancer": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Load_Balancer_Health_Check": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "vip": { + "type": "string" + } + } + }, + "Logical_Router": { + "columns": { + "copp": { + "type": { + "key": { + "type": "uuid", + "refTable": "Copp", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "enabled": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancer": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancer_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer_Group" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "nat": { + "type": { + "key": { + "type": "uuid", + "refTable": "NAT", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "policies": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Router_Policy", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Router_Port", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "static_routes": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Router_Static_Route", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Logical_Router_Policy": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "allow", + "drop", + "reroute" + ] + ] + } + } + }, + "bfd_sessions": { + "type": { + "key": { + "type": "uuid", + "refTable": "BFD", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "match": { + "type": "string" + }, + "nexthop": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "nexthops": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + } + }, + "Logical_Router_Port": { + "columns": { + "dhcp_relay": { + "type": { + "key": { + "type": "uuid", + "refTable": "DHCP_Relay", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "enabled": { + "type": { + "key": { + "type": 
"boolean" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "gateway_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Gateway_Chassis", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "ipv6_prefix": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ipv6_ra_configs": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "mac": { + "type": "string" + }, + "name": { + "type": "string" + }, + "networks": { + "type": { + "key": { + "type": "string" + }, + "min": 1, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "peer": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "Logical_Router_Static_Route": { + "columns": { + "bfd": { + "type": { + "key": { + "type": "uuid", + "refTable": "BFD", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip_prefix": { + "type": "string" + }, + "nexthop": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "output_port": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "policy": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "src-ip", + "dst-ip" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "route_table": { + "type": "string" + } + } + }, + "Logical_Switch": { + "columns": { + "acls": { + "type": { + "key": { + "type": "uuid", + "refTable": "ACL", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "copp": { + "type": { + "key": { + "type": "uuid", + "refTable": "Copp", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "dns_records": { + "type": { + "key": { + "type": "uuid", + "refTable": "DNS", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "forwarding_groups": { + "type": { + "key": { + "type": "uuid", + "refTable": "Forwarding_Group", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancer": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancer_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer_Group" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": 
"Logical_Switch_Port", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "qos_rules": { + "type": { + "key": { + "type": "uuid", + "refTable": "QoS", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Logical_Switch_Port": { + "columns": { + "addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "dhcpv4_options": { + "type": { + "key": { + "type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "dhcpv6_options": { + "type": { + "key": { + "type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "dynamic_addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "enabled": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "mirror_rules": { + "type": { + "key": { + "type": "uuid", + "refTable": "Mirror", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "parent_name": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "port_security": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "tag": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4095 + }, + "min": 0, + "max": 1 + } + }, + "tag_request": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4095 + }, + "min": 0, + "max": 1 + } + }, + "type": { + "type": "string" + }, + "up": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "Meter": { + "columns": { + "bands": { + "type": { + "key": { + "type": "uuid", + "refTable": "Meter_Band", + "refType": "strong" + }, + "min": 1, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "fair": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "name": { + "type": "string" + }, + "unit": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "kbps", + "pktps" + ] + ] + } + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Meter_Band": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": "drop" + } + } + }, + "burst_size": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "rate": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + } + } + } + } + }, + "Mirror": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "filter": { + "type": { + "key": 
{ + "type": "string", + "enum": [ + "set", + [ + "from-lport", + "to-lport", + "both" + ] + ] + } + } + }, + "index": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "sink": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "gre", + "erspan", + "local" + ] + ] + } + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "NAT": { + "columns": { + "allowed_ext_ips": { + "type": { + "key": { + "type": "uuid", + "refTable": "Address_Set", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "exempted_ext_ips": { + "type": { + "key": { + "type": "uuid", + "refTable": "Address_Set", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ip": { + "type": "string" + }, + "external_mac": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "external_port_range": { + "type": "string" + }, + "gateway_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Router_Port", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "logical_ip": { + "type": "string" + }, + "logical_port": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "match": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "dnat", + "snat", + "dnat_and_snat" + ] + ] + } + } + } + } + }, + "NB_Global": { + "columns": { + "connections": { + "type": { + "key": { + "type": "uuid", + "refTable": "Connection" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "hv_cfg": { + "type": "integer" + }, + "hv_cfg_timestamp": { + "type": "integer" + }, + "ipsec": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "nb_cfg": { + "type": "integer" + }, + "nb_cfg_timestamp": { + "type": "integer" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "sb_cfg": { + "type": "integer" + }, + "sb_cfg_timestamp": { + "type": "integer" + }, + "ssl": { + "type": { + "key": { + "type": "uuid", + "refTable": "SSL" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + }, + "Port_Group": { + "columns": { + "acls": { + "type": { + "key": { + "type": "uuid", + "refTable": "ACL", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Switch_Port", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "QoS": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "dscp", + "mark" + ] + ] + }, + "value": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + 
"min": 0, + "max": "unlimited" + } + }, + "bandwidth": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "rate", + "burst" + ] + ] + }, + "value": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + }, + "min": 0, + "max": "unlimited" + } + }, + "direction": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "from-lport", + "to-lport" + ] + ] + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "match": { + "type": "string" + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + } + }, + "SSL": { + "columns": { + "bootstrap_ca_cert": { + "type": "boolean" + }, + "ca_cert": { + "type": "string" + }, + "certificate": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "private_key": { + "type": "string" + }, + "ssl_ciphers": { + "type": "string" + }, + "ssl_protocols": { + "type": "string" + } + } + }, + "Sample": { + "columns": { + "collectors": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample_Collector", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "metadata": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + }, + "min": 1, + "max": 1 + } + } + }, + "indexes": [ + [ + "metadata" + ] + ] + }, + "Sample_Collector": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 255 + } + } + }, + "name": { + "type": "string" + }, + "probability": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 65535 + } + } + }, + "set_id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + } + } + } + }, + "indexes": [ + [ + "id" + ] + ], + "isRoot": true + }, + "Sampling_App": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 255 + } + } + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "drop", + "acl-new", + "acl-est" + ] + ] + } + } + } + }, + "indexes": [ + [ + "type" + ] + ], + "isRoot": true + }, + "Static_MAC_Binding": { + "columns": { + "ip": { + "type": "string" + }, + "logical_port": { + "type": "string" + }, + "mac": { + "type": "string" + }, + "override_dynamic_mac": { + "type": "boolean" + } + }, + "indexes": [ + [ + "logical_port", + "ip" + ] + ], + "isRoot": true + } + } +}` + +func Schema() ovsdb.DatabaseSchema { + var s ovsdb.DatabaseSchema + err := json.Unmarshal([]byte(schema), &s) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go new file mode 100644 index 000000000..4bd1b7ed4 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go @@ -0,0 +1,285 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const NATTable = "NAT" + +type ( + NATType = string +) + +var ( + NATTypeDNAT NATType = "dnat" + NATTypeSNAT NATType = "snat" + NATTypeDNATAndSNAT NATType = "dnat_and_snat" +) + +// NAT defines an object in NAT table +type NAT struct { + UUID string `ovsdb:"_uuid"` + AllowedExtIPs *string `ovsdb:"allowed_ext_ips"` + ExemptedExtIPs *string `ovsdb:"exempted_ext_ips"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ExternalIP string `ovsdb:"external_ip"` + ExternalMAC *string `ovsdb:"external_mac"` + ExternalPortRange string `ovsdb:"external_port_range"` + GatewayPort *string `ovsdb:"gateway_port"` + LogicalIP string `ovsdb:"logical_ip"` + LogicalPort *string `ovsdb:"logical_port"` + Match string `ovsdb:"match"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` + Type NATType `ovsdb:"type"` +} + +func (a *NAT) GetUUID() string { + return a.UUID +} + +func (a *NAT) GetAllowedExtIPs() *string { + return a.AllowedExtIPs +} + +func copyNATAllowedExtIPs(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATAllowedExtIPs(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetExemptedExtIPs() *string { + return a.ExemptedExtIPs +} + +func copyNATExemptedExtIPs(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATExemptedExtIPs(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyNATExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNATExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NAT) GetExternalIP() string { + return a.ExternalIP +} + +func (a *NAT) GetExternalMAC() *string { + return a.ExternalMAC +} + +func copyNATExternalMAC(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATExternalMAC(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetExternalPortRange() string { + return a.ExternalPortRange +} + +func (a *NAT) GetGatewayPort() *string { + return a.GatewayPort +} + +func copyNATGatewayPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATGatewayPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetLogicalIP() string { + return a.LogicalIP +} + +func (a *NAT) GetLogicalPort() *string { + return a.LogicalPort +} + +func copyNATLogicalPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATLogicalPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetMatch() string { + return a.Match +} + +func (a *NAT) GetOptions() map[string]string { + return a.Options +} + +func copyNATOptions(a map[string]string) 
map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNATOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NAT) GetPriority() int { + return a.Priority +} + +func (a *NAT) GetType() NATType { + return a.Type +} + +func (a *NAT) DeepCopyInto(b *NAT) { + *b = *a + b.AllowedExtIPs = copyNATAllowedExtIPs(a.AllowedExtIPs) + b.ExemptedExtIPs = copyNATExemptedExtIPs(a.ExemptedExtIPs) + b.ExternalIDs = copyNATExternalIDs(a.ExternalIDs) + b.ExternalMAC = copyNATExternalMAC(a.ExternalMAC) + b.GatewayPort = copyNATGatewayPort(a.GatewayPort) + b.LogicalPort = copyNATLogicalPort(a.LogicalPort) + b.Options = copyNATOptions(a.Options) +} + +func (a *NAT) DeepCopy() *NAT { + b := new(NAT) + a.DeepCopyInto(b) + return b +} + +func (a *NAT) CloneModelInto(b model.Model) { + c := b.(*NAT) + a.DeepCopyInto(c) +} + +func (a *NAT) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *NAT) Equals(b *NAT) bool { + return a.UUID == b.UUID && + equalNATAllowedExtIPs(a.AllowedExtIPs, b.AllowedExtIPs) && + equalNATExemptedExtIPs(a.ExemptedExtIPs, b.ExemptedExtIPs) && + equalNATExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ExternalIP == b.ExternalIP && + equalNATExternalMAC(a.ExternalMAC, b.ExternalMAC) && + a.ExternalPortRange == b.ExternalPortRange && + equalNATGatewayPort(a.GatewayPort, b.GatewayPort) && + a.LogicalIP == b.LogicalIP && + equalNATLogicalPort(a.LogicalPort, b.LogicalPort) && + a.Match == b.Match && + equalNATOptions(a.Options, b.Options) && + a.Priority == b.Priority && + a.Type == b.Type +} + +func (a *NAT) EqualsModel(b model.Model) bool { + c := b.(*NAT) + return a.Equals(c) +} + +var _ model.CloneableModel = &NAT{} +var _ model.ComparableModel = &NAT{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go new file mode 100644 index 000000000..bae9e20f2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go @@ -0,0 +1,218 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const NBGlobalTable = "NB_Global" + +// NBGlobal defines an object in NB_Global table +type NBGlobal struct { + UUID string `ovsdb:"_uuid"` + Connections []string `ovsdb:"connections"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HvCfg int `ovsdb:"hv_cfg"` + HvCfgTimestamp int `ovsdb:"hv_cfg_timestamp"` + Ipsec bool `ovsdb:"ipsec"` + Name string `ovsdb:"name"` + NbCfg int `ovsdb:"nb_cfg"` + NbCfgTimestamp int `ovsdb:"nb_cfg_timestamp"` + Options map[string]string `ovsdb:"options"` + SbCfg int `ovsdb:"sb_cfg"` + SbCfgTimestamp int `ovsdb:"sb_cfg_timestamp"` + SSL *string `ovsdb:"ssl"` +} + +func (a *NBGlobal) GetUUID() string { + return a.UUID +} + +func (a *NBGlobal) GetConnections() []string { + return a.Connections +} + +func copyNBGlobalConnections(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalNBGlobalConnections(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *NBGlobal) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyNBGlobalExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNBGlobalExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NBGlobal) GetHvCfg() int { + return a.HvCfg +} + +func (a *NBGlobal) GetHvCfgTimestamp() int { + return a.HvCfgTimestamp +} + +func (a *NBGlobal) GetIpsec() bool { + return a.Ipsec +} + +func (a *NBGlobal) GetName() string { + return a.Name +} + +func (a *NBGlobal) GetNbCfg() int { + return a.NbCfg +} + +func (a *NBGlobal) GetNbCfgTimestamp() int { + return a.NbCfgTimestamp +} + +func (a *NBGlobal) GetOptions() map[string]string { + return a.Options +} + +func copyNBGlobalOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNBGlobalOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NBGlobal) GetSbCfg() int { + return a.SbCfg +} + +func (a *NBGlobal) GetSbCfgTimestamp() int { + return a.SbCfgTimestamp +} + +func (a *NBGlobal) GetSSL() *string { + return a.SSL +} + +func copyNBGlobalSSL(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNBGlobalSSL(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NBGlobal) DeepCopyInto(b *NBGlobal) { + *b = *a + b.Connections = copyNBGlobalConnections(a.Connections) + b.ExternalIDs = copyNBGlobalExternalIDs(a.ExternalIDs) + b.Options = copyNBGlobalOptions(a.Options) + b.SSL = copyNBGlobalSSL(a.SSL) +} + +func (a *NBGlobal) DeepCopy() *NBGlobal { + b := new(NBGlobal) + a.DeepCopyInto(b) + return b +} + +func (a *NBGlobal) CloneModelInto(b model.Model) { + c := b.(*NBGlobal) + a.DeepCopyInto(c) +} + +func (a 
*NBGlobal) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *NBGlobal) Equals(b *NBGlobal) bool { + return a.UUID == b.UUID && + equalNBGlobalConnections(a.Connections, b.Connections) && + equalNBGlobalExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.HvCfg == b.HvCfg && + a.HvCfgTimestamp == b.HvCfgTimestamp && + a.Ipsec == b.Ipsec && + a.Name == b.Name && + a.NbCfg == b.NbCfg && + a.NbCfgTimestamp == b.NbCfgTimestamp && + equalNBGlobalOptions(a.Options, b.Options) && + a.SbCfg == b.SbCfg && + a.SbCfgTimestamp == b.SbCfgTimestamp && + equalNBGlobalSSL(a.SSL, b.SSL) +} + +func (a *NBGlobal) EqualsModel(b model.Model) bool { + c := b.(*NBGlobal) + return a.Equals(c) +} + +var _ model.CloneableModel = &NBGlobal{} +var _ model.ComparableModel = &NBGlobal{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go new file mode 100644 index 000000000..bf4fa809b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go @@ -0,0 +1,149 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const PortGroupTable = "Port_Group" + +// PortGroup defines an object in Port_Group table +type PortGroup struct { + UUID string `ovsdb:"_uuid"` + ACLs []string `ovsdb:"acls"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` +} + +func (a *PortGroup) GetUUID() string { + return a.UUID +} + +func (a *PortGroup) GetACLs() []string { + return a.ACLs +} + +func copyPortGroupACLs(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortGroupACLs(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortGroup) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyPortGroupExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortGroupExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *PortGroup) GetName() string { + return a.Name +} + +func (a *PortGroup) GetPorts() []string { + return a.Ports +} + +func copyPortGroupPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortGroupPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortGroup) DeepCopyInto(b *PortGroup) { + *b = *a + b.ACLs = copyPortGroupACLs(a.ACLs) + b.ExternalIDs = copyPortGroupExternalIDs(a.ExternalIDs) + b.Ports = copyPortGroupPorts(a.Ports) +} + +func (a *PortGroup) DeepCopy() *PortGroup { + b := new(PortGroup) + a.DeepCopyInto(b) + return b +} + +func (a *PortGroup) CloneModelInto(b model.Model) { + c := b.(*PortGroup) + a.DeepCopyInto(c) +} + +func (a *PortGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *PortGroup) 
Equals(b *PortGroup) bool { + return a.UUID == b.UUID && + equalPortGroupACLs(a.ACLs, b.ACLs) && + equalPortGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalPortGroupPorts(a.Ports, b.Ports) +} + +func (a *PortGroup) EqualsModel(b model.Model) bool { + c := b.(*PortGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &PortGroup{} +var _ model.ComparableModel = &PortGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go new file mode 100644 index 000000000..d25322b4b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go @@ -0,0 +1,180 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const QoSTable = "QoS" + +type ( + QoSAction = string + QoSBandwidth = string + QoSDirection = string +) + +var ( + QoSActionDSCP QoSAction = "dscp" + QoSActionMark QoSAction = "mark" + QoSBandwidthRate QoSBandwidth = "rate" + QoSBandwidthBurst QoSBandwidth = "burst" + QoSDirectionFromLport QoSDirection = "from-lport" + QoSDirectionToLport QoSDirection = "to-lport" +) + +// QoS defines an object in QoS table +type QoS struct { + UUID string `ovsdb:"_uuid"` + Action map[string]int `ovsdb:"action"` + Bandwidth map[string]int `ovsdb:"bandwidth"` + Direction QoSDirection `ovsdb:"direction"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Match string `ovsdb:"match"` + Priority int `ovsdb:"priority"` +} + +func (a *QoS) GetUUID() string { + return a.UUID +} + +func (a *QoS) GetAction() map[string]int { + return a.Action +} + +func copyQoSAction(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSAction(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetBandwidth() map[string]int { + return a.Bandwidth +} + +func copyQoSBandwidth(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSBandwidth(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetDirection() QoSDirection { + return a.Direction +} + +func (a *QoS) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyQoSExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetMatch() string { + return a.Match +} + +func (a *QoS) GetPriority() int { + return a.Priority +} + +func (a *QoS) DeepCopyInto(b *QoS) { + *b = *a + b.Action = copyQoSAction(a.Action) + b.Bandwidth = copyQoSBandwidth(a.Bandwidth) + b.ExternalIDs = copyQoSExternalIDs(a.ExternalIDs) +} + +func (a *QoS) DeepCopy() *QoS 
{ + b := new(QoS) + a.DeepCopyInto(b) + return b +} + +func (a *QoS) CloneModelInto(b model.Model) { + c := b.(*QoS) + a.DeepCopyInto(c) +} + +func (a *QoS) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *QoS) Equals(b *QoS) bool { + return a.UUID == b.UUID && + equalQoSAction(a.Action, b.Action) && + equalQoSBandwidth(a.Bandwidth, b.Bandwidth) && + a.Direction == b.Direction && + equalQoSExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Match == b.Match && + a.Priority == b.Priority +} + +func (a *QoS) EqualsModel(b model.Model) bool { + c := b.(*QoS) + return a.Equals(c) +} + +var _ model.CloneableModel = &QoS{} +var _ model.ComparableModel = &QoS{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go new file mode 100644 index 000000000..639393a1e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SampleTable = "Sample" + +// Sample defines an object in Sample table +type Sample struct { + UUID string `ovsdb:"_uuid"` + Collectors []string `ovsdb:"collectors"` + Metadata int `ovsdb:"metadata"` +} + +func (a *Sample) GetUUID() string { + return a.UUID +} + +func (a *Sample) GetCollectors() []string { + return a.Collectors +} + +func copySampleCollectors(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalSampleCollectors(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Sample) GetMetadata() int { + return a.Metadata +} + +func (a *Sample) DeepCopyInto(b *Sample) { + *b = *a + b.Collectors = copySampleCollectors(a.Collectors) +} + +func (a *Sample) DeepCopy() *Sample { + b := new(Sample) + a.DeepCopyInto(b) + return b +} + +func (a *Sample) CloneModelInto(b model.Model) { + c := b.(*Sample) + a.DeepCopyInto(c) +} + +func (a *Sample) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Sample) Equals(b *Sample) bool { + return a.UUID == b.UUID && + equalSampleCollectors(a.Collectors, b.Collectors) && + a.Metadata == b.Metadata +} + +func (a *Sample) EqualsModel(b model.Model) bool { + c := b.(*Sample) + return a.Equals(c) +} + +var _ model.CloneableModel = &Sample{} +var _ model.ComparableModel = &Sample{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go new file mode 100644 index 000000000..50f065904 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go @@ -0,0 +1,105 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SampleCollectorTable = "Sample_Collector" + +// SampleCollector defines an object in Sample_Collector table +type SampleCollector struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + Name string `ovsdb:"name"` + Probability int `ovsdb:"probability"` + SetID int `ovsdb:"set_id"` +} + +func (a *SampleCollector) GetUUID() string { + return a.UUID +} + +func (a *SampleCollector) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySampleCollectorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSampleCollectorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SampleCollector) GetID() int { + return a.ID +} + +func (a *SampleCollector) GetName() string { + return a.Name +} + +func (a *SampleCollector) GetProbability() int { + return a.Probability +} + +func (a *SampleCollector) GetSetID() int { + return a.SetID +} + +func (a *SampleCollector) DeepCopyInto(b *SampleCollector) { + *b = *a + b.ExternalIDs = copySampleCollectorExternalIDs(a.ExternalIDs) +} + +func (a *SampleCollector) DeepCopy() *SampleCollector { + b := new(SampleCollector) + a.DeepCopyInto(b) + return b +} + +func (a *SampleCollector) CloneModelInto(b model.Model) { + c := b.(*SampleCollector) + a.DeepCopyInto(c) +} + +func (a *SampleCollector) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SampleCollector) Equals(b *SampleCollector) bool { + return a.UUID == b.UUID && + equalSampleCollectorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + a.Name == b.Name && + a.Probability == b.Probability && + a.SetID == b.SetID +} + +func (a *SampleCollector) EqualsModel(b model.Model) bool { + c := b.(*SampleCollector) + return a.Equals(c) +} + +var _ model.CloneableModel = &SampleCollector{} +var _ model.ComparableModel = &SampleCollector{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go new file mode 100644 index 000000000..a152b4237 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go @@ -0,0 +1,103 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SamplingAppTable = "Sampling_App" + +type ( + SamplingAppType = string +) + +var ( + SamplingAppTypeDrop SamplingAppType = "drop" + SamplingAppTypeACLNew SamplingAppType = "acl-new" + SamplingAppTypeACLEst SamplingAppType = "acl-est" +) + +// SamplingApp defines an object in Sampling_App table +type SamplingApp struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + Type SamplingAppType `ovsdb:"type"` +} + +func (a *SamplingApp) GetUUID() string { + return a.UUID +} + +func (a *SamplingApp) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySamplingAppExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSamplingAppExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SamplingApp) GetID() int { + return a.ID +} + +func (a *SamplingApp) GetType() SamplingAppType { + return a.Type +} + +func (a *SamplingApp) DeepCopyInto(b *SamplingApp) { + *b = *a + b.ExternalIDs = copySamplingAppExternalIDs(a.ExternalIDs) +} + +func (a *SamplingApp) DeepCopy() *SamplingApp { + b := new(SamplingApp) + a.DeepCopyInto(b) + return b +} + +func (a *SamplingApp) CloneModelInto(b model.Model) { + c := b.(*SamplingApp) + a.DeepCopyInto(c) +} + +func (a *SamplingApp) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SamplingApp) Equals(b *SamplingApp) bool { + return a.UUID == b.UUID && + equalSamplingAppExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + a.Type == b.Type +} + +func (a *SamplingApp) EqualsModel(b model.Model) bool { + c := b.(*SamplingApp) + return a.Equals(c) +} + +var _ model.CloneableModel = &SamplingApp{} +var _ model.ComparableModel = &SamplingApp{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go new file mode 100644 index 000000000..ddaba5d32 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go @@ -0,0 +1,117 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SSLTable = "SSL" + +// SSL defines an object in SSL table +type SSL struct { + UUID string `ovsdb:"_uuid"` + BootstrapCaCert bool `ovsdb:"bootstrap_ca_cert"` + CaCert string `ovsdb:"ca_cert"` + Certificate string `ovsdb:"certificate"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + PrivateKey string `ovsdb:"private_key"` + SSLCiphers string `ovsdb:"ssl_ciphers"` + SSLProtocols string `ovsdb:"ssl_protocols"` +} + +func (a *SSL) GetUUID() string { + return a.UUID +} + +func (a *SSL) GetBootstrapCaCert() bool { + return a.BootstrapCaCert +} + +func (a *SSL) GetCaCert() string { + return a.CaCert +} + +func (a *SSL) GetCertificate() string { + return a.Certificate +} + +func (a *SSL) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySSLExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSSLExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SSL) GetPrivateKey() string { + return a.PrivateKey +} + +func (a *SSL) GetSSLCiphers() string { + return a.SSLCiphers +} + +func (a *SSL) GetSSLProtocols() string { + return a.SSLProtocols +} + +func (a *SSL) DeepCopyInto(b *SSL) { + *b = *a + b.ExternalIDs = copySSLExternalIDs(a.ExternalIDs) +} + +func (a *SSL) DeepCopy() *SSL { + b := new(SSL) + a.DeepCopyInto(b) + return b +} + +func (a *SSL) CloneModelInto(b model.Model) { + c := b.(*SSL) + a.DeepCopyInto(c) +} + +func (a *SSL) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SSL) Equals(b *SSL) bool { + return a.UUID == b.UUID && + a.BootstrapCaCert == b.BootstrapCaCert && + a.CaCert == b.CaCert && + a.Certificate == b.Certificate && + equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.PrivateKey == b.PrivateKey && + a.SSLCiphers == b.SSLCiphers && + a.SSLProtocols == b.SSLProtocols +} + +func (a *SSL) EqualsModel(b model.Model) bool { + c := b.(*SSL) + return a.Equals(c) +} + +var _ model.CloneableModel = &SSL{} +var _ model.ComparableModel = &SSL{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go new file mode 100644 index 000000000..15207e648 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go @@ -0,0 +1,72 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const StaticMACBindingTable = "Static_MAC_Binding" + +// StaticMACBinding defines an object in Static_MAC_Binding table +type StaticMACBinding struct { + UUID string `ovsdb:"_uuid"` + IP string `ovsdb:"ip"` + LogicalPort string `ovsdb:"logical_port"` + MAC string `ovsdb:"mac"` + OverrideDynamicMAC bool `ovsdb:"override_dynamic_mac"` +} + +func (a *StaticMACBinding) GetUUID() string { + return a.UUID +} + +func (a *StaticMACBinding) GetIP() string { + return a.IP +} + +func (a *StaticMACBinding) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *StaticMACBinding) GetMAC() string { + return a.MAC +} + +func (a *StaticMACBinding) GetOverrideDynamicMAC() bool { + return a.OverrideDynamicMAC +} + +func (a *StaticMACBinding) DeepCopyInto(b *StaticMACBinding) { + *b = *a +} + +func (a *StaticMACBinding) DeepCopy() *StaticMACBinding { + b := new(StaticMACBinding) + a.DeepCopyInto(b) + return b +} + +func (a *StaticMACBinding) CloneModelInto(b model.Model) { + c := b.(*StaticMACBinding) + a.DeepCopyInto(c) +} + +func (a *StaticMACBinding) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *StaticMACBinding) Equals(b *StaticMACBinding) bool { + return a.UUID == b.UUID && + a.IP == b.IP && + a.LogicalPort == b.LogicalPort && + a.MAC == b.MAC && + a.OverrideDynamicMAC == b.OverrideDynamicMAC +} + +func (a *StaticMACBinding) EqualsModel(b model.Model) bool { + c := b.(*StaticMACBinding) + return a.Equals(c) +} + +var _ model.CloneableModel = &StaticMACBinding{} +var _ model.ComparableModel = &StaticMACBinding{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go new file mode 100644 index 000000000..6349c1519 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go @@ -0,0 +1,303 @@ +package observability + +import ( + "fmt" + "slices" + "strings" + "sync" + "time" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// OVN observ app IDs. Make sure to always add new apps in the end. +const ( + DropSamplingID = iota + 1 + ACLNewTrafficSamplingID + ACLEstTrafficSamplingID +) + +// temporary const, until we have dynamic config +const DefaultObservabilityCollectorSetID = 42 + +// this is inferred from nbdb schema, check Sample_Collector.id +const maxCollectorID = 255 +const collectorFeaturesExternalID = "sample-features" + +// collectorConfig holds the configuration for a collector. +// It is allowed to set different probabilities for every feature. +// collectorSetID is used to set up sampling via OVSDB. +type collectorConfig struct { + collectorSetID int + // probability in percent, 0 to 100 + featuresProbability map[libovsdbops.SampleFeature]int +} + +type Manager struct { + nbClient libovsdbclient.Client + sampConfig *libovsdbops.SamplingConfig + collectorsLock sync.Mutex + // nbdb Collectors have probability. To allow different probabilities for different features, + // multiple nbdb Collectors will be created, one per probability. 
+ // getCollectorKey() => collector.UUID + dbCollectors map[string]string + // cleaning up unused collectors may take time and multiple retries, as all referencing samples must be removed first. + // Therefore, we need to save state between those retries. + // getCollectorKey() => collector.SetID + unusedCollectors map[string]int + unusedCollectorsRetryInterval time.Duration + collectorsCleanupRetries int + // Only maxCollectorID collectors are allowed, each should have unique ID. + // this set is tracking already assigned IDs. + takenCollectorIDs sets.Set[int] +} + +func NewManager(nbClient libovsdbclient.Client) *Manager { + return &Manager{ + nbClient: nbClient, + collectorsLock: sync.Mutex{}, + dbCollectors: make(map[string]string), + unusedCollectors: make(map[string]int), + unusedCollectorsRetryInterval: time.Minute, + takenCollectorIDs: sets.New[int](), + } +} + +func (m *Manager) SamplingConfig() *libovsdbops.SamplingConfig { + return m.sampConfig +} + +func (m *Manager) Init() error { + // this will be read from the kube-api in the future + currentConfig := &collectorConfig{ + collectorSetID: DefaultObservabilityCollectorSetID, + featuresProbability: map[libovsdbops.SampleFeature]int{ + libovsdbops.EgressFirewallSample: 100, + libovsdbops.NetworkPolicySample: 100, + libovsdbops.AdminNetworkPolicySample: 100, + libovsdbops.MulticastSample: 100, + libovsdbops.UDNIsolationSample: 100, + }, + } + + return m.initWithConfig(currentConfig) +} + +func (m *Manager) initWithConfig(config *collectorConfig) error { + if err := m.setSamplingAppIDs(); err != nil { + return err + } + if err := m.setDbCollectors(); err != nil { + return err + } + + featuresConfig, err := m.addCollector(config) + if err != nil { + return err + } + m.sampConfig = libovsdbops.NewSamplingConfig(featuresConfig) + + // now cleanup stale collectors + m.deleteStaleCollectorsWithRetry() + return nil +} + +func (m *Manager) setDbCollectors() error { + m.collectorsLock.Lock() + defer m.collectorsLock.Unlock() + clear(m.dbCollectors) + collectors, err := libovsdbops.ListSampleCollectors(m.nbClient) + if err != nil { + return fmt.Errorf("error getting sample collectors: %w", err) + } + for _, collector := range collectors { + collectorKey := getCollectorKey(collector.SetID, collector.Probability) + m.dbCollectors[collectorKey] = collector.UUID + m.takenCollectorIDs.Insert(collector.ID) + // all collectors are unused, until we update existing configs + m.unusedCollectors[collectorKey] = collector.ID + } + return nil +} + +// Stale collectors can't be deleted until all referencing Samples are deleted. +// Samples will be deleted asynchronously by different controllers on their init with the new Manager. +// deleteStaleCollectorsWithRetry will retry, considering deletion should eventually succeed when all controllers +// update their db entries to use the latest observability config. 
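// With the defaults set in NewManager this means: a failed cleanup is retried
// roughly once per minute (unusedCollectorsRetryInterval) for about an hour
// (just over 60 attempts); after that the retry counter is reset and the
// remaining stale collectors are only reported via an error log.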
+func (m *Manager) deleteStaleCollectorsWithRetry() {
+	if err := m.deleteStaleCollectors(); err != nil {
+		m.collectorsCleanupRetries += 1
+		// allow retries for 1 hour, hopefully it will be enough for all handlers to complete initial sync
+		if m.collectorsCleanupRetries > 60 {
+			m.collectorsCleanupRetries = 0
+			klog.Errorf("Cleanup stale collectors failed after 60 retries: %v", err)
+			return
+		}
+		time.AfterFunc(m.unusedCollectorsRetryInterval, m.deleteStaleCollectorsWithRetry)
+		return
+	}
+	m.collectorsCleanupRetries = 0
+	klog.Infof("Cleanup stale collectors succeeded.")
+}
+
+func (m *Manager) deleteStaleCollectors() error {
+	m.collectorsLock.Lock()
+	defer m.collectorsLock.Unlock()
+	var lastErr error
+	for collectorKey, collectorSetID := range m.unusedCollectors {
+		collectorUUID := m.dbCollectors[collectorKey]
+		err := libovsdbops.DeleteSampleCollector(m.nbClient, &nbdb.SampleCollector{
+			UUID: collectorUUID,
+		})
+		if err != nil {
+			lastErr = err
+			klog.Infof("Error deleting collector with ID=%d: %v", collectorSetID, lastErr)
+			continue
+		}
+		delete(m.unusedCollectors, collectorKey)
+		delete(m.dbCollectors, collectorKey)
+		delete(m.takenCollectorIDs, collectorSetID)
+	}
+	return lastErr
+}
+
+// Cleanup must be called when observability is no longer needed.
+// It will return an error if some samples still exist in the db.
+// This is expected, and Cleanup may be retried on the next restart.
+func Cleanup(nbClient libovsdbclient.Client) error {
+	// Do the opposite of init
+	err := libovsdbops.DeleteSamplingAppsWithPredicate(nbClient, func(app *nbdb.SamplingApp) bool {
+		return true
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting sampling apps: %w", err)
+	}
+
+	err = libovsdbops.DeleteSampleCollectorWithPredicate(nbClient, func(collector *nbdb.SampleCollector) bool {
+		return true
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting sample collectors: %w", err)
+	}
+	return nil
+}
+
+func (m *Manager) setSamplingAppIDs() error {
+	var ops []libovsdb.Operation
+	var err error
+	for _, appConfig := range []struct {
+		id      int
+		appType nbdb.SamplingAppType
+	}{
+		{
+			id:      DropSamplingID,
+			appType: nbdb.SamplingAppTypeDrop,
+		},
+		{
+			id:      ACLNewTrafficSamplingID,
+			appType: nbdb.SamplingAppTypeACLNew,
+		},
+		{
+			id:      ACLEstTrafficSamplingID,
+			appType: nbdb.SamplingAppTypeACLEst,
+		},
+	} {
+		samplingApp := &nbdb.SamplingApp{
+			ID:   appConfig.id,
+			Type: appConfig.appType,
+		}
+		ops, err = libovsdbops.CreateOrUpdateSamplingAppsOps(m.nbClient, ops, samplingApp)
+		if err != nil {
+			return fmt.Errorf("error creating or updating sampling app %s: %w", appConfig.appType, err)
+		}
+	}
+	_, err = libovsdbops.TransactAndCheck(m.nbClient, ops)
+	return err
+}
+
+func groupByProbability(c *collectorConfig) map[int][]libovsdbops.SampleFeature {
+	probabilities := make(map[int][]libovsdbops.SampleFeature)
+	for feature, percentProbability := range c.featuresProbability {
+		probability := percentToProbability(percentProbability)
+		probabilities[probability] = append(probabilities[probability], feature)
+	}
+	return probabilities
+}
+
+func getCollectorKey(collectorID int, probability int) string {
+	return fmt.Sprintf("%d-%d", collectorID, probability)
+}
+
+func (m *Manager) getFreeCollectorID() (int, error) {
+	for i := 1; i <= maxCollectorID; i++ {
+		if !m.takenCollectorIDs.Has(i) {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("no free collector IDs")
+}
+
+func (m *Manager) addCollector(conf *collectorConfig) (map[libovsdbops.SampleFeature][]string, error) {
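+	// Group the configured features by sampling probability and ensure one Sample_Collector row
+	// exists per probability: reuse (and refresh the feature list of) an existing collector for
+	// this set ID and probability, otherwise create a new one with a free collector ID. The
+	// returned map lists, for every feature, the UUIDs of the collectors that sample it.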
m.collectorsLock.Lock() + defer m.collectorsLock.Unlock() + sampleFeaturesConfig := make(map[libovsdbops.SampleFeature][]string) + probabilityConfig := groupByProbability(conf) + + for probability, features := range probabilityConfig { + collectorKey := getCollectorKey(conf.collectorSetID, probability) + var collectorUUID string + var ok bool + // ensure predictable externalID + slices.Sort(features) + collectorFeatures := strings.Join(features, ",") + if collectorUUID, ok = m.dbCollectors[collectorKey]; !ok { + collectorID, err := m.getFreeCollectorID() + if err != nil { + return sampleFeaturesConfig, err + } + collector := &nbdb.SampleCollector{ + ID: collectorID, + SetID: conf.collectorSetID, + Probability: probability, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: collectorFeatures, + }, + } + err = libovsdbops.CreateOrUpdateSampleCollector(m.nbClient, collector) + if err != nil { + return sampleFeaturesConfig, err + } + collectorUUID = collector.UUID + m.dbCollectors[collectorKey] = collectorUUID + m.takenCollectorIDs.Insert(collectorID) + } else { + // update collector's features + collector := &nbdb.SampleCollector{ + UUID: collectorUUID, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: collectorFeatures, + }, + } + err := libovsdbops.UpdateSampleCollectorExternalIDs(m.nbClient, collector) + if err != nil { + return sampleFeaturesConfig, err + } + // collector is used, remove from unused Collectors + delete(m.unusedCollectors, collectorKey) + } + for _, feature := range features { + sampleFeaturesConfig[feature] = append(sampleFeaturesConfig[feature], collectorUUID) + } + } + return sampleFeaturesConfig, nil +} + +func percentToProbability(percent int) int { + return 65535 * percent / 100 +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore new file mode 100644 index 000000000..734ba1eff --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go new file mode 100644 index 000000000..b3b1c3c2d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const AddressSetTable = "Address_Set" + +// AddressSet defines an object in Address_Set table +type AddressSet struct { + UUID string `ovsdb:"_uuid"` + Addresses []string `ovsdb:"addresses"` + Name string `ovsdb:"name"` +} + +func (a *AddressSet) GetUUID() string { + return a.UUID +} + +func (a *AddressSet) GetAddresses() []string { + return a.Addresses +} + +func copyAddressSetAddresses(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalAddressSetAddresses(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *AddressSet) GetName() string { + return a.Name +} + +func (a *AddressSet) DeepCopyInto(b *AddressSet) { + *b = *a + b.Addresses = copyAddressSetAddresses(a.Addresses) +} + +func (a *AddressSet) DeepCopy() *AddressSet { + b := new(AddressSet) + a.DeepCopyInto(b) + return b +} + +func (a *AddressSet) CloneModelInto(b model.Model) { + c := b.(*AddressSet) + a.DeepCopyInto(c) +} + +func (a *AddressSet) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *AddressSet) Equals(b *AddressSet) bool { + return a.UUID == b.UUID && + equalAddressSetAddresses(a.Addresses, b.Addresses) && + a.Name == b.Name +} + +func (a *AddressSet) EqualsModel(b model.Model) bool { + c := b.(*AddressSet) + return a.Equals(c) +} + +var _ model.CloneableModel = &AddressSet{} +var _ model.ComparableModel = &AddressSet{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go new file mode 100644 index 000000000..cf27814b5 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go @@ -0,0 +1,179 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const BFDTable = "BFD" + +type ( + BFDStatus = string +) + +var ( + BFDStatusDown BFDStatus = "down" + BFDStatusInit BFDStatus = "init" + BFDStatusUp BFDStatus = "up" + BFDStatusAdminDown BFDStatus = "admin_down" +) + +// BFD defines an object in BFD table +type BFD struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + DetectMult int `ovsdb:"detect_mult"` + Disc int `ovsdb:"disc"` + DstIP string `ovsdb:"dst_ip"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LogicalPort string `ovsdb:"logical_port"` + MinRx int `ovsdb:"min_rx"` + MinTx int `ovsdb:"min_tx"` + Options map[string]string `ovsdb:"options"` + SrcPort int `ovsdb:"src_port"` + Status BFDStatus `ovsdb:"status"` +} + +func (a *BFD) GetUUID() string { + return a.UUID +} + +func (a *BFD) GetChassisName() string { + return a.ChassisName +} + +func (a *BFD) GetDetectMult() int { + return a.DetectMult +} + +func (a *BFD) GetDisc() int { + return a.Disc +} + +func (a *BFD) GetDstIP() string { + return a.DstIP +} + +func (a *BFD) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyBFDExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBFDExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *BFD) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *BFD) GetMinRx() int { + return a.MinRx +} + +func (a *BFD) GetMinTx() int { + return a.MinTx +} + +func (a *BFD) GetOptions() map[string]string { + return a.Options +} + +func copyBFDOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBFDOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *BFD) GetSrcPort() int { + return a.SrcPort +} + +func (a *BFD) GetStatus() BFDStatus { + return a.Status +} + +func (a *BFD) DeepCopyInto(b *BFD) { + *b = *a + b.ExternalIDs = copyBFDExternalIDs(a.ExternalIDs) + b.Options = copyBFDOptions(a.Options) +} + +func (a *BFD) DeepCopy() *BFD { + b := new(BFD) + a.DeepCopyInto(b) + return b +} + +func (a *BFD) CloneModelInto(b model.Model) { + c := b.(*BFD) + a.DeepCopyInto(c) +} + +func (a *BFD) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *BFD) Equals(b *BFD) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + a.DetectMult == b.DetectMult && + a.Disc == b.Disc && + a.DstIP == b.DstIP && + equalBFDExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.LogicalPort == b.LogicalPort && + a.MinRx == b.MinRx && + a.MinTx == b.MinTx && + equalBFDOptions(a.Options, b.Options) && + a.SrcPort == b.SrcPort && + a.Status == b.Status +} + +func (a *BFD) EqualsModel(b model.Model) bool { + c := b.(*BFD) + return a.Equals(c) +} + +var _ model.CloneableModel = &BFD{} +var _ model.ComparableModel = &BFD{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go 
b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go new file mode 100644 index 000000000..3526f096f --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go @@ -0,0 +1,225 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ChassisTable = "Chassis" + +// Chassis defines an object in Chassis table +type Chassis struct { + UUID string `ovsdb:"_uuid"` + Encaps []string `ovsdb:"encaps"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Hostname string `ovsdb:"hostname"` + Name string `ovsdb:"name"` + NbCfg int `ovsdb:"nb_cfg"` + OtherConfig map[string]string `ovsdb:"other_config"` + TransportZones []string `ovsdb:"transport_zones"` + VtepLogicalSwitches []string `ovsdb:"vtep_logical_switches"` +} + +func (a *Chassis) GetUUID() string { + return a.UUID +} + +func (a *Chassis) GetEncaps() []string { + return a.Encaps +} + +func copyChassisEncaps(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalChassisEncaps(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Chassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Chassis) GetHostname() string { + return a.Hostname +} + +func (a *Chassis) GetName() string { + return a.Name +} + +func (a *Chassis) GetNbCfg() int { + return a.NbCfg +} + +func (a *Chassis) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyChassisOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Chassis) GetTransportZones() []string { + return a.TransportZones +} + +func copyChassisTransportZones(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalChassisTransportZones(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Chassis) GetVtepLogicalSwitches() []string { + return a.VtepLogicalSwitches +} + +func copyChassisVtepLogicalSwitches(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalChassisVtepLogicalSwitches(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a 
*Chassis) DeepCopyInto(b *Chassis) { + *b = *a + b.Encaps = copyChassisEncaps(a.Encaps) + b.ExternalIDs = copyChassisExternalIDs(a.ExternalIDs) + b.OtherConfig = copyChassisOtherConfig(a.OtherConfig) + b.TransportZones = copyChassisTransportZones(a.TransportZones) + b.VtepLogicalSwitches = copyChassisVtepLogicalSwitches(a.VtepLogicalSwitches) +} + +func (a *Chassis) DeepCopy() *Chassis { + b := new(Chassis) + a.DeepCopyInto(b) + return b +} + +func (a *Chassis) CloneModelInto(b model.Model) { + c := b.(*Chassis) + a.DeepCopyInto(c) +} + +func (a *Chassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Chassis) Equals(b *Chassis) bool { + return a.UUID == b.UUID && + equalChassisEncaps(a.Encaps, b.Encaps) && + equalChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Hostname == b.Hostname && + a.Name == b.Name && + a.NbCfg == b.NbCfg && + equalChassisOtherConfig(a.OtherConfig, b.OtherConfig) && + equalChassisTransportZones(a.TransportZones, b.TransportZones) && + equalChassisVtepLogicalSwitches(a.VtepLogicalSwitches, b.VtepLogicalSwitches) +} + +func (a *Chassis) EqualsModel(b model.Model) bool { + c := b.(*Chassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &Chassis{} +var _ model.ComparableModel = &Chassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go new file mode 100644 index 000000000..1e8c3764b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go @@ -0,0 +1,124 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ChassisPrivateTable = "Chassis_Private" + +// ChassisPrivate defines an object in Chassis_Private table +type ChassisPrivate struct { + UUID string `ovsdb:"_uuid"` + Chassis *string `ovsdb:"chassis"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + NbCfg int `ovsdb:"nb_cfg"` + NbCfgTimestamp int `ovsdb:"nb_cfg_timestamp"` +} + +func (a *ChassisPrivate) GetUUID() string { + return a.UUID +} + +func (a *ChassisPrivate) GetChassis() *string { + return a.Chassis +} + +func copyChassisPrivateChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalChassisPrivateChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ChassisPrivate) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyChassisPrivateExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisPrivateExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ChassisPrivate) GetName() string { + return a.Name +} + +func (a *ChassisPrivate) GetNbCfg() int { + return a.NbCfg +} + +func (a *ChassisPrivate) GetNbCfgTimestamp() int { + return a.NbCfgTimestamp +} + +func (a *ChassisPrivate) DeepCopyInto(b *ChassisPrivate) { + *b = *a + b.Chassis = copyChassisPrivateChassis(a.Chassis) + b.ExternalIDs = copyChassisPrivateExternalIDs(a.ExternalIDs) +} + +func (a *ChassisPrivate) DeepCopy() *ChassisPrivate { + b := 
new(ChassisPrivate) + a.DeepCopyInto(b) + return b +} + +func (a *ChassisPrivate) CloneModelInto(b model.Model) { + c := b.(*ChassisPrivate) + a.DeepCopyInto(c) +} + +func (a *ChassisPrivate) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ChassisPrivate) Equals(b *ChassisPrivate) bool { + return a.UUID == b.UUID && + equalChassisPrivateChassis(a.Chassis, b.Chassis) && + equalChassisPrivateExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + a.NbCfg == b.NbCfg && + a.NbCfgTimestamp == b.NbCfgTimestamp +} + +func (a *ChassisPrivate) EqualsModel(b model.Model) bool { + c := b.(*ChassisPrivate) + return a.Equals(c) +} + +var _ model.CloneableModel = &ChassisPrivate{} +var _ model.ComparableModel = &ChassisPrivate{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go new file mode 100644 index 000000000..212e772be --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go @@ -0,0 +1,87 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ChassisTemplateVarTable = "Chassis_Template_Var" + +// ChassisTemplateVar defines an object in Chassis_Template_Var table +type ChassisTemplateVar struct { + UUID string `ovsdb:"_uuid"` + Chassis string `ovsdb:"chassis"` + Variables map[string]string `ovsdb:"variables"` +} + +func (a *ChassisTemplateVar) GetUUID() string { + return a.UUID +} + +func (a *ChassisTemplateVar) GetChassis() string { + return a.Chassis +} + +func (a *ChassisTemplateVar) GetVariables() map[string]string { + return a.Variables +} + +func copyChassisTemplateVarVariables(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisTemplateVarVariables(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ChassisTemplateVar) DeepCopyInto(b *ChassisTemplateVar) { + *b = *a + b.Variables = copyChassisTemplateVarVariables(a.Variables) +} + +func (a *ChassisTemplateVar) DeepCopy() *ChassisTemplateVar { + b := new(ChassisTemplateVar) + a.DeepCopyInto(b) + return b +} + +func (a *ChassisTemplateVar) CloneModelInto(b model.Model) { + c := b.(*ChassisTemplateVar) + a.DeepCopyInto(c) +} + +func (a *ChassisTemplateVar) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ChassisTemplateVar) Equals(b *ChassisTemplateVar) bool { + return a.UUID == b.UUID && + a.Chassis == b.Chassis && + equalChassisTemplateVarVariables(a.Variables, b.Variables) +} + +func (a *ChassisTemplateVar) EqualsModel(b model.Model) bool { + c := b.(*ChassisTemplateVar) + return a.Equals(c) +} + +var _ model.CloneableModel = &ChassisTemplateVar{} +var _ model.ComparableModel = &ChassisTemplateVar{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go new file mode 100644 index 000000000..8f96f5422 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go @@ -0,0 +1,221 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ConnectionTable = "Connection" + +// Connection defines an object in Connection table +type Connection struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + InactivityProbe *int `ovsdb:"inactivity_probe"` + IsConnected bool `ovsdb:"is_connected"` + MaxBackoff *int `ovsdb:"max_backoff"` + OtherConfig map[string]string `ovsdb:"other_config"` + ReadOnly bool `ovsdb:"read_only"` + Role string `ovsdb:"role"` + Status map[string]string `ovsdb:"status"` + Target string `ovsdb:"target"` +} + +func (a *Connection) GetUUID() string { + return a.UUID +} + +func (a *Connection) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyConnectionExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetInactivityProbe() *int { + return a.InactivityProbe +} + +func copyConnectionInactivityProbe(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalConnectionInactivityProbe(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Connection) GetIsConnected() bool { + return a.IsConnected +} + +func (a *Connection) GetMaxBackoff() *int { + return a.MaxBackoff +} + +func copyConnectionMaxBackoff(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalConnectionMaxBackoff(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Connection) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyConnectionOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetReadOnly() bool { + return a.ReadOnly +} + +func (a *Connection) GetRole() string { + return a.Role +} + +func (a *Connection) GetStatus() map[string]string { + return a.Status +} + +func copyConnectionStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetTarget() string { + return a.Target +} + +func (a *Connection) DeepCopyInto(b *Connection) { + *b = *a + b.ExternalIDs = copyConnectionExternalIDs(a.ExternalIDs) + b.InactivityProbe = copyConnectionInactivityProbe(a.InactivityProbe) + b.MaxBackoff = copyConnectionMaxBackoff(a.MaxBackoff) + b.OtherConfig = copyConnectionOtherConfig(a.OtherConfig) + 
b.Status = copyConnectionStatus(a.Status) +} + +func (a *Connection) DeepCopy() *Connection { + b := new(Connection) + a.DeepCopyInto(b) + return b +} + +func (a *Connection) CloneModelInto(b model.Model) { + c := b.(*Connection) + a.DeepCopyInto(c) +} + +func (a *Connection) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Connection) Equals(b *Connection) bool { + return a.UUID == b.UUID && + equalConnectionExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalConnectionInactivityProbe(a.InactivityProbe, b.InactivityProbe) && + a.IsConnected == b.IsConnected && + equalConnectionMaxBackoff(a.MaxBackoff, b.MaxBackoff) && + equalConnectionOtherConfig(a.OtherConfig, b.OtherConfig) && + a.ReadOnly == b.ReadOnly && + a.Role == b.Role && + equalConnectionStatus(a.Status, b.Status) && + a.Target == b.Target +} + +func (a *Connection) EqualsModel(b model.Model) bool { + c := b.(*Connection) + return a.Equals(c) +} + +var _ model.CloneableModel = &Connection{} +var _ model.ComparableModel = &Connection{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go new file mode 100644 index 000000000..741ffd028 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go @@ -0,0 +1,126 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ControllerEventTable = "Controller_Event" + +type ( + ControllerEventEventType = string +) + +var ( + ControllerEventEventTypeEmptyLbBackends ControllerEventEventType = "empty_lb_backends" +) + +// ControllerEvent defines an object in Controller_Event table +type ControllerEvent struct { + UUID string `ovsdb:"_uuid"` + Chassis *string `ovsdb:"chassis"` + EventInfo map[string]string `ovsdb:"event_info"` + EventType ControllerEventEventType `ovsdb:"event_type"` + SeqNum int `ovsdb:"seq_num"` +} + +func (a *ControllerEvent) GetUUID() string { + return a.UUID +} + +func (a *ControllerEvent) GetChassis() *string { + return a.Chassis +} + +func copyControllerEventChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerEventChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ControllerEvent) GetEventInfo() map[string]string { + return a.EventInfo +} + +func copyControllerEventEventInfo(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalControllerEventEventInfo(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ControllerEvent) GetEventType() ControllerEventEventType { + return a.EventType +} + +func (a *ControllerEvent) GetSeqNum() int { + return a.SeqNum +} + +func (a *ControllerEvent) DeepCopyInto(b *ControllerEvent) { + *b = *a + b.Chassis = copyControllerEventChassis(a.Chassis) + b.EventInfo = copyControllerEventEventInfo(a.EventInfo) +} + +func (a *ControllerEvent) DeepCopy() *ControllerEvent { + b := new(ControllerEvent) + a.DeepCopyInto(b) + return b +} + +func (a *ControllerEvent) CloneModelInto(b model.Model) { + c := b.(*ControllerEvent) + 
a.DeepCopyInto(c) +} + +func (a *ControllerEvent) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ControllerEvent) Equals(b *ControllerEvent) bool { + return a.UUID == b.UUID && + equalControllerEventChassis(a.Chassis, b.Chassis) && + equalControllerEventEventInfo(a.EventInfo, b.EventInfo) && + a.EventType == b.EventType && + a.SeqNum == b.SeqNum +} + +func (a *ControllerEvent) EqualsModel(b model.Model) bool { + c := b.(*ControllerEvent) + return a.Equals(c) +} + +var _ model.CloneableModel = &ControllerEvent{} +var _ model.ComparableModel = &ControllerEvent{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go new file mode 100644 index 000000000..10247286f --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go @@ -0,0 +1,118 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const DatapathBindingTable = "Datapath_Binding" + +// DatapathBinding defines an object in Datapath_Binding table +type DatapathBinding struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LoadBalancers []string `ovsdb:"load_balancers"` + TunnelKey int `ovsdb:"tunnel_key"` +} + +func (a *DatapathBinding) GetUUID() string { + return a.UUID +} + +func (a *DatapathBinding) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDatapathBindingExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDatapathBindingExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DatapathBinding) GetLoadBalancers() []string { + return a.LoadBalancers +} + +func copyDatapathBindingLoadBalancers(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalDatapathBindingLoadBalancers(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *DatapathBinding) GetTunnelKey() int { + return a.TunnelKey +} + +func (a *DatapathBinding) DeepCopyInto(b *DatapathBinding) { + *b = *a + b.ExternalIDs = copyDatapathBindingExternalIDs(a.ExternalIDs) + b.LoadBalancers = copyDatapathBindingLoadBalancers(a.LoadBalancers) +} + +func (a *DatapathBinding) DeepCopy() *DatapathBinding { + b := new(DatapathBinding) + a.DeepCopyInto(b) + return b +} + +func (a *DatapathBinding) CloneModelInto(b model.Model) { + c := b.(*DatapathBinding) + a.DeepCopyInto(c) +} + +func (a *DatapathBinding) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DatapathBinding) Equals(b *DatapathBinding) bool { + return a.UUID == b.UUID && + equalDatapathBindingExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalDatapathBindingLoadBalancers(a.LoadBalancers, b.LoadBalancers) && + a.TunnelKey == b.TunnelKey +} + +func (a *DatapathBinding) EqualsModel(b model.Model) bool { + c := b.(*DatapathBinding) + return a.Equals(c) +} + +var _ model.CloneableModel = &DatapathBinding{} +var _ model.ComparableModel = 
&DatapathBinding{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go new file mode 100644 index 000000000..e9ec44ce2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go @@ -0,0 +1,82 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPOptionsTable = "DHCP_Options" + +type ( + DHCPOptionsType = string +) + +var ( + DHCPOptionsTypeBool DHCPOptionsType = "bool" + DHCPOptionsTypeUint8 DHCPOptionsType = "uint8" + DHCPOptionsTypeUint16 DHCPOptionsType = "uint16" + DHCPOptionsTypeUint32 DHCPOptionsType = "uint32" + DHCPOptionsTypeIpv4 DHCPOptionsType = "ipv4" + DHCPOptionsTypeStaticRoutes DHCPOptionsType = "static_routes" + DHCPOptionsTypeStr DHCPOptionsType = "str" + DHCPOptionsTypeHostID DHCPOptionsType = "host_id" + DHCPOptionsTypeDomains DHCPOptionsType = "domains" +) + +// DHCPOptions defines an object in DHCP_Options table +type DHCPOptions struct { + UUID string `ovsdb:"_uuid"` + Code int `ovsdb:"code"` + Name string `ovsdb:"name"` + Type DHCPOptionsType `ovsdb:"type"` +} + +func (a *DHCPOptions) GetUUID() string { + return a.UUID +} + +func (a *DHCPOptions) GetCode() int { + return a.Code +} + +func (a *DHCPOptions) GetName() string { + return a.Name +} + +func (a *DHCPOptions) GetType() DHCPOptionsType { + return a.Type +} + +func (a *DHCPOptions) DeepCopyInto(b *DHCPOptions) { + *b = *a +} + +func (a *DHCPOptions) DeepCopy() *DHCPOptions { + b := new(DHCPOptions) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPOptions) CloneModelInto(b model.Model) { + c := b.(*DHCPOptions) + a.DeepCopyInto(c) +} + +func (a *DHCPOptions) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPOptions) Equals(b *DHCPOptions) bool { + return a.UUID == b.UUID && + a.Code == b.Code && + a.Name == b.Name && + a.Type == b.Type +} + +func (a *DHCPOptions) EqualsModel(b model.Model) bool { + c := b.(*DHCPOptions) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPOptions{} +var _ model.ComparableModel = &DHCPOptions{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go new file mode 100644 index 000000000..908d1e0ad --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go @@ -0,0 +1,77 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPv6OptionsTable = "DHCPv6_Options" + +type ( + DHCPv6OptionsType = string +) + +var ( + DHCPv6OptionsTypeIpv6 DHCPv6OptionsType = "ipv6" + DHCPv6OptionsTypeStr DHCPv6OptionsType = "str" + DHCPv6OptionsTypeMAC DHCPv6OptionsType = "mac" + DHCPv6OptionsTypeDomain DHCPv6OptionsType = "domain" +) + +// DHCPv6Options defines an object in DHCPv6_Options table +type DHCPv6Options struct { + UUID string `ovsdb:"_uuid"` + Code int `ovsdb:"code"` + Name string `ovsdb:"name"` + Type DHCPv6OptionsType `ovsdb:"type"` +} + +func (a *DHCPv6Options) GetUUID() string { + return a.UUID +} + +func (a *DHCPv6Options) GetCode() int { + return a.Code +} + +func (a *DHCPv6Options) GetName() string { + return a.Name +} + +func (a *DHCPv6Options) GetType() DHCPv6OptionsType { + return a.Type +} + +func (a *DHCPv6Options) DeepCopyInto(b *DHCPv6Options) { + *b = *a +} + +func (a *DHCPv6Options) DeepCopy() *DHCPv6Options { + b := new(DHCPv6Options) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPv6Options) CloneModelInto(b model.Model) { + c := b.(*DHCPv6Options) + a.DeepCopyInto(c) +} + +func (a *DHCPv6Options) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPv6Options) Equals(b *DHCPv6Options) bool { + return a.UUID == b.UUID && + a.Code == b.Code && + a.Name == b.Name && + a.Type == b.Type +} + +func (a *DHCPv6Options) EqualsModel(b model.Model) bool { + c := b.(*DHCPv6Options) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPv6Options{} +var _ model.ComparableModel = &DHCPv6Options{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go new file mode 100644 index 000000000..95c0a52d1 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go @@ -0,0 +1,178 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const DNSTable = "DNS" + +// DNS defines an object in DNS table +type DNS struct { + UUID string `ovsdb:"_uuid"` + Datapaths []string `ovsdb:"datapaths"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` + Records map[string]string `ovsdb:"records"` +} + +func (a *DNS) GetUUID() string { + return a.UUID +} + +func (a *DNS) GetDatapaths() []string { + return a.Datapaths +} + +func copyDNSDatapaths(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalDNSDatapaths(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *DNS) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDNSExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) GetOptions() map[string]string { + return a.Options +} + +func copyDNSOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) GetRecords() map[string]string { + return a.Records +} + +func copyDNSRecords(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSRecords(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) DeepCopyInto(b *DNS) { + *b = *a + b.Datapaths = copyDNSDatapaths(a.Datapaths) + b.ExternalIDs = copyDNSExternalIDs(a.ExternalIDs) + b.Options = copyDNSOptions(a.Options) + b.Records = copyDNSRecords(a.Records) +} + +func (a *DNS) DeepCopy() *DNS { + b := new(DNS) + a.DeepCopyInto(b) + return b +} + +func (a *DNS) CloneModelInto(b model.Model) { + c := b.(*DNS) + a.DeepCopyInto(c) +} + +func (a *DNS) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DNS) Equals(b *DNS) bool { + return a.UUID == b.UUID && + equalDNSDatapaths(a.Datapaths, b.Datapaths) && + equalDNSExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalDNSOptions(a.Options, b.Options) && + equalDNSRecords(a.Records, b.Records) +} + +func (a *DNS) EqualsModel(b model.Model) bool { + c := b.(*DNS) + return a.Equals(c) +} + +var _ model.CloneableModel = &DNS{} +var _ model.ComparableModel = &DNS{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go new file mode 100644 index 000000000..9a2f17fba --- /dev/null +++ 
b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go @@ -0,0 +1,109 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const EncapTable = "Encap" + +type ( + EncapType = string +) + +var ( + EncapTypeGeneve EncapType = "geneve" + EncapTypeSTT EncapType = "stt" + EncapTypeVxlan EncapType = "vxlan" +) + +// Encap defines an object in Encap table +type Encap struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + IP string `ovsdb:"ip"` + Options map[string]string `ovsdb:"options"` + Type EncapType `ovsdb:"type"` +} + +func (a *Encap) GetUUID() string { + return a.UUID +} + +func (a *Encap) GetChassisName() string { + return a.ChassisName +} + +func (a *Encap) GetIP() string { + return a.IP +} + +func (a *Encap) GetOptions() map[string]string { + return a.Options +} + +func copyEncapOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalEncapOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Encap) GetType() EncapType { + return a.Type +} + +func (a *Encap) DeepCopyInto(b *Encap) { + *b = *a + b.Options = copyEncapOptions(a.Options) +} + +func (a *Encap) DeepCopy() *Encap { + b := new(Encap) + a.DeepCopyInto(b) + return b +} + +func (a *Encap) CloneModelInto(b model.Model) { + c := b.(*Encap) + a.DeepCopyInto(c) +} + +func (a *Encap) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Encap) Equals(b *Encap) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + a.IP == b.IP && + equalEncapOptions(a.Options, b.Options) && + a.Type == b.Type +} + +func (a *Encap) EqualsModel(b model.Model) bool { + c := b.(*Encap) + return a.Equals(c) +} + +var _ model.CloneableModel = &Encap{} +var _ model.ComparableModel = &Encap{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go new file mode 100644 index 000000000..8253e7059 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go @@ -0,0 +1,72 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const FDBTable = "FDB" + +// FDB defines an object in FDB table +type FDB struct { + UUID string `ovsdb:"_uuid"` + DpKey int `ovsdb:"dp_key"` + MAC string `ovsdb:"mac"` + PortKey int `ovsdb:"port_key"` + Timestamp int `ovsdb:"timestamp"` +} + +func (a *FDB) GetUUID() string { + return a.UUID +} + +func (a *FDB) GetDpKey() int { + return a.DpKey +} + +func (a *FDB) GetMAC() string { + return a.MAC +} + +func (a *FDB) GetPortKey() int { + return a.PortKey +} + +func (a *FDB) GetTimestamp() int { + return a.Timestamp +} + +func (a *FDB) DeepCopyInto(b *FDB) { + *b = *a +} + +func (a *FDB) DeepCopy() *FDB { + b := new(FDB) + a.DeepCopyInto(b) + return b +} + +func (a *FDB) CloneModelInto(b model.Model) { + c := b.(*FDB) + a.DeepCopyInto(c) +} + +func (a *FDB) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *FDB) Equals(b *FDB) bool { + return a.UUID == b.UUID && + a.DpKey == b.DpKey && + a.MAC == b.MAC && + a.PortKey == b.PortKey && + a.Timestamp == b.Timestamp +} + +func (a *FDB) EqualsModel(b model.Model) bool { + c := b.(*FDB) + return a.Equals(c) +} + +var _ model.CloneableModel = &FDB{} +var _ model.ComparableModel = &FDB{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go new file mode 100644 index 000000000..a84ad7fc4 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go @@ -0,0 +1,151 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const GatewayChassisTable = "Gateway_Chassis" + +// GatewayChassis defines an object in Gateway_Chassis table +type GatewayChassis struct { + UUID string `ovsdb:"_uuid"` + Chassis *string `ovsdb:"chassis"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` +} + +func (a *GatewayChassis) GetUUID() string { + return a.UUID +} + +func (a *GatewayChassis) GetChassis() *string { + return a.Chassis +} + +func copyGatewayChassisChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalGatewayChassisChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *GatewayChassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyGatewayChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalGatewayChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *GatewayChassis) GetName() string { + return a.Name +} + +func (a *GatewayChassis) GetOptions() map[string]string { + return a.Options +} + +func copyGatewayChassisOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalGatewayChassisOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + 
for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *GatewayChassis) GetPriority() int { + return a.Priority +} + +func (a *GatewayChassis) DeepCopyInto(b *GatewayChassis) { + *b = *a + b.Chassis = copyGatewayChassisChassis(a.Chassis) + b.ExternalIDs = copyGatewayChassisExternalIDs(a.ExternalIDs) + b.Options = copyGatewayChassisOptions(a.Options) +} + +func (a *GatewayChassis) DeepCopy() *GatewayChassis { + b := new(GatewayChassis) + a.DeepCopyInto(b) + return b +} + +func (a *GatewayChassis) CloneModelInto(b model.Model) { + c := b.(*GatewayChassis) + a.DeepCopyInto(c) +} + +func (a *GatewayChassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *GatewayChassis) Equals(b *GatewayChassis) bool { + return a.UUID == b.UUID && + equalGatewayChassisChassis(a.Chassis, b.Chassis) && + equalGatewayChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalGatewayChassisOptions(a.Options, b.Options) && + a.Priority == b.Priority +} + +func (a *GatewayChassis) EqualsModel(b model.Model) bool { + c := b.(*GatewayChassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &GatewayChassis{} +var _ model.ComparableModel = &GatewayChassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go new file mode 100644 index 000000000..6507d071e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go @@ -0,0 +1,3 @@ +package sbdb + +//go:generate modelgen --extended -p sbdb -o . ovn-sb.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go new file mode 100644 index 000000000..b0b3cebbb --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go @@ -0,0 +1,112 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const HAChassisTable = "HA_Chassis" + +// HAChassis defines an object in HA_Chassis table +type HAChassis struct { + UUID string `ovsdb:"_uuid"` + Chassis *string `ovsdb:"chassis"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Priority int `ovsdb:"priority"` +} + +func (a *HAChassis) GetUUID() string { + return a.UUID +} + +func (a *HAChassis) GetChassis() *string { + return a.Chassis +} + +func copyHAChassisChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalHAChassisChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *HAChassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyHAChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalHAChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *HAChassis) GetPriority() int { + return a.Priority +} + +func (a *HAChassis) DeepCopyInto(b *HAChassis) { + *b = *a + b.Chassis = copyHAChassisChassis(a.Chassis) + b.ExternalIDs = copyHAChassisExternalIDs(a.ExternalIDs) +} + +func (a *HAChassis) DeepCopy() *HAChassis { + b := new(HAChassis) + a.DeepCopyInto(b) + return b +} + +func (a *HAChassis) CloneModelInto(b model.Model) { + c := b.(*HAChassis) + a.DeepCopyInto(c) +} + +func (a *HAChassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *HAChassis) Equals(b *HAChassis) bool { + return a.UUID == b.UUID && + equalHAChassisChassis(a.Chassis, b.Chassis) && + equalHAChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Priority == b.Priority +} + +func (a *HAChassis) EqualsModel(b model.Model) bool { + c := b.(*HAChassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &HAChassis{} +var _ model.ComparableModel = &HAChassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go new file mode 100644 index 000000000..1cc013c70 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go @@ -0,0 +1,149 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const HAChassisGroupTable = "HA_Chassis_Group" + +// HAChassisGroup defines an object in HA_Chassis_Group table +type HAChassisGroup struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HaChassis []string `ovsdb:"ha_chassis"` + Name string `ovsdb:"name"` + RefChassis []string `ovsdb:"ref_chassis"` +} + +func (a *HAChassisGroup) GetUUID() string { + return a.UUID +} + +func (a *HAChassisGroup) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyHAChassisGroupExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalHAChassisGroupExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *HAChassisGroup) GetHaChassis() []string { + return a.HaChassis +} + +func copyHAChassisGroupHaChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalHAChassisGroupHaChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *HAChassisGroup) GetName() string { + return a.Name +} + +func (a *HAChassisGroup) GetRefChassis() []string { + return a.RefChassis +} + +func copyHAChassisGroupRefChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalHAChassisGroupRefChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *HAChassisGroup) DeepCopyInto(b *HAChassisGroup) { + *b = *a + b.ExternalIDs = copyHAChassisGroupExternalIDs(a.ExternalIDs) + b.HaChassis = copyHAChassisGroupHaChassis(a.HaChassis) + b.RefChassis = copyHAChassisGroupRefChassis(a.RefChassis) +} + +func (a *HAChassisGroup) DeepCopy() *HAChassisGroup { + b := new(HAChassisGroup) + a.DeepCopyInto(b) + return b +} + +func (a *HAChassisGroup) CloneModelInto(b model.Model) { + c := b.(*HAChassisGroup) + a.DeepCopyInto(c) +} + +func (a *HAChassisGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *HAChassisGroup) Equals(b *HAChassisGroup) bool { + return a.UUID == b.UUID && + equalHAChassisGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalHAChassisGroupHaChassis(a.HaChassis, b.HaChassis) && + a.Name == b.Name && + equalHAChassisGroupRefChassis(a.RefChassis, b.RefChassis) +} + +func (a *HAChassisGroup) EqualsModel(b model.Model) bool { + c := b.(*HAChassisGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &HAChassisGroup{} +var _ model.ComparableModel = &HAChassisGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go new file mode 100644 index 000000000..73a0bb943 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go @@ -0,0 +1,147 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const IGMPGroupTable = "IGMP_Group" + +// IGMPGroup defines an object in IGMP_Group table +type IGMPGroup struct { + UUID string `ovsdb:"_uuid"` + Address string `ovsdb:"address"` + Chassis *string `ovsdb:"chassis"` + ChassisName string `ovsdb:"chassis_name"` + Datapath *string `ovsdb:"datapath"` + Ports []string `ovsdb:"ports"` + Protocol string `ovsdb:"protocol"` +} + +func (a *IGMPGroup) GetUUID() string { + return a.UUID +} + +func (a *IGMPGroup) GetAddress() string { + return a.Address +} + +func (a *IGMPGroup) GetChassis() *string { + return a.Chassis +} + +func copyIGMPGroupChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIGMPGroupChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IGMPGroup) GetChassisName() string { + return a.ChassisName +} + +func (a *IGMPGroup) GetDatapath() *string { + return a.Datapath +} + +func copyIGMPGroupDatapath(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIGMPGroupDatapath(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IGMPGroup) GetPorts() []string { + return a.Ports +} + +func copyIGMPGroupPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalIGMPGroupPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *IGMPGroup) GetProtocol() string { + return a.Protocol +} + +func (a *IGMPGroup) DeepCopyInto(b *IGMPGroup) { + *b = *a + b.Chassis = copyIGMPGroupChassis(a.Chassis) + b.Datapath = copyIGMPGroupDatapath(a.Datapath) + b.Ports = copyIGMPGroupPorts(a.Ports) +} + +func (a *IGMPGroup) DeepCopy() *IGMPGroup { + b := new(IGMPGroup) + a.DeepCopyInto(b) + return b +} + +func (a *IGMPGroup) CloneModelInto(b model.Model) { + c := b.(*IGMPGroup) + a.DeepCopyInto(c) +} + +func (a *IGMPGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *IGMPGroup) Equals(b *IGMPGroup) bool { + return a.UUID == b.UUID && + a.Address == b.Address && + equalIGMPGroupChassis(a.Chassis, b.Chassis) && + a.ChassisName == b.ChassisName && + equalIGMPGroupDatapath(a.Datapath, b.Datapath) && + equalIGMPGroupPorts(a.Ports, b.Ports) && + a.Protocol == b.Protocol +} + +func (a *IGMPGroup) EqualsModel(b model.Model) bool { + c := b.(*IGMPGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &IGMPGroup{} +var _ model.ComparableModel = &IGMPGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go new file mode 100644 index 000000000..493cd342d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go @@ -0,0 +1,228 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const IPMulticastTable = "IP_Multicast" + +// IPMulticast defines an object in IP_Multicast table +type IPMulticast struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + Enabled *bool `ovsdb:"enabled"` + EthSrc string `ovsdb:"eth_src"` + IdleTimeout *int `ovsdb:"idle_timeout"` + Ip4Src string `ovsdb:"ip4_src"` + Ip6Src string `ovsdb:"ip6_src"` + Querier *bool `ovsdb:"querier"` + QueryInterval *int `ovsdb:"query_interval"` + QueryMaxResp *int `ovsdb:"query_max_resp"` + SeqNo int `ovsdb:"seq_no"` + TableSize *int `ovsdb:"table_size"` +} + +func (a *IPMulticast) GetUUID() string { + return a.UUID +} + +func (a *IPMulticast) GetDatapath() string { + return a.Datapath +} + +func (a *IPMulticast) GetEnabled() *bool { + return a.Enabled +} + +func copyIPMulticastEnabled(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastEnabled(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetEthSrc() string { + return a.EthSrc +} + +func (a *IPMulticast) GetIdleTimeout() *int { + return a.IdleTimeout +} + +func copyIPMulticastIdleTimeout(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastIdleTimeout(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetIp4Src() string { + return a.Ip4Src +} + +func (a *IPMulticast) GetIp6Src() string { + return a.Ip6Src +} + +func (a *IPMulticast) GetQuerier() *bool { + return a.Querier +} + +func copyIPMulticastQuerier(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastQuerier(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetQueryInterval() *int { + return a.QueryInterval +} + +func copyIPMulticastQueryInterval(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastQueryInterval(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetQueryMaxResp() *int { + return a.QueryMaxResp +} + +func copyIPMulticastQueryMaxResp(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastQueryMaxResp(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetSeqNo() int { + return a.SeqNo +} + +func (a *IPMulticast) GetTableSize() *int { + return a.TableSize +} + +func copyIPMulticastTableSize(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastTableSize(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) DeepCopyInto(b *IPMulticast) { + *b = *a + b.Enabled = copyIPMulticastEnabled(a.Enabled) + b.IdleTimeout = copyIPMulticastIdleTimeout(a.IdleTimeout) + b.Querier = copyIPMulticastQuerier(a.Querier) + b.QueryInterval = copyIPMulticastQueryInterval(a.QueryInterval) + b.QueryMaxResp = copyIPMulticastQueryMaxResp(a.QueryMaxResp) + b.TableSize = copyIPMulticastTableSize(a.TableSize) +} + +func (a *IPMulticast) DeepCopy() *IPMulticast { + b := new(IPMulticast) + 
a.DeepCopyInto(b) + return b +} + +func (a *IPMulticast) CloneModelInto(b model.Model) { + c := b.(*IPMulticast) + a.DeepCopyInto(c) +} + +func (a *IPMulticast) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *IPMulticast) Equals(b *IPMulticast) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + equalIPMulticastEnabled(a.Enabled, b.Enabled) && + a.EthSrc == b.EthSrc && + equalIPMulticastIdleTimeout(a.IdleTimeout, b.IdleTimeout) && + a.Ip4Src == b.Ip4Src && + a.Ip6Src == b.Ip6Src && + equalIPMulticastQuerier(a.Querier, b.Querier) && + equalIPMulticastQueryInterval(a.QueryInterval, b.QueryInterval) && + equalIPMulticastQueryMaxResp(a.QueryMaxResp, b.QueryMaxResp) && + a.SeqNo == b.SeqNo && + equalIPMulticastTableSize(a.TableSize, b.TableSize) +} + +func (a *IPMulticast) EqualsModel(b model.Model) bool { + c := b.(*IPMulticast) + return a.Equals(c) +} + +var _ model.CloneableModel = &IPMulticast{} +var _ model.ComparableModel = &IPMulticast{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go new file mode 100644 index 000000000..bc341807e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go @@ -0,0 +1,294 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const LoadBalancerTable = "Load_Balancer" + +type ( + LoadBalancerProtocol = string +) + +var ( + LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp" + LoadBalancerProtocolUDP LoadBalancerProtocol = "udp" + LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp" +) + +// LoadBalancer defines an object in Load_Balancer table +type LoadBalancer struct { + UUID string `ovsdb:"_uuid"` + DatapathGroup *string `ovsdb:"datapath_group"` + Datapaths []string `ovsdb:"datapaths"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LrDatapathGroup *string `ovsdb:"lr_datapath_group"` + LsDatapathGroup *string `ovsdb:"ls_datapath_group"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Protocol *LoadBalancerProtocol `ovsdb:"protocol"` + Vips map[string]string `ovsdb:"vips"` +} + +func (a *LoadBalancer) GetUUID() string { + return a.UUID +} + +func (a *LoadBalancer) GetDatapathGroup() *string { + return a.DatapathGroup +} + +func copyLoadBalancerDatapathGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerDatapathGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetDatapaths() []string { + return a.Datapaths +} + +func copyLoadBalancerDatapaths(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLoadBalancerDatapaths(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LoadBalancer) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLoadBalancerExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } 
+ if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) GetLrDatapathGroup() *string { + return a.LrDatapathGroup +} + +func copyLoadBalancerLrDatapathGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerLrDatapathGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetLsDatapathGroup() *string { + return a.LsDatapathGroup +} + +func copyLoadBalancerLsDatapathGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerLsDatapathGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetName() string { + return a.Name +} + +func (a *LoadBalancer) GetOptions() map[string]string { + return a.Options +} + +func copyLoadBalancerOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) GetProtocol() *LoadBalancerProtocol { + return a.Protocol +} + +func copyLoadBalancerProtocol(a *LoadBalancerProtocol) *LoadBalancerProtocol { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerProtocol(a, b *LoadBalancerProtocol) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetVips() map[string]string { + return a.Vips +} + +func copyLoadBalancerVips(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerVips(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) DeepCopyInto(b *LoadBalancer) { + *b = *a + b.DatapathGroup = copyLoadBalancerDatapathGroup(a.DatapathGroup) + b.Datapaths = copyLoadBalancerDatapaths(a.Datapaths) + b.ExternalIDs = copyLoadBalancerExternalIDs(a.ExternalIDs) + b.LrDatapathGroup = copyLoadBalancerLrDatapathGroup(a.LrDatapathGroup) + b.LsDatapathGroup = copyLoadBalancerLsDatapathGroup(a.LsDatapathGroup) + b.Options = copyLoadBalancerOptions(a.Options) + b.Protocol = copyLoadBalancerProtocol(a.Protocol) + b.Vips = copyLoadBalancerVips(a.Vips) +} + +func (a *LoadBalancer) DeepCopy() *LoadBalancer { + b := new(LoadBalancer) + a.DeepCopyInto(b) + return b +} + +func (a *LoadBalancer) CloneModelInto(b model.Model) { + c := b.(*LoadBalancer) + a.DeepCopyInto(c) +} + +func (a *LoadBalancer) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LoadBalancer) Equals(b *LoadBalancer) bool { + return a.UUID == b.UUID && + equalLoadBalancerDatapathGroup(a.DatapathGroup, b.DatapathGroup) && + equalLoadBalancerDatapaths(a.Datapaths, b.Datapaths) && + equalLoadBalancerExternalIDs(a.ExternalIDs, b.ExternalIDs) && + 
equalLoadBalancerLrDatapathGroup(a.LrDatapathGroup, b.LrDatapathGroup) && + equalLoadBalancerLsDatapathGroup(a.LsDatapathGroup, b.LsDatapathGroup) && + a.Name == b.Name && + equalLoadBalancerOptions(a.Options, b.Options) && + equalLoadBalancerProtocol(a.Protocol, b.Protocol) && + equalLoadBalancerVips(a.Vips, b.Vips) +} + +func (a *LoadBalancer) EqualsModel(b model.Model) bool { + c := b.(*LoadBalancer) + return a.Equals(c) +} + +var _ model.CloneableModel = &LoadBalancer{} +var _ model.ComparableModel = &LoadBalancer{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go new file mode 100644 index 000000000..911de2eed --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go @@ -0,0 +1,79 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalDPGroupTable = "Logical_DP_Group" + +// LogicalDPGroup defines an object in Logical_DP_Group table +type LogicalDPGroup struct { + UUID string `ovsdb:"_uuid"` + Datapaths []string `ovsdb:"datapaths"` +} + +func (a *LogicalDPGroup) GetUUID() string { + return a.UUID +} + +func (a *LogicalDPGroup) GetDatapaths() []string { + return a.Datapaths +} + +func copyLogicalDPGroupDatapaths(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalDPGroupDatapaths(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalDPGroup) DeepCopyInto(b *LogicalDPGroup) { + *b = *a + b.Datapaths = copyLogicalDPGroupDatapaths(a.Datapaths) +} + +func (a *LogicalDPGroup) DeepCopy() *LogicalDPGroup { + b := new(LogicalDPGroup) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalDPGroup) CloneModelInto(b model.Model) { + c := b.(*LogicalDPGroup) + a.DeepCopyInto(c) +} + +func (a *LogicalDPGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalDPGroup) Equals(b *LogicalDPGroup) bool { + return a.UUID == b.UUID && + equalLogicalDPGroupDatapaths(a.Datapaths, b.Datapaths) +} + +func (a *LogicalDPGroup) EqualsModel(b model.Model) bool { + c := b.(*LogicalDPGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalDPGroup{} +var _ model.ComparableModel = &LogicalDPGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go new file mode 100644 index 000000000..42af1cdf5 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go @@ -0,0 +1,253 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalFlowTable = "Logical_Flow" + +type ( + LogicalFlowPipeline = string +) + +var ( + LogicalFlowPipelineIngress LogicalFlowPipeline = "ingress" + LogicalFlowPipelineEgress LogicalFlowPipeline = "egress" +) + +// LogicalFlow defines an object in Logical_Flow table +type LogicalFlow struct { + UUID string `ovsdb:"_uuid"` + Actions string `ovsdb:"actions"` + ControllerMeter *string `ovsdb:"controller_meter"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + FlowDesc *string `ovsdb:"flow_desc"` + LogicalDatapath *string `ovsdb:"logical_datapath"` + LogicalDpGroup *string `ovsdb:"logical_dp_group"` + Match string `ovsdb:"match"` + Pipeline LogicalFlowPipeline `ovsdb:"pipeline"` + Priority int `ovsdb:"priority"` + TableID int `ovsdb:"table_id"` + Tags map[string]string `ovsdb:"tags"` +} + +func (a *LogicalFlow) GetUUID() string { + return a.UUID +} + +func (a *LogicalFlow) GetActions() string { + return a.Actions +} + +func (a *LogicalFlow) GetControllerMeter() *string { + return a.ControllerMeter +} + +func copyLogicalFlowControllerMeter(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowControllerMeter(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalFlow) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalFlowExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalFlowExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalFlow) GetFlowDesc() *string { + return a.FlowDesc +} + +func copyLogicalFlowFlowDesc(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowFlowDesc(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalFlow) GetLogicalDatapath() *string { + return a.LogicalDatapath +} + +func copyLogicalFlowLogicalDatapath(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowLogicalDatapath(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalFlow) GetLogicalDpGroup() *string { + return a.LogicalDpGroup +} + +func copyLogicalFlowLogicalDpGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowLogicalDpGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalFlow) GetMatch() string { + return a.Match +} + +func (a *LogicalFlow) GetPipeline() LogicalFlowPipeline { + return a.Pipeline +} + +func (a *LogicalFlow) GetPriority() int { + return a.Priority +} + +func (a *LogicalFlow) GetTableID() int { + return a.TableID +} + +func (a *LogicalFlow) GetTags() map[string]string { + return a.Tags +} + +func copyLogicalFlowTags(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + 
+func equalLogicalFlowTags(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalFlow) DeepCopyInto(b *LogicalFlow) { + *b = *a + b.ControllerMeter = copyLogicalFlowControllerMeter(a.ControllerMeter) + b.ExternalIDs = copyLogicalFlowExternalIDs(a.ExternalIDs) + b.FlowDesc = copyLogicalFlowFlowDesc(a.FlowDesc) + b.LogicalDatapath = copyLogicalFlowLogicalDatapath(a.LogicalDatapath) + b.LogicalDpGroup = copyLogicalFlowLogicalDpGroup(a.LogicalDpGroup) + b.Tags = copyLogicalFlowTags(a.Tags) +} + +func (a *LogicalFlow) DeepCopy() *LogicalFlow { + b := new(LogicalFlow) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalFlow) CloneModelInto(b model.Model) { + c := b.(*LogicalFlow) + a.DeepCopyInto(c) +} + +func (a *LogicalFlow) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalFlow) Equals(b *LogicalFlow) bool { + return a.UUID == b.UUID && + a.Actions == b.Actions && + equalLogicalFlowControllerMeter(a.ControllerMeter, b.ControllerMeter) && + equalLogicalFlowExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalFlowFlowDesc(a.FlowDesc, b.FlowDesc) && + equalLogicalFlowLogicalDatapath(a.LogicalDatapath, b.LogicalDatapath) && + equalLogicalFlowLogicalDpGroup(a.LogicalDpGroup, b.LogicalDpGroup) && + a.Match == b.Match && + a.Pipeline == b.Pipeline && + a.Priority == b.Priority && + a.TableID == b.TableID && + equalLogicalFlowTags(a.Tags, b.Tags) +} + +func (a *LogicalFlow) EqualsModel(b model.Model) bool { + c := b.(*LogicalFlow) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalFlow{} +var _ model.ComparableModel = &LogicalFlow{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go new file mode 100644 index 000000000..705431f1d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go @@ -0,0 +1,78 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MACBindingTable = "MAC_Binding" + +// MACBinding defines an object in MAC_Binding table +type MACBinding struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + IP string `ovsdb:"ip"` + LogicalPort string `ovsdb:"logical_port"` + MAC string `ovsdb:"mac"` + Timestamp int `ovsdb:"timestamp"` +} + +func (a *MACBinding) GetUUID() string { + return a.UUID +} + +func (a *MACBinding) GetDatapath() string { + return a.Datapath +} + +func (a *MACBinding) GetIP() string { + return a.IP +} + +func (a *MACBinding) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *MACBinding) GetMAC() string { + return a.MAC +} + +func (a *MACBinding) GetTimestamp() int { + return a.Timestamp +} + +func (a *MACBinding) DeepCopyInto(b *MACBinding) { + *b = *a +} + +func (a *MACBinding) DeepCopy() *MACBinding { + b := new(MACBinding) + a.DeepCopyInto(b) + return b +} + +func (a *MACBinding) CloneModelInto(b model.Model) { + c := b.(*MACBinding) + a.DeepCopyInto(c) +} + +func (a *MACBinding) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *MACBinding) Equals(b *MACBinding) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + a.IP == b.IP && + a.LogicalPort == b.LogicalPort && + a.MAC == b.MAC && + a.Timestamp == b.Timestamp +} + +func (a *MACBinding) EqualsModel(b model.Model) bool { + c := b.(*MACBinding) + return a.Equals(c) +} + +var _ model.CloneableModel = &MACBinding{} +var _ model.ComparableModel = &MACBinding{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go new file mode 100644 index 000000000..95c4daec2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go @@ -0,0 +1,100 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MeterTable = "Meter" + +type ( + MeterUnit = string +) + +var ( + MeterUnitKbps MeterUnit = "kbps" + MeterUnitPktps MeterUnit = "pktps" +) + +// Meter defines an object in Meter table +type Meter struct { + UUID string `ovsdb:"_uuid"` + Bands []string `ovsdb:"bands"` + Name string `ovsdb:"name"` + Unit MeterUnit `ovsdb:"unit"` +} + +func (a *Meter) GetUUID() string { + return a.UUID +} + +func (a *Meter) GetBands() []string { + return a.Bands +} + +func copyMeterBands(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalMeterBands(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Meter) GetName() string { + return a.Name +} + +func (a *Meter) GetUnit() MeterUnit { + return a.Unit +} + +func (a *Meter) DeepCopyInto(b *Meter) { + *b = *a + b.Bands = copyMeterBands(a.Bands) +} + +func (a *Meter) DeepCopy() *Meter { + b := new(Meter) + a.DeepCopyInto(b) + return b +} + +func (a *Meter) CloneModelInto(b model.Model) { + c := b.(*Meter) + a.DeepCopyInto(c) +} + +func (a *Meter) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Meter) Equals(b *Meter) bool { + return a.UUID == b.UUID && + equalMeterBands(a.Bands, b.Bands) && + a.Name == b.Name && + a.Unit == b.Unit +} + +func (a *Meter) EqualsModel(b model.Model) bool { + c := b.(*Meter) + return a.Equals(c) +} + +var _ model.CloneableModel = &Meter{} +var _ model.ComparableModel = &Meter{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go new file mode 100644 index 000000000..addb01b64 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go @@ -0,0 +1,74 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MeterBandTable = "Meter_Band" + +type ( + MeterBandAction = string +) + +var ( + MeterBandActionDrop MeterBandAction = "drop" +) + +// MeterBand defines an object in Meter_Band table +type MeterBand struct { + UUID string `ovsdb:"_uuid"` + Action MeterBandAction `ovsdb:"action"` + BurstSize int `ovsdb:"burst_size"` + Rate int `ovsdb:"rate"` +} + +func (a *MeterBand) GetUUID() string { + return a.UUID +} + +func (a *MeterBand) GetAction() MeterBandAction { + return a.Action +} + +func (a *MeterBand) GetBurstSize() int { + return a.BurstSize +} + +func (a *MeterBand) GetRate() int { + return a.Rate +} + +func (a *MeterBand) DeepCopyInto(b *MeterBand) { + *b = *a +} + +func (a *MeterBand) DeepCopy() *MeterBand { + b := new(MeterBand) + a.DeepCopyInto(b) + return b +} + +func (a *MeterBand) CloneModelInto(b model.Model) { + c := b.(*MeterBand) + a.DeepCopyInto(c) +} + +func (a *MeterBand) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *MeterBand) Equals(b *MeterBand) bool { + return a.UUID == b.UUID && + a.Action == b.Action && + a.BurstSize == b.BurstSize && + a.Rate == b.Rate +} + +func (a *MeterBand) EqualsModel(b model.Model) bool { + c := b.(*MeterBand) + return a.Equals(c) +} + +var _ model.CloneableModel = &MeterBand{} +var _ model.ComparableModel = &MeterBand{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go new file mode 100644 index 000000000..69444ea73 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go @@ -0,0 +1,125 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MirrorTable = "Mirror" + +type ( + MirrorFilter = string + MirrorType = string +) + +var ( + MirrorFilterFromLport MirrorFilter = "from-lport" + MirrorFilterToLport MirrorFilter = "to-lport" + MirrorFilterBoth MirrorFilter = "both" + MirrorTypeGre MirrorType = "gre" + MirrorTypeErspan MirrorType = "erspan" + MirrorTypeLocal MirrorType = "local" +) + +// Mirror defines an object in Mirror table +type Mirror struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Filter MirrorFilter `ovsdb:"filter"` + Index int `ovsdb:"index"` + Name string `ovsdb:"name"` + Sink string `ovsdb:"sink"` + Type MirrorType `ovsdb:"type"` +} + +func (a *Mirror) GetUUID() string { + return a.UUID +} + +func (a *Mirror) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMirrorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMirrorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Mirror) GetFilter() MirrorFilter { + return a.Filter +} + +func (a *Mirror) GetIndex() int { + return a.Index +} + +func (a *Mirror) GetName() string { + return a.Name +} + +func (a *Mirror) GetSink() string { + return a.Sink +} + +func (a *Mirror) GetType() MirrorType { + return a.Type +} + +func (a *Mirror) DeepCopyInto(b *Mirror) { + *b = *a + b.ExternalIDs = copyMirrorExternalIDs(a.ExternalIDs) +} + +func (a *Mirror) DeepCopy() *Mirror { + b := 
new(Mirror) + a.DeepCopyInto(b) + return b +} + +func (a *Mirror) CloneModelInto(b model.Model) { + c := b.(*Mirror) + a.DeepCopyInto(c) +} + +func (a *Mirror) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Mirror) Equals(b *Mirror) bool { + return a.UUID == b.UUID && + equalMirrorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Filter == b.Filter && + a.Index == b.Index && + a.Name == b.Name && + a.Sink == b.Sink && + a.Type == b.Type +} + +func (a *Mirror) EqualsModel(b model.Model) bool { + c := b.(*Mirror) + return a.Equals(c) +} + +var _ model.CloneableModel = &Mirror{} +var _ model.ComparableModel = &Mirror{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go new file mode 100644 index 000000000..bc838fe49 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go @@ -0,0 +1,1884 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import ( + "encoding/json" + + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb +func FullDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("OVN_Southbound", map[string]model.Model{ + "Address_Set": &AddressSet{}, + "BFD": &BFD{}, + "Chassis": &Chassis{}, + "Chassis_Private": &ChassisPrivate{}, + "Chassis_Template_Var": &ChassisTemplateVar{}, + "Connection": &Connection{}, + "Controller_Event": &ControllerEvent{}, + "DHCP_Options": &DHCPOptions{}, + "DHCPv6_Options": &DHCPv6Options{}, + "DNS": &DNS{}, + "Datapath_Binding": &DatapathBinding{}, + "Encap": &Encap{}, + "FDB": &FDB{}, + "Gateway_Chassis": &GatewayChassis{}, + "HA_Chassis": &HAChassis{}, + "HA_Chassis_Group": &HAChassisGroup{}, + "IGMP_Group": &IGMPGroup{}, + "IP_Multicast": &IPMulticast{}, + "Load_Balancer": &LoadBalancer{}, + "Logical_DP_Group": &LogicalDPGroup{}, + "Logical_Flow": &LogicalFlow{}, + "MAC_Binding": &MACBinding{}, + "Meter": &Meter{}, + "Meter_Band": &MeterBand{}, + "Mirror": &Mirror{}, + "Multicast_Group": &MulticastGroup{}, + "Port_Binding": &PortBinding{}, + "Port_Group": &PortGroup{}, + "RBAC_Permission": &RBACPermission{}, + "RBAC_Role": &RBACRole{}, + "SB_Global": &SBGlobal{}, + "SSL": &SSL{}, + "Service_Monitor": &ServiceMonitor{}, + "Static_MAC_Binding": &StaticMACBinding{}, + }) +} + +var schema = `{ + "name": "OVN_Southbound", + "version": "20.37.0", + "tables": { + "Address_Set": { + "columns": { + "addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "BFD": { + "columns": { + "chassis_name": { + "type": "string" + }, + "detect_mult": { + "type": "integer" + }, + "disc": { + "type": "integer" + }, + "dst_ip": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "logical_port": { + "type": "string" + }, + "min_rx": { + "type": "integer" + }, + "min_tx": { + "type": "integer" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "src_port": { + "type": { + "key": { + "type": "integer", + "minInteger": 49152, + "maxInteger": 65535 + } + } + }, + "status": { + "type": { + "key": { + "type": 
"string", + "enum": [ + "set", + [ + "down", + "init", + "up", + "admin_down" + ] + ] + } + } + } + }, + "indexes": [ + [ + "logical_port", + "dst_ip", + "src_port", + "disc" + ] + ], + "isRoot": true + }, + "Chassis": { + "columns": { + "encaps": { + "type": { + "key": { + "type": "uuid", + "refTable": "Encap" + }, + "min": 1, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "hostname": { + "type": "string" + }, + "name": { + "type": "string" + }, + "nb_cfg": { + "type": "integer" + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "transport_zones": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "vtep_logical_switches": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Chassis_Private": { + "columns": { + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "nb_cfg": { + "type": "integer" + }, + "nb_cfg_timestamp": { + "type": "integer" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Chassis_Template_Var": { + "columns": { + "chassis": { + "type": "string" + }, + "variables": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "chassis" + ] + ], + "isRoot": true + }, + "Connection": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "inactivity_probe": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "is_connected": { + "type": "boolean", + "ephemeral": true + }, + "max_backoff": { + "type": { + "key": { + "type": "integer", + "minInteger": 1000 + }, + "min": 0, + "max": 1 + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "read_only": { + "type": "boolean" + }, + "role": { + "type": "string" + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "target": { + "type": "string" + } + }, + "indexes": [ + [ + "target" + ] + ] + }, + "Controller_Event": { + "columns": { + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "event_info": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "event_type": { + "type": { + "key": { + "type": "string", + "enum": "empty_lb_backends" + } + } + }, + "seq_num": { + "type": "integer" + } + }, + "isRoot": true + }, + "DHCP_Options": { + "columns": { + "code": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 254 + } + } + }, + "name": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + 
"bool", + "uint8", + "uint16", + "uint32", + "ipv4", + "static_routes", + "str", + "host_id", + "domains" + ] + ] + } + } + } + }, + "isRoot": true + }, + "DHCPv6_Options": { + "columns": { + "code": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 254 + } + } + }, + "name": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "ipv6", + "str", + "mac", + "domain" + ] + ] + } + } + } + }, + "isRoot": true + }, + "DNS": { + "columns": { + "datapaths": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + }, + "min": 1, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "records": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Datapath_Binding": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancers": { + "type": { + "key": { + "type": "uuid" + }, + "min": 0, + "max": "unlimited" + } + }, + "tunnel_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 16777215 + } + } + } + }, + "indexes": [ + [ + "tunnel_key" + ] + ], + "isRoot": true + }, + "Encap": { + "columns": { + "chassis_name": { + "type": "string" + }, + "ip": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "geneve", + "stt", + "vxlan" + ] + ] + } + } + } + }, + "indexes": [ + [ + "type", + "ip" + ] + ] + }, + "FDB": { + "columns": { + "dp_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 16777215 + } + } + }, + "mac": { + "type": "string" + }, + "port_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 16777215 + } + } + }, + "timestamp": { + "type": "integer" + } + }, + "indexes": [ + [ + "mac", + "dp_key" + ] + ], + "isRoot": true + }, + "Gateway_Chassis": { + "columns": { + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "HA_Chassis": { + "columns": { + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + } + }, + 
"HA_Chassis_Group": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "ref_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "IGMP_Group": { + "columns": { + "address": { + "type": "string" + }, + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "chassis_name": { + "type": "string" + }, + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "protocol": { + "type": "string" + } + }, + "indexes": [ + [ + "address", + "datapath", + "chassis" + ] + ], + "isRoot": true + }, + "IP_Multicast": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "weak" + } + } + }, + "enabled": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "eth_src": { + "type": "string" + }, + "idle_timeout": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "ip4_src": { + "type": "string" + }, + "ip6_src": { + "type": "string" + }, + "querier": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "query_interval": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "query_max_resp": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "seq_no": { + "type": "integer" + }, + "table_size": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "datapath" + ] + ], + "isRoot": true + }, + "Load_Balancer": { + "columns": { + "datapath_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_DP_Group" + }, + "min": 0, + "max": 1 + } + }, + "datapaths": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "lr_datapath_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_DP_Group" + }, + "min": 0, + "max": 1 + } + }, + "ls_datapath_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_DP_Group" + }, + "min": 0, + "max": 1 + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "protocol": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "tcp", + "udp", + "sctp" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "vips": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Logical_DP_Group": { + "columns": { + "datapaths": { + "type": { + "key": { + 
"type": "uuid", + "refTable": "Datapath_Binding", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + } + } + }, + "Logical_Flow": { + "columns": { + "actions": { + "type": "string" + }, + "controller_meter": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "flow_desc": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "logical_datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + }, + "min": 0, + "max": 1 + } + }, + "logical_dp_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_DP_Group" + }, + "min": 0, + "max": 1 + } + }, + "match": { + "type": "string" + }, + "pipeline": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "ingress", + "egress" + ] + ] + } + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 65535 + } + } + }, + "table_id": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32 + } + } + }, + "tags": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "MAC_Binding": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + } + } + }, + "ip": { + "type": "string" + }, + "logical_port": { + "type": "string" + }, + "mac": { + "type": "string" + }, + "timestamp": { + "type": "integer" + } + }, + "indexes": [ + [ + "logical_port", + "ip" + ] + ], + "isRoot": true + }, + "Meter": { + "columns": { + "bands": { + "type": { + "key": { + "type": "uuid", + "refTable": "Meter_Band", + "refType": "strong" + }, + "min": 1, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "unit": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "kbps", + "pktps" + ] + ] + } + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Meter_Band": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": "drop" + } + } + }, + "burst_size": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + } + } + }, + "rate": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + } + } + } + } + }, + "Mirror": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "filter": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "from-lport", + "to-lport", + "both" + ] + ] + } + } + }, + "index": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "sink": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "gre", + "erspan", + "local" + ] + ] + } + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Multicast_Group": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + } + } + }, + "name": { + "type": "string" + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "tunnel_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 32768, + "maxInteger": 
65535 + } + } + } + }, + "indexes": [ + [ + "datapath", + "tunnel_key" + ], + [ + "datapath", + "name" + ] + ], + "isRoot": true + }, + "Port_Binding": { + "columns": { + "additional_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "additional_encap": { + "type": { + "key": { + "type": "uuid", + "refTable": "Encap", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + } + } + }, + "encap": { + "type": { + "key": { + "type": "uuid", + "refTable": "Encap", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "gateway_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Gateway_Chassis", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "logical_port": { + "type": "string" + }, + "mac": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "mirror_rules": { + "type": { + "key": { + "type": "uuid", + "refTable": "Mirror", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "nat_addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "parent_port": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "port_security": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "requested_additional_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "requested_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "tag": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4095 + }, + "min": 0, + "max": 1 + } + }, + "tunnel_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 32767 + } + } + }, + "type": { + "type": "string" + }, + "up": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "virtual_parent": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "datapath", + "tunnel_key" + ], + [ + "logical_port" + ] + ], + "isRoot": true + }, + "Port_Group": { + "columns": { + "name": { + "type": "string" + }, + "ports": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "RBAC_Permission": { + "columns": { + "authorization": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "insert_delete": { + "type": "boolean" + }, + "table": { + "type": "string" + }, + "update": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + 
}, + "isRoot": true + }, + "RBAC_Role": { + "columns": { + "name": { + "type": "string" + }, + "permissions": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "uuid", + "refTable": "RBAC_Permission", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "SB_Global": { + "columns": { + "connections": { + "type": { + "key": { + "type": "uuid", + "refTable": "Connection" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ipsec": { + "type": "boolean" + }, + "nb_cfg": { + "type": "integer" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ssl": { + "type": { + "key": { + "type": "uuid", + "refTable": "SSL" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + }, + "SSL": { + "columns": { + "bootstrap_ca_cert": { + "type": "boolean" + }, + "ca_cert": { + "type": "string" + }, + "certificate": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "private_key": { + "type": "string" + }, + "ssl_ciphers": { + "type": "string" + }, + "ssl_protocols": { + "type": "string" + } + } + }, + "Service_Monitor": { + "columns": { + "chassis_name": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip": { + "type": "string" + }, + "logical_port": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "port": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 65535 + } + } + }, + "protocol": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "tcp", + "udp" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "src_ip": { + "type": "string" + }, + "src_mac": { + "type": "string" + }, + "status": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "online", + "offline", + "error" + ] + ] + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "logical_port", + "ip", + "port", + "protocol" + ] + ], + "isRoot": true + }, + "Static_MAC_Binding": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + } + } + }, + "ip": { + "type": "string" + }, + "logical_port": { + "type": "string" + }, + "mac": { + "type": "string" + }, + "override_dynamic_mac": { + "type": "boolean" + } + }, + "indexes": [ + [ + "logical_port", + "ip" + ] + ], + "isRoot": true + } + } +}` + +func Schema() ovsdb.DatabaseSchema { + var s ovsdb.DatabaseSchema + err := json.Unmarshal([]byte(schema), &s) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go new file mode 100644 index 000000000..1af933ea6 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go @@ -0,0 +1,97 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MulticastGroupTable = "Multicast_Group" + +// MulticastGroup defines an object in Multicast_Group table +type MulticastGroup struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` + TunnelKey int `ovsdb:"tunnel_key"` +} + +func (a *MulticastGroup) GetUUID() string { + return a.UUID +} + +func (a *MulticastGroup) GetDatapath() string { + return a.Datapath +} + +func (a *MulticastGroup) GetName() string { + return a.Name +} + +func (a *MulticastGroup) GetPorts() []string { + return a.Ports +} + +func copyMulticastGroupPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalMulticastGroupPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *MulticastGroup) GetTunnelKey() int { + return a.TunnelKey +} + +func (a *MulticastGroup) DeepCopyInto(b *MulticastGroup) { + *b = *a + b.Ports = copyMulticastGroupPorts(a.Ports) +} + +func (a *MulticastGroup) DeepCopy() *MulticastGroup { + b := new(MulticastGroup) + a.DeepCopyInto(b) + return b +} + +func (a *MulticastGroup) CloneModelInto(b model.Model) { + c := b.(*MulticastGroup) + a.DeepCopyInto(c) +} + +func (a *MulticastGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *MulticastGroup) Equals(b *MulticastGroup) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + a.Name == b.Name && + equalMulticastGroupPorts(a.Ports, b.Ports) && + a.TunnelKey == b.TunnelKey +} + +func (a *MulticastGroup) EqualsModel(b model.Model) bool { + c := b.(*MulticastGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &MulticastGroup{} +var _ model.ComparableModel = &MulticastGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go new file mode 100644 index 000000000..b3d30f843 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go @@ -0,0 +1,586 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const PortBindingTable = "Port_Binding" + +// PortBinding defines an object in Port_Binding table +type PortBinding struct { + UUID string `ovsdb:"_uuid"` + AdditionalChassis []string `ovsdb:"additional_chassis"` + AdditionalEncap []string `ovsdb:"additional_encap"` + Chassis *string `ovsdb:"chassis"` + Datapath string `ovsdb:"datapath"` + Encap *string `ovsdb:"encap"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + GatewayChassis []string `ovsdb:"gateway_chassis"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + LogicalPort string `ovsdb:"logical_port"` + MAC []string `ovsdb:"mac"` + MirrorRules []string `ovsdb:"mirror_rules"` + NatAddresses []string `ovsdb:"nat_addresses"` + Options map[string]string `ovsdb:"options"` + ParentPort *string `ovsdb:"parent_port"` + PortSecurity []string `ovsdb:"port_security"` + RequestedAdditionalChassis []string `ovsdb:"requested_additional_chassis"` + RequestedChassis *string `ovsdb:"requested_chassis"` + Tag *int `ovsdb:"tag"` + TunnelKey int `ovsdb:"tunnel_key"` + Type string `ovsdb:"type"` + Up *bool `ovsdb:"up"` + VirtualParent *string `ovsdb:"virtual_parent"` +} + +func (a *PortBinding) GetUUID() string { + return a.UUID +} + +func (a *PortBinding) GetAdditionalChassis() []string { + return a.AdditionalChassis +} + +func copyPortBindingAdditionalChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingAdditionalChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetAdditionalEncap() []string { + return a.AdditionalEncap +} + +func copyPortBindingAdditionalEncap(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingAdditionalEncap(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetChassis() *string { + return a.Chassis +} + +func copyPortBindingChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetDatapath() string { + return a.Datapath +} + +func (a *PortBinding) GetEncap() *string { + return a.Encap +} + +func copyPortBindingEncap(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingEncap(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyPortBindingExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortBindingExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *PortBinding) GetGatewayChassis() 
[]string { + return a.GatewayChassis +} + +func copyPortBindingGatewayChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingGatewayChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetHaChassisGroup() *string { + return a.HaChassisGroup +} + +func copyPortBindingHaChassisGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingHaChassisGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *PortBinding) GetMAC() []string { + return a.MAC +} + +func copyPortBindingMAC(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingMAC(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetMirrorRules() []string { + return a.MirrorRules +} + +func copyPortBindingMirrorRules(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingMirrorRules(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetNatAddresses() []string { + return a.NatAddresses +} + +func copyPortBindingNatAddresses(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingNatAddresses(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetOptions() map[string]string { + return a.Options +} + +func copyPortBindingOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortBindingOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *PortBinding) GetParentPort() *string { + return a.ParentPort +} + +func copyPortBindingParentPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingParentPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetPortSecurity() []string { + return a.PortSecurity +} + +func copyPortBindingPortSecurity(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingPortSecurity(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + 
return true +} + +func (a *PortBinding) GetRequestedAdditionalChassis() []string { + return a.RequestedAdditionalChassis +} + +func copyPortBindingRequestedAdditionalChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingRequestedAdditionalChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetRequestedChassis() *string { + return a.RequestedChassis +} + +func copyPortBindingRequestedChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingRequestedChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetTag() *int { + return a.Tag +} + +func copyPortBindingTag(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingTag(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetTunnelKey() int { + return a.TunnelKey +} + +func (a *PortBinding) GetType() string { + return a.Type +} + +func (a *PortBinding) GetUp() *bool { + return a.Up +} + +func copyPortBindingUp(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingUp(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetVirtualParent() *string { + return a.VirtualParent +} + +func copyPortBindingVirtualParent(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingVirtualParent(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) DeepCopyInto(b *PortBinding) { + *b = *a + b.AdditionalChassis = copyPortBindingAdditionalChassis(a.AdditionalChassis) + b.AdditionalEncap = copyPortBindingAdditionalEncap(a.AdditionalEncap) + b.Chassis = copyPortBindingChassis(a.Chassis) + b.Encap = copyPortBindingEncap(a.Encap) + b.ExternalIDs = copyPortBindingExternalIDs(a.ExternalIDs) + b.GatewayChassis = copyPortBindingGatewayChassis(a.GatewayChassis) + b.HaChassisGroup = copyPortBindingHaChassisGroup(a.HaChassisGroup) + b.MAC = copyPortBindingMAC(a.MAC) + b.MirrorRules = copyPortBindingMirrorRules(a.MirrorRules) + b.NatAddresses = copyPortBindingNatAddresses(a.NatAddresses) + b.Options = copyPortBindingOptions(a.Options) + b.ParentPort = copyPortBindingParentPort(a.ParentPort) + b.PortSecurity = copyPortBindingPortSecurity(a.PortSecurity) + b.RequestedAdditionalChassis = copyPortBindingRequestedAdditionalChassis(a.RequestedAdditionalChassis) + b.RequestedChassis = copyPortBindingRequestedChassis(a.RequestedChassis) + b.Tag = copyPortBindingTag(a.Tag) + b.Up = copyPortBindingUp(a.Up) + b.VirtualParent = copyPortBindingVirtualParent(a.VirtualParent) +} + +func (a *PortBinding) DeepCopy() *PortBinding { + b := new(PortBinding) + a.DeepCopyInto(b) + return b +} + +func (a *PortBinding) CloneModelInto(b model.Model) { + c := b.(*PortBinding) + a.DeepCopyInto(c) +} + +func (a *PortBinding) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *PortBinding) Equals(b *PortBinding) bool { + return a.UUID == b.UUID && + 
equalPortBindingAdditionalChassis(a.AdditionalChassis, b.AdditionalChassis) && + equalPortBindingAdditionalEncap(a.AdditionalEncap, b.AdditionalEncap) && + equalPortBindingChassis(a.Chassis, b.Chassis) && + a.Datapath == b.Datapath && + equalPortBindingEncap(a.Encap, b.Encap) && + equalPortBindingExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalPortBindingGatewayChassis(a.GatewayChassis, b.GatewayChassis) && + equalPortBindingHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) && + a.LogicalPort == b.LogicalPort && + equalPortBindingMAC(a.MAC, b.MAC) && + equalPortBindingMirrorRules(a.MirrorRules, b.MirrorRules) && + equalPortBindingNatAddresses(a.NatAddresses, b.NatAddresses) && + equalPortBindingOptions(a.Options, b.Options) && + equalPortBindingParentPort(a.ParentPort, b.ParentPort) && + equalPortBindingPortSecurity(a.PortSecurity, b.PortSecurity) && + equalPortBindingRequestedAdditionalChassis(a.RequestedAdditionalChassis, b.RequestedAdditionalChassis) && + equalPortBindingRequestedChassis(a.RequestedChassis, b.RequestedChassis) && + equalPortBindingTag(a.Tag, b.Tag) && + a.TunnelKey == b.TunnelKey && + a.Type == b.Type && + equalPortBindingUp(a.Up, b.Up) && + equalPortBindingVirtualParent(a.VirtualParent, b.VirtualParent) +} + +func (a *PortBinding) EqualsModel(b model.Model) bool { + c := b.(*PortBinding) + return a.Equals(c) +} + +var _ model.CloneableModel = &PortBinding{} +var _ model.ComparableModel = &PortBinding{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go new file mode 100644 index 000000000..358e26b33 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const PortGroupTable = "Port_Group" + +// PortGroup defines an object in Port_Group table +type PortGroup struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` +} + +func (a *PortGroup) GetUUID() string { + return a.UUID +} + +func (a *PortGroup) GetName() string { + return a.Name +} + +func (a *PortGroup) GetPorts() []string { + return a.Ports +} + +func copyPortGroupPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortGroupPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortGroup) DeepCopyInto(b *PortGroup) { + *b = *a + b.Ports = copyPortGroupPorts(a.Ports) +} + +func (a *PortGroup) DeepCopy() *PortGroup { + b := new(PortGroup) + a.DeepCopyInto(b) + return b +} + +func (a *PortGroup) CloneModelInto(b model.Model) { + c := b.(*PortGroup) + a.DeepCopyInto(c) +} + +func (a *PortGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *PortGroup) Equals(b *PortGroup) bool { + return a.UUID == b.UUID && + a.Name == b.Name && + equalPortGroupPorts(a.Ports, b.Ports) +} + +func (a *PortGroup) EqualsModel(b model.Model) bool { + c := b.(*PortGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &PortGroup{} +var _ model.ComparableModel = &PortGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go new file mode 100644 index 000000000..9d760527e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go @@ -0,0 +1,122 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const RBACPermissionTable = "RBAC_Permission" + +// RBACPermission defines an object in RBAC_Permission table +type RBACPermission struct { + UUID string `ovsdb:"_uuid"` + Authorization []string `ovsdb:"authorization"` + InsertDelete bool `ovsdb:"insert_delete"` + Table string `ovsdb:"table"` + Update []string `ovsdb:"update"` +} + +func (a *RBACPermission) GetUUID() string { + return a.UUID +} + +func (a *RBACPermission) GetAuthorization() []string { + return a.Authorization +} + +func copyRBACPermissionAuthorization(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalRBACPermissionAuthorization(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *RBACPermission) GetInsertDelete() bool { + return a.InsertDelete +} + +func (a *RBACPermission) GetTable() string { + return a.Table +} + +func (a *RBACPermission) GetUpdate() []string { + return a.Update +} + +func copyRBACPermissionUpdate(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalRBACPermissionUpdate(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *RBACPermission) DeepCopyInto(b *RBACPermission) { + *b = *a + b.Authorization = copyRBACPermissionAuthorization(a.Authorization) + b.Update = copyRBACPermissionUpdate(a.Update) +} + +func (a *RBACPermission) DeepCopy() *RBACPermission { + b := new(RBACPermission) + a.DeepCopyInto(b) + return b +} + +func (a *RBACPermission) CloneModelInto(b model.Model) { + c := b.(*RBACPermission) + a.DeepCopyInto(c) +} + +func (a *RBACPermission) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *RBACPermission) Equals(b *RBACPermission) bool { + return a.UUID == b.UUID && + equalRBACPermissionAuthorization(a.Authorization, b.Authorization) && + a.InsertDelete == b.InsertDelete && + a.Table == b.Table && + equalRBACPermissionUpdate(a.Update, b.Update) +} + +func (a *RBACPermission) EqualsModel(b model.Model) bool { + c := b.(*RBACPermission) + return a.Equals(c) +} + +var _ model.CloneableModel = &RBACPermission{} +var _ model.ComparableModel = &RBACPermission{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go new file mode 100644 index 000000000..ce8798645 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go @@ -0,0 +1,87 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const RBACRoleTable = "RBAC_Role" + +// RBACRole defines an object in RBAC_Role table +type RBACRole struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + Permissions map[string]string `ovsdb:"permissions"` +} + +func (a *RBACRole) GetUUID() string { + return a.UUID +} + +func (a *RBACRole) GetName() string { + return a.Name +} + +func (a *RBACRole) GetPermissions() map[string]string { + return a.Permissions +} + +func copyRBACRolePermissions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalRBACRolePermissions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *RBACRole) DeepCopyInto(b *RBACRole) { + *b = *a + b.Permissions = copyRBACRolePermissions(a.Permissions) +} + +func (a *RBACRole) DeepCopy() *RBACRole { + b := new(RBACRole) + a.DeepCopyInto(b) + return b +} + +func (a *RBACRole) CloneModelInto(b model.Model) { + c := b.(*RBACRole) + a.DeepCopyInto(c) +} + +func (a *RBACRole) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *RBACRole) Equals(b *RBACRole) bool { + return a.UUID == b.UUID && + a.Name == b.Name && + equalRBACRolePermissions(a.Permissions, b.Permissions) +} + +func (a *RBACRole) EqualsModel(b model.Model) bool { + c := b.(*RBACRole) + return a.Equals(c) +} + +var _ model.CloneableModel = &RBACRole{} +var _ model.ComparableModel = &RBACRole{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go new file mode 100644 index 000000000..2374478db --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go @@ -0,0 +1,182 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const SBGlobalTable = "SB_Global" + +// SBGlobal defines an object in SB_Global table +type SBGlobal struct { + UUID string `ovsdb:"_uuid"` + Connections []string `ovsdb:"connections"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Ipsec bool `ovsdb:"ipsec"` + NbCfg int `ovsdb:"nb_cfg"` + Options map[string]string `ovsdb:"options"` + SSL *string `ovsdb:"ssl"` +} + +func (a *SBGlobal) GetUUID() string { + return a.UUID +} + +func (a *SBGlobal) GetConnections() []string { + return a.Connections +} + +func copySBGlobalConnections(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalSBGlobalConnections(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *SBGlobal) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySBGlobalExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSBGlobalExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SBGlobal) GetIpsec() bool { + return a.Ipsec +} + +func (a *SBGlobal) GetNbCfg() int { + return a.NbCfg +} + +func (a *SBGlobal) GetOptions() map[string]string { + return a.Options +} + +func copySBGlobalOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSBGlobalOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SBGlobal) GetSSL() *string { + return a.SSL +} + +func copySBGlobalSSL(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalSBGlobalSSL(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *SBGlobal) DeepCopyInto(b *SBGlobal) { + *b = *a + b.Connections = copySBGlobalConnections(a.Connections) + b.ExternalIDs = copySBGlobalExternalIDs(a.ExternalIDs) + b.Options = copySBGlobalOptions(a.Options) + b.SSL = copySBGlobalSSL(a.SSL) +} + +func (a *SBGlobal) DeepCopy() *SBGlobal { + b := new(SBGlobal) + a.DeepCopyInto(b) + return b +} + +func (a *SBGlobal) CloneModelInto(b model.Model) { + c := b.(*SBGlobal) + a.DeepCopyInto(c) +} + +func (a *SBGlobal) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SBGlobal) Equals(b *SBGlobal) bool { + return a.UUID == b.UUID && + equalSBGlobalConnections(a.Connections, b.Connections) && + equalSBGlobalExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Ipsec == b.Ipsec && + a.NbCfg == b.NbCfg && + equalSBGlobalOptions(a.Options, b.Options) && + equalSBGlobalSSL(a.SSL, b.SSL) +} + +func (a *SBGlobal) EqualsModel(b model.Model) bool { + c := b.(*SBGlobal) + return a.Equals(c) +} + +var _ model.CloneableModel = &SBGlobal{} +var _ model.ComparableModel = &SBGlobal{} diff --git 
a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go new file mode 100644 index 000000000..d3e118868 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go @@ -0,0 +1,213 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ServiceMonitorTable = "Service_Monitor" + +type ( + ServiceMonitorProtocol = string + ServiceMonitorStatus = string +) + +var ( + ServiceMonitorProtocolTCP ServiceMonitorProtocol = "tcp" + ServiceMonitorProtocolUDP ServiceMonitorProtocol = "udp" + ServiceMonitorStatusOnline ServiceMonitorStatus = "online" + ServiceMonitorStatusOffline ServiceMonitorStatus = "offline" + ServiceMonitorStatusError ServiceMonitorStatus = "error" +) + +// ServiceMonitor defines an object in Service_Monitor table +type ServiceMonitor struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IP string `ovsdb:"ip"` + LogicalPort string `ovsdb:"logical_port"` + Options map[string]string `ovsdb:"options"` + Port int `ovsdb:"port"` + Protocol *ServiceMonitorProtocol `ovsdb:"protocol"` + SrcIP string `ovsdb:"src_ip"` + SrcMAC string `ovsdb:"src_mac"` + Status *ServiceMonitorStatus `ovsdb:"status"` +} + +func (a *ServiceMonitor) GetUUID() string { + return a.UUID +} + +func (a *ServiceMonitor) GetChassisName() string { + return a.ChassisName +} + +func (a *ServiceMonitor) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyServiceMonitorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalServiceMonitorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ServiceMonitor) GetIP() string { + return a.IP +} + +func (a *ServiceMonitor) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *ServiceMonitor) GetOptions() map[string]string { + return a.Options +} + +func copyServiceMonitorOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalServiceMonitorOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ServiceMonitor) GetPort() int { + return a.Port +} + +func (a *ServiceMonitor) GetProtocol() *ServiceMonitorProtocol { + return a.Protocol +} + +func copyServiceMonitorProtocol(a *ServiceMonitorProtocol) *ServiceMonitorProtocol { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalServiceMonitorProtocol(a, b *ServiceMonitorProtocol) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ServiceMonitor) GetSrcIP() string { + return a.SrcIP +} + +func (a *ServiceMonitor) GetSrcMAC() string { + return a.SrcMAC +} + +func (a *ServiceMonitor) GetStatus() *ServiceMonitorStatus { + return a.Status +} + +func 
copyServiceMonitorStatus(a *ServiceMonitorStatus) *ServiceMonitorStatus { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalServiceMonitorStatus(a, b *ServiceMonitorStatus) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ServiceMonitor) DeepCopyInto(b *ServiceMonitor) { + *b = *a + b.ExternalIDs = copyServiceMonitorExternalIDs(a.ExternalIDs) + b.Options = copyServiceMonitorOptions(a.Options) + b.Protocol = copyServiceMonitorProtocol(a.Protocol) + b.Status = copyServiceMonitorStatus(a.Status) +} + +func (a *ServiceMonitor) DeepCopy() *ServiceMonitor { + b := new(ServiceMonitor) + a.DeepCopyInto(b) + return b +} + +func (a *ServiceMonitor) CloneModelInto(b model.Model) { + c := b.(*ServiceMonitor) + a.DeepCopyInto(c) +} + +func (a *ServiceMonitor) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ServiceMonitor) Equals(b *ServiceMonitor) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + equalServiceMonitorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.IP == b.IP && + a.LogicalPort == b.LogicalPort && + equalServiceMonitorOptions(a.Options, b.Options) && + a.Port == b.Port && + equalServiceMonitorProtocol(a.Protocol, b.Protocol) && + a.SrcIP == b.SrcIP && + a.SrcMAC == b.SrcMAC && + equalServiceMonitorStatus(a.Status, b.Status) +} + +func (a *ServiceMonitor) EqualsModel(b model.Model) bool { + c := b.(*ServiceMonitor) + return a.Equals(c) +} + +var _ model.CloneableModel = &ServiceMonitor{} +var _ model.ComparableModel = &ServiceMonitor{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go new file mode 100644 index 000000000..3fab5fd1e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go @@ -0,0 +1,117 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const SSLTable = "SSL" + +// SSL defines an object in SSL table +type SSL struct { + UUID string `ovsdb:"_uuid"` + BootstrapCaCert bool `ovsdb:"bootstrap_ca_cert"` + CaCert string `ovsdb:"ca_cert"` + Certificate string `ovsdb:"certificate"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + PrivateKey string `ovsdb:"private_key"` + SSLCiphers string `ovsdb:"ssl_ciphers"` + SSLProtocols string `ovsdb:"ssl_protocols"` +} + +func (a *SSL) GetUUID() string { + return a.UUID +} + +func (a *SSL) GetBootstrapCaCert() bool { + return a.BootstrapCaCert +} + +func (a *SSL) GetCaCert() string { + return a.CaCert +} + +func (a *SSL) GetCertificate() string { + return a.Certificate +} + +func (a *SSL) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySSLExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSSLExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SSL) GetPrivateKey() string { + return a.PrivateKey +} + +func (a *SSL) GetSSLCiphers() string { + return a.SSLCiphers +} + +func (a *SSL) GetSSLProtocols() string { + return a.SSLProtocols +} + +func (a *SSL) DeepCopyInto(b *SSL) { + *b = *a + b.ExternalIDs = copySSLExternalIDs(a.ExternalIDs) +} + +func (a *SSL) DeepCopy() *SSL { + b := new(SSL) + a.DeepCopyInto(b) + return b +} + +func (a *SSL) CloneModelInto(b model.Model) { + c := b.(*SSL) + a.DeepCopyInto(c) +} + +func (a *SSL) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SSL) Equals(b *SSL) bool { + return a.UUID == b.UUID && + a.BootstrapCaCert == b.BootstrapCaCert && + a.CaCert == b.CaCert && + a.Certificate == b.Certificate && + equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.PrivateKey == b.PrivateKey && + a.SSLCiphers == b.SSLCiphers && + a.SSLProtocols == b.SSLProtocols +} + +func (a *SSL) EqualsModel(b model.Model) bool { + c := b.(*SSL) + return a.Equals(c) +} + +var _ model.CloneableModel = &SSL{} +var _ model.ComparableModel = &SSL{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go new file mode 100644 index 000000000..370968f60 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go @@ -0,0 +1,78 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const StaticMACBindingTable = "Static_MAC_Binding" + +// StaticMACBinding defines an object in Static_MAC_Binding table +type StaticMACBinding struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + IP string `ovsdb:"ip"` + LogicalPort string `ovsdb:"logical_port"` + MAC string `ovsdb:"mac"` + OverrideDynamicMAC bool `ovsdb:"override_dynamic_mac"` +} + +func (a *StaticMACBinding) GetUUID() string { + return a.UUID +} + +func (a *StaticMACBinding) GetDatapath() string { + return a.Datapath +} + +func (a *StaticMACBinding) GetIP() string { + return a.IP +} + +func (a *StaticMACBinding) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *StaticMACBinding) GetMAC() string { + return a.MAC +} + +func (a *StaticMACBinding) GetOverrideDynamicMAC() bool { + return a.OverrideDynamicMAC +} + +func (a *StaticMACBinding) DeepCopyInto(b *StaticMACBinding) { + *b = *a +} + +func (a *StaticMACBinding) DeepCopy() *StaticMACBinding { + b := new(StaticMACBinding) + a.DeepCopyInto(b) + return b +} + +func (a *StaticMACBinding) CloneModelInto(b model.Model) { + c := b.(*StaticMACBinding) + a.DeepCopyInto(c) +} + +func (a *StaticMACBinding) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *StaticMACBinding) Equals(b *StaticMACBinding) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + a.IP == b.IP && + a.LogicalPort == b.LogicalPort && + a.MAC == b.MAC && + a.OverrideDynamicMAC == b.OverrideDynamicMAC +} + +func (a *StaticMACBinding) EqualsModel(b model.Model) bool { + c := b.(*StaticMACBinding) + return a.Equals(c) +} + +var _ model.CloneableModel = &StaticMACBinding{} +var _ model.ComparableModel = &StaticMACBinding{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go new file mode 100644 index 000000000..7b021716e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go @@ -0,0 +1,245 @@ +package types + +import "time" + +const ( + // Default network name + DefaultNetworkName = "default" + K8sPrefix = "k8s-" + HybridOverlayPrefix = "int-" + HybridOverlayGRSubfix = "-gr" + + // K8sMgmtIntfNamePrefix name to be used as an OVS internal port on the node as prefix for networs + K8sMgmtIntfNamePrefix = "ovn-k8s-mp" + + // UDNVRFDeviceSuffix vrf device suffix associated with every user defined primary network. 
+ UDNVRFDeviceSuffix = "-udn-vrf" + // UDNVRFDevicePrefix vrf device prefix associated with every user + UDNVRFDevicePrefix = "mp" + + // K8sMgmtIntfName name to be used as an OVS internal port on the node + K8sMgmtIntfName = K8sMgmtIntfNamePrefix + "0" + + // PhysicalNetworkName is the name that maps to an OVS bridge that provides + // access to physical/external network + PhysicalNetworkName = "physnet" + PhysicalNetworkExGwName = "exgwphysnet" + + // LocalNetworkName is the name that maps to an OVS bridge that provides + // access to local service + LocalNetworkName = "locnet" + + // Local Bridge used for DGP access + LocalBridgeName = "br-local" + LocalnetGatewayNextHopPort = "ovn-k8s-gw0" + + // OVS Bridge Datapath types + DatapathUserspace = "netdev" + + // types.OVNClusterRouter is the name of the distributed router + OVNClusterRouter = "ovn_cluster_router" + OVNJoinSwitch = "join" + + JoinSwitchPrefix = "join_" + ExternalSwitchPrefix = "ext_" + GWRouterPrefix = "GR_" + GWRouterLocalLBPostfix = "_local" + RouterToSwitchPrefix = "rtos-" + InterPrefix = "inter-" + HybridSubnetPrefix = "hybrid-subnet-" + SwitchToRouterPrefix = "stor-" + JoinSwitchToGWRouterPrefix = "jtor-" + GWRouterToJoinSwitchPrefix = "rtoj-" + DistRouterToJoinSwitchPrefix = "dtoj-" + JoinSwitchToDistRouterPrefix = "jtod-" + EXTSwitchToGWRouterPrefix = "etor-" + GWRouterToExtSwitchPrefix = "rtoe-" + EgressGWSwitchPrefix = "exgw-" + PatchPortPrefix = "patch-" + PatchPortSuffix = "-to-br-int" + + NodeLocalSwitch = "node_local_switch" + + // types.OVNLayer2Switch is the name of layer2 topology switch + OVNLayer2Switch = "ovn_layer2_switch" + // types.OVNLocalnetSwitch is the name of localnet topology switch + OVNLocalnetSwitch = "ovn_localnet_switch" + // types.OVNLocalnetPort is the name of localnet topology localnet port + OVNLocalnetPort = "ovn_localnet_port" + + TransitSwitch = "transit_switch" + TransitSwitchToRouterPrefix = "tstor-" + RouterToTransitSwitchPrefix = "rtots-" + + // ACL Default Tier Priorities + + // Default routed multicast allow acl rule priority + DefaultRoutedMcastAllowPriority = 1013 + // Default multicast allow acl rule priority + DefaultMcastAllowPriority = 1012 + // Default multicast deny acl rule priority + DefaultMcastDenyPriority = 1011 + // Default allow acl rule priority + DefaultAllowPriority = 1001 + // Default deny acl rule priority + DefaultDenyPriority = 1000 + + // ACL PlaceHolderACL Tier Priorities + PrimaryUDNAllowPriority = 1001 + // Default deny acl rule priority + PrimaryUDNDenyPriority = 1000 + + // ACL Tiers + // Tier 0 is called Primary as it is evaluated before any other feature-related Tiers. + // Currently used for User Defined Network Feature. + // NOTE: When we upgrade from an OVN version without tiers to the new version with + // tiers, all values in the new ACL.Tier column will be set to 0. 
+ PrimaryACLTier = 0 + // Default Tier for all ACLs + DefaultACLTier = 2 + // Default Tier for all ACLs belonging to Admin Network Policy + DefaultANPACLTier = 1 + // Default Tier for all ACLs belonging to Baseline Admin Network Policy + DefaultBANPACLTier = 3 + + // priority of logical router policies on the OVNClusterRouter + EgressFirewallStartPriority = 10000 + MinimumReservedEgressFirewallPriority = 2000 + MGMTPortPolicyPriority = "1005" + NodeSubnetPolicyPriority = "1004" + InterNodePolicyPriority = "1003" + HybridOverlaySubnetPriority = 1002 + HybridOverlayReroutePriority = 501 + DefaultNoRereoutePriority = 102 + EgressSVCReroutePriority = 101 + EgressIPReroutePriority = 100 + EgressIPRerouteQoSRulePriority = 103 + EgressLiveMigrationReroutePiority = 10 + + // EndpointSliceMirrorControllerName mirror EndpointSlice controller name (used as a value for the "endpointslice.kubernetes.io/managed-by" label) + EndpointSliceMirrorControllerName = "endpointslice-mirror-controller.k8s.ovn.org" + // EndpointSliceDefaultControllerName default kubernetes EndpointSlice controller name (used as a value for the "endpointslice.kubernetes.io/managed-by" label) + EndpointSliceDefaultControllerName = "endpointslice-controller.k8s.io" + // LabelSourceEndpointSlice label key used in mirrored EndpointSlice + // that has the value of the default EndpointSlice name + LabelSourceEndpointSlice = "k8s.ovn.org/source-endpointslice" + // LabelSourceEndpointSliceVersion label key used in mirrored EndpointSlice + // that has the value of the last known default EndpointSlice ResourceVersion + LabelSourceEndpointSliceVersion = "k8s.ovn.org/source-endpointslice-version" + // LabelUserDefinedEndpointSliceNetwork label key used in mirrored EndpointSlices that contains the current primary user defined network name + LabelUserDefinedEndpointSliceNetwork = "k8s.ovn.org/endpointslice-network" + // LabelUserDefinedServiceName label key used in mirrored EndpointSlices that contains the service name matching the EndpointSlice + LabelUserDefinedServiceName = "k8s.ovn.org/service-name" + + // Packet marking + EgressIPNodeConnectionMark = "1008" + EgressIPReplyTrafficConnectionMark = 42 + + // primary user defined network's default join subnet value + // users can configure custom values using NADs + UserDefinedPrimaryNetworkJoinSubnetV4 = "100.65.0.0/16" + UserDefinedPrimaryNetworkJoinSubnetV6 = "fd99::/64" + + // OpenFlow and Networking constants + RouteAdvertisementICMPType = 134 + NeighborAdvertisementICMPType = 136 + + // Meter constants + OvnACLLoggingMeter = "acl-logging" + OvnRateLimitingMeter = "rate-limiter" + PacketsPerSecond = "pktps" + MeterAction = "drop" + + // OVN-K8S annotation & taint constants + OvnK8sPrefix = "k8s.ovn.org" + // Deprecated: we used to set topology version as an annotation on the node. We don't do this anymore. + OvnK8sTopoAnno = OvnK8sPrefix + "/" + "topology-version" + OvnK8sSmallMTUTaintKey = OvnK8sPrefix + "/" + "mtu-too-small" + + // name of the configmap used to synchronize status (e.g. 
watch for topology changes) + OvnK8sStatusCMName = "control-plane-status" + OvnK8sStatusKeyTopoVersion = "topology-version" + + // Monitoring constants + SFlowAgent = "ovn-k8s-mp0" + + // OVNKube-Node Node types + NodeModeFull = "full" + NodeModeDPU = "dpu" + NodeModeDPUHost = "dpu-host" + + // Geneve header length for IPv4 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823) + GeneveHeaderLengthIPv4 = 58 + // Geneve header length for IPv6 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823) + GeneveHeaderLengthIPv6 = GeneveHeaderLengthIPv4 + 20 + + ClusterPortGroupNameBase = "clusterPortGroup" + ClusterRtrPortGroupNameBase = "clusterRtrPortGroup" + + OVSDBTimeout = 10 * time.Second + OVSDBWaitTimeout = 0 + + ClusterLBGroupName = "clusterLBGroup" + ClusterSwitchLBGroupName = "clusterSwitchLBGroup" + ClusterRouterLBGroupName = "clusterRouterLBGroup" + + // key for network name external-id + NetworkExternalID = OvnK8sPrefix + "/" + "network" + // key for network role external-id: possible values are "default", "primary", "secondary" + NetworkRoleExternalID = OvnK8sPrefix + "/" + "role" + // key for NAD name external-id, only used for secondary logical switch port of a pod + // key for network name external-id + NADExternalID = OvnK8sPrefix + "/" + "nad" + // key for topology type external-id, only used for secondary network logical entities + TopologyExternalID = OvnK8sPrefix + "/" + "topology" + // key for load_balancer kind external-id + LoadBalancerKindExternalID = OvnK8sPrefix + "/" + "kind" + // key for load_balancer service external-id + LoadBalancerOwnerExternalID = OvnK8sPrefix + "/" + "owner" + + // different secondary network topology type defined in CNI netconf + Layer3Topology = "layer3" + Layer2Topology = "layer2" + LocalnetTopology = "localnet" + + // different types of network roles + // defined in CNI netconf as a user defined network + NetworkRolePrimary = "primary" + NetworkRoleSecondary = "secondary" + NetworkRoleDefault = "default" + // defined internally by ovnkube to recognize "default" + // network's role as a "infrastructure-locked" network + // when user defined network is the primary network for + // the pod which makes "default" network niether primary + // nor secondary + NetworkRoleInfrastructure = "infrastructure-locked" + + // db index keys + // PrimaryIDKey is used as a primary client index + PrimaryIDKey = OvnK8sPrefix + "/id" + + OvnDefaultZone = "global" + + // EgressService "reserved" hosts - when set on an EgressService they have a special meaning + + EgressServiceNoHost = "" // set on services with no allocated node + EgressServiceNoSNATHost = "ALL" // set on services with sourceIPBy=Network + + // MaxLogicalPortTunnelKey is maximum tunnel key that can be requested for a + // Logical Switch or Router Port + MaxLogicalPortTunnelKey = 32767 + + // InformerSyncTimeout is used when waiting for the initial informer cache sync + // (i.e. all existing objects should be listed by the informer). + // It allows ~4 list() retries with the default reflector exponential backoff config + InformerSyncTimeout = 20 * time.Second + + // HandlerSyncTimeout is used when waiting for initial object handler sync. + // (i.e. all the ADD events should be processed for the existing objects by the event handler) + HandlerSyncTimeout = 20 * time.Second + + // GRMACBindingAgeThreshold is the lifetime in seconds of each MAC binding + // entry for the gateway routers. 
After this time, the entry is removed and + // may be refreshed with a new ARP request. + GRMACBindingAgeThreshold = "300" +) diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go new file mode 100644 index 000000000..566f03fa9 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go @@ -0,0 +1,44 @@ +package types + +import ( + "errors" + "fmt" + + kerrors "k8s.io/apimachinery/pkg/util/errors" +) + +type SuppressedError struct { + Inner error +} + +func (e *SuppressedError) Error() string { + return fmt.Sprintf("suppressed error logged: %v", e.Inner.Error()) +} + +func (e *SuppressedError) Unwrap() error { + return e.Inner +} + +func NewSuppressedError(err error) error { + return &SuppressedError{ + Inner: err, + } +} + +func IsSuppressedError(err error) bool { + var suppressedError *SuppressedError + // errors.As() is not supported with Aggregate type error. Aggregate.Errors() converts an + // Aggregate type error into a slice of builtin error and then errors.As() can be used + if agg, ok := err.(kerrors.Aggregate); ok && err != nil { + suppress := false + for _, err := range agg.Errors() { + if errors.As(err, &suppressedError) { + suppress = true + } else { + return false + } + } + return suppress + } + return errors.As(err, &suppressedError) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go new file mode 100644 index 000000000..2a69fd57c --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go @@ -0,0 +1,21 @@ +package types + +import ( + "fmt" + "strings" +) + +// this file defines error messages that are used to figure out if a resource reconciliation failed +const ( + APBRouteErrorMsg = "failed to apply policy" + EgressFirewallErrorMsg = "EgressFirewall Rules not correctly applied" + EgressQoSErrorMsg = "EgressQoS Rules not correctly applied" +) + +func GetZoneStatus(zoneID, message string) string { + return fmt.Sprintf("%s: %s", zoneID, message) +} + +func GetZoneFromStatus(status string) string { + return strings.Split(status, ":")[0] +} diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore new file mode 100644 index 000000000..75623dccc --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml new file mode 100644 index 000000000..b0b525a5a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.10.x" + - "1.11.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v ./... 
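The sbdb files added above (PortBinding, PortGroup, RBACPermission, RBACRole, SBGlobal, ServiceMonitor, SSL, StaticMACBinding) are `libovsdb.modelgen` output: each struct mirrors one OVN Southbound table and implements `model.CloneableModel` and `model.ComparableModel` so the libovsdb cache can deep-copy and compare rows generically. The following is an editor-added sketch, not code from this patch, showing how such generated models are typically wired into the libovsdb client that is also vendored by this change; the socket path and the choice to register only `Port_Binding` are assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ovn-org/libovsdb/client"
	"github.com/ovn-org/libovsdb/model"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

func main() {
	// Register the generated Port_Binding model in a client DB model for the
	// OVN Southbound database (only one table is registered to keep the sketch short).
	dbModel, err := model.NewClientDBModel("OVN_Southbound", map[string]model.Model{
		sbdb.PortBindingTable: &sbdb.PortBinding{},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The endpoint is an assumption; point it at wherever the SB DB listens.
	ovs, err := client.NewOVSDBClient(dbModel,
		client.WithEndpoint("unix:/var/run/ovn/ovnsb_db.sock"))
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	if err := ovs.Connect(ctx); err != nil {
		log.Fatal(err)
	}
	defer ovs.Disconnect()

	// MonitorAll populates the client-side cache that List reads from.
	if _, err := ovs.MonitorAll(ctx); err != nil {
		log.Fatal(err)
	}

	// List the cached Port_Binding rows.
	var bindings []*sbdb.PortBinding
	if err := ovs.List(ctx, &bindings); err != nil {
		log.Fatal(err)
	}
	for _, pb := range bindings {
		fmt.Printf("%s tunnel_key=%d type=%q\n", pb.LogicalPort, pb.TunnelKey, pb.Type)
	}
}
```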
diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt new file mode 100644 index 000000000..2885af360 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md new file mode 100644 index 000000000..d9c08a22f --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/README.md @@ -0,0 +1,335 @@ +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: + + go get github.com/russross/blackfriday/v2 + +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday/v2" + +and `go get` without parameters. + +Legacy GOPATH mode is unsupported. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://pkg.go.dev/github.com/russross/blackfriday/v2. + +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. 
+ +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://pkg.go.dev/github.com/russross/blackfriday. + + +Usage +----- + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "github.com/russross/blackfriday/v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `AutoHeadingIDs` extension is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. 
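As a small editor-added illustration (not part of the upstream README), the helper can also be invoked directly to predict the anchor a heading will receive when the `AutoHeadingIDs` extension is enabled:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Prints "some-heading-title": the anchor AutoHeadingIDs would assign
	// to a "# Some Heading Title!" heading.
	fmt.Println(blackfriday.SanitizedAnchorName("Some Heading Title!"))
}
```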
+This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). Just + mark it like this: + + ```go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. + + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ```go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. 
A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled newlines in the input + translate into line breaks in the output. This extension is off by default. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): + renders output as LaTeX. + +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + +* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. + +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. 
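Tying the extension list above back to the earlier "Custom options" note, here is an editor-added sketch of opting in to an explicit extension set with `blackfriday.WithExtensions` instead of the defaults; the particular combination chosen is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("Name | Age\n-----|----\nBob  | 27\n\n~~struck~~ text\n")

	// WithExtensions replaces the default set, so only tables and
	// strikethrough are recognized in this run.
	output := blackfriday.Run(input, blackfriday.WithExtensions(
		blackfriday.Tables|blackfriday.Strikethrough,
	))
	fmt.Println(string(output))
}
```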
+ + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go new file mode 100644 index 000000000..dcd61e6e3 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/block.go @@ -0,0 +1,1612 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "html" + "regexp" + "strings" + "unicode" +) + +const ( + charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" + escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" +) + +var ( + reBackslashOrAmp = regexp.MustCompile("[\\&]") + reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *Markdown) block(data []byte) { + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed heading: + // + // # Heading 1 + // ## Heading 2 + // ... + // ###### Heading 6 + if p.isPrefixHeading(data) { + data = data[p.prefixHeading(data):] + continue + } + + // block of preformatted HTML: + // + //
+		// <div>
+		//     ...
+		// </div>
+ if data[0] == '<' { + if i := p.html(data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.extensions&Titleblock != 0 { + if data[0] == '%' { + if i := p.titleBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.addBlock(HorizontalRule, nil) + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(data, ListTypeOrdered):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(data, ListTypeDefinition):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headings, too + data = data[p.paragraph(data):] + } + + p.nesting-- +} + +func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { + p.closeUnmatchedBlocks() + container := p.addChild(typ, 0) + container.content = content + return container +} + +func (p *Markdown) isPrefixHeading(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.extensions&SpaceHeadings != 0 { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + if level == len(data) || data[level] != ' ' { + return false + } + } + return true +} + +func (p *Markdown) prefixHeading(data []byte) int { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + 
} + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + block := p.addBlock(Heading, data[i:end]) + block.HeadingID = id + block.Level = level + } + return skip +} + +func (p *Markdown) isUnderlinedHeading(data []byte) int { + // test of level 1 heading + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 1 + } + return 0 + } + + // test of level 2 heading + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 2 + } + return 0 + } + + return 0 +} + +func (p *Markdown) titleBlock(data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := p.addBlock(Heading, data) + block.Level = 1 + block.IsTitleblock = true + + return consumed +} + +func (p *Markdown) html(data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + + return i +} + +func finalizeHTMLBlock(block *Node) { + block.Literal = block.content + block.content = nil +} + +// HTML comment, lax form +func (p *Markdown) htmlComment(data []byte, doRender bool) int { + i := p.inlineHTMLComment(data) + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + block := p.addBlock(HTMLBlock, data[:end]) + finalizeHTMLBlock(block) + } + return size + } + return 0 +} + +// HR, which is the only self-closing block tag considered +func (p *Markdown) htmlHr(data []byte, doRender bool) int { + if len(data) < 4 { + return 0 + } + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + i := 3 + for i < len(data) && data[i] != '>' && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + return size + } + } + return 0 +} + +func (p *Markdown) htmlFindTag(data []byte) (string, bool) { + i := 0 + for i < len(data) && isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *Markdown) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + if tag == "hr" { + return 2 + } + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.extensions&LaxHTMLBlocks != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*Markdown) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + if i < len(data) && data[i] == '\n' { + i++ + } + return i +} + +func (*Markdown) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for i < len(data) && data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If info is not nil, it gets set to the syntax specified in the fence line. +func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + i++ + i = skipChar(data, i, ' ') + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + if i == len(data) { + return i, marker + } + if i > len(data) || data[i] != '\n' { + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { + var info string + beg, marker := isFenceLine(data, &info, "") + if beg == 0 || beg >= len(data) { + return 0 + } + fenceLength := beg - 1 + + var work bytes.Buffer + work.Write([]byte(info)) + work.WriteByte('\n') + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := isFenceLine(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = true + block.FenceLength = fenceLength + finalizeCodeBlock(block) + } + + return beg +} + +func unescapeChar(str []byte) []byte { + if str[0] == '\\' { + return []byte{str[1]} + } + return []byte(html.UnescapeString(string(str))) +} + +func unescapeString(str []byte) []byte { + if reBackslashOrAmp.Match(str) { + return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) + } + return str +} + +func finalizeCodeBlock(block *Node) { + if block.IsFenced { + newlinePos := bytes.IndexByte(block.content, '\n') + firstLine := block.content[:newlinePos] + rest := block.content[newlinePos+1:] + block.Info = unescapeString(bytes.Trim(firstLine, "\n")) + block.Literal = rest + } else { + block.Literal = block.content + } + block.content = nil +} + +func (p *Markdown) table(data []byte) int { + table := p.addBlock(Table, nil) + i, columns := p.tableHeader(data) + if i == 0 { + p.tip = table.Parent + table.Unlink() + return 0 + } + + p.addBlock(TableBody, nil) + + for i < len(data) { + pipes, rowStart := 0, i + for ; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + if i < len(data) && data[i] == '\n' { + i++ + } + p.tableRow(data[rowStart:i], columns, false) + } + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { + i := 0 + colCount := 1 + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + j := i + if j < len(data) && data[j] == '\n' { + j++ + } + header := data[:j] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]CellAlignFlags, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for i < len(data) && data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TableAlignmentLeft + dashes++ + } + for i < len(data) && data[i] == '-' { + i++ + dashes++ + } + if i < len(data) && data[i] == ':' { + i++ + columns[col] |= TableAlignmentRight + dashes++ + } + for i < len(data) && data[i] == ' ' { + i++ + } + if i == len(data) { + return + } + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for i < len(data) && data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && i < len(data) && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.addBlock(TableHead, nil) + p.tableRow(header, columns, true) + size = i + if size < len(data) && data[size] == '\n' { + size++ + } + return +} + +func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { + p.addBlock(TableRow, nil) + i, col := 0, 0 + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for i < len(data) && data[i] == ' ' { + i++ + } + + cellStart := i + + for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { + cellEnd-- + } + + cell := p.addBlock(TableCell, data[cellStart:cellEnd]) + cell.IsHeader = header + cell.Align = columns[col] + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + cell := p.addBlock(TableCell, nil) + cell.IsHeader = header + cell.Align = columns[col] + } + + // silently ignore rows with too many cells +} + +// returns blockquote prefix length +func (p *Markdown) quotePrefix(data []byte) int { + i := 0 + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + if i < len(data) && data[i] == '>' { + if i+1 < len(data) && data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *Markdown) quote(data []byte) int { + block := p.addBlock(BlockQuote, nil) + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + if end < len(data) && data[end] == '\n' { + end++ + } + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + p.block(raw.Bytes()) + p.finalize(block) + return end +} + +// returns prefix length for block code +func (p *Markdown) codePrefix(data []byte) int { + if len(data) >= 1 && data[0] == '\t' { + return 1 + } + if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *Markdown) code(data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for i < len(data) && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '\n' { + i++ + } + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = false + finalizeCodeBlock(block) + + return i +} + +// returns unordered list item prefix +func (p *Markdown) uliPrefix(data []byte) int { + i := 0 + // start with up to 3 spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data)-1 { + return 0 + } + // need one of {'*', '+', '-'} followed by a space or a tab + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + (data[i+1] != ' ' && data[i+1] != '\t') { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *Markdown) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for i < len(data) && data[i] >= '0' && data[i] <= '9' { + i++ + } + if start == i || i >= len(data)-1 { + return 0 + } + + // we need >= 1 digits followed by a dot and a space or a tab + if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *Markdown) dliPrefix(data []byte) int { + if len(data) < 2 { + return 0 + } + i := 0 + // need a ':' followed by a space or a tab + if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + for i < len(data) && data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *Markdown) list(data []byte, flags ListType) int { + i := 0 + flags |= ListItemBeginningOfList + block := p.addBlock(List, nil) + block.ListFlags = flags + block.Tight = true + + for i < len(data) { + skip := p.listItem(data[i:], &flags) + if flags&ListItemContainsBlock != 0 { + block.ListData.Tight = false + } + i += skip + if skip == 0 || flags&ListItemEndOfList != 0 { + break + } + flags &= ^ListItemBeginningOfList + } + + above := block.Parent + finalizeList(block) + p.tip = above + return i +} + +// Returns true if the list item is not the same type as its parent list +func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { + if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { + return true + } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { + return true + } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { + return true + } + return false +} + +// Returns true if block ends with a blank line, descending if needed +// into lists and sublists. +func endsWithBlankLine(block *Node) bool { + // TODO: figure this out. Always false now. + for block != nil { + //if block.lastLineBlank { + //return true + //} + t := block.Type + if t == List || t == Item { + block = block.LastChild + } else { + break + } + } + return false +} + +func finalizeList(block *Node) { + block.open = false + item := block.FirstChild + for item != nil { + // check for non-final list item ending with blank line: + if endsWithBlankLine(item) && item.Next != nil { + block.ListData.Tight = false + break + } + // recurse into children of list item, to see if there are spaces + // between any of them: + subItem := item.FirstChild + for subItem != nil { + if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { + block.ListData.Tight = false + break + } + subItem = subItem.Next + } + item = item.Next + } +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *Markdown) listItem(data []byte, flags *ListType) int { + // keep track of the indentation of the first line + itemIndent := 0 + if data[0] == '\t' { + itemIndent += 4 + } else { + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + } + + var bulletChar byte = '*' + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } else { + bulletChar = data[i-2] + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^ListTypeTerm + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&ListTypeDefinition != 0 { + *flags |= ListTypeTerm + } else { + return 0 + } + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + indentIndex := 0 + if data[line] == '\t' { + indentIndex++ + indent += 4 + } else { + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + indentIndex++ + } + } + + chunk := data[line+indentIndex : i] + + if p.extensions&FencedCode != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indentIndex : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + // to be a nested list, it must be indented more + // if not, it is either a different kind of list + // or the next item in the same list + if indent <= itemIndent { + if p.listTypeChanged(chunk, flags) { + *flags |= ListItemEndOfList + } else if containsBlankLine { + *flags |= ListItemContainsBlock + } + + break gatherlines + } + + if containsBlankLine { + *flags |= ListItemContainsBlock + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix heading? + case p.isPrefixHeading(chunk): + // if the heading is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= ListItemEndOfList + break gatherlines + } + *flags |= ListItemContainsBlock + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&ListTypeDefinition != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for next < len(data) && data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= ListItemEndOfList + } + } else { + *flags |= ListItemEndOfList + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= ListItemContainsBlock + } + + // if this line was preceded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indentIndex : i]) + + line = i + } + + rawBytes := raw.Bytes() + + block := p.addBlock(Item, nil) + block.ListFlags = *flags + block.Tight = false + block.BulletChar = bulletChar + block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + + // render the contents of the list item + if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(rawBytes[:sublist]) + p.block(rawBytes[sublist:]) + } else { + p.block(rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + child := p.addChild(Paragraph, 0) + child.content = rawBytes[:sublist] + p.block(rawBytes[sublist:]) + } else { + child := p.addChild(Paragraph, 0) + child.content = rawBytes + } + } + return line +} + +// render a single paragraph that has already been parsed out +func (p *Markdown) renderParagraph(data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + end := len(data) + // trim trailing newline + if data[len(data)-1] == '\n' { + end-- + } + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + p.addBlock(Paragraph, data[beg:end]) +} + +func (p *Markdown) paragraph(data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + tabSize := TabSizeDefault + if p.extensions&TabSizeEight != 0 { + tabSize = TabSizeDouble + } + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a reference or a footnote? If so, end a paragraph + // preceding it and report that we have consumed up to the end of that + // reference: + if refEnd := isReference(p, current, tabSize); refEnd > 0 { + p.renderParagraph(data[:i]) + return i + refEnd + } + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.extensions&DefinitionLists != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(data[prev:], ListTypeDefinition) + } + } + + p.renderParagraph(data[:i]) + return i + n + } + + // an underline under some text marks a heading, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeading(current); level > 0 { + // render the paragraph + p.renderParagraph(data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + id := "" + if p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + block := p.addBlock(Heading, data[prev:eol]) + block.Level = level + block.HeadingID = id + + // find the end of the underline + for i < len(data) && data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.extensions&LaxHTMLBlocks != 0 { + if data[i] == '<' && p.html(current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a prefixed heading or a horizontal rule after this, paragraph is over + if p.isPrefixHeading(current) || p.isHRule(current) { + p.renderParagraph(data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.extensions&FencedCode != 0 { + if p.fencedCodeBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(current) != 0 { + ret := p.list(data[prev:], ListTypeDefinition) + return ret + } + } + + // if there's a list after this, paragraph is over + if p.extensions&NoEmptyLineBeforeBlock != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + nl := bytes.IndexByte(data[i:], '\n') + if nl >= 0 { + i += nl + 1 + } else { + i += len(data[i:]) + } + } + + p.renderParagraph(data[:i]) + return i +} + +func skipChar(data []byte, start int, char byte) int { + i := start + for i < len(data) && data[i] == char { + i++ + } + return i +} + +func skipUntilChar(text []byte, start int, char byte) int { + i := start + for i < len(text) && text[i] != char { + i++ + } + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go new file mode 100644 index 000000000..57ff152a0 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/doc.go @@ -0,0 +1,46 @@ +// Package blackfriday is a markdown processor. 
+//
+// It translates plain text with simple formatting rules into an AST, which can
+// then be further processed to HTML (provided by Blackfriday itself) or other
+// formats (provided by the community).
+//
+// The simplest way to invoke Blackfriday is to call the Run function. It will
+// take a text input and produce a text output in HTML (or other format).
+//
+// A slightly more sophisticated way to use Blackfriday is to create a Markdown
+// processor and to call Parse, which returns a syntax tree for the input
+// document. You can leverage Blackfriday's parsing for content extraction from
+// markdown documents. You can assign a custom renderer and set various options
+// to the Markdown processor.
+//
+// If you're interested in calling Blackfriday from the command line, see
+// https://github.com/russross/blackfriday-tool.
+//
+// Sanitized Anchor Names
+//
+// Blackfriday includes an algorithm for creating sanitized anchor names
+// corresponding to a given input text. This algorithm is used to create
+// anchors for headings when the AutoHeadingIDs extension is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character,
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need the full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep the Sanitized Anchor Name algorithm in sync with package
+// github.com/shurcooL/sanitized_anchor_name.
+// Otherwise, users of sanitized_anchor_name will get anchor names
+// that are incompatible with those generated by blackfriday.
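
A minimal sketch of the anchor-name algorithm just described, using the exported `SanitizedAnchorName` function defined in `block.go` earlier in this patch; the sample inputs and the expected outputs in the comments are worked by hand from the rules in the package comment:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Letters and digits are lower-cased and kept; every other run of runes
	// between them collapses to a single '-'; leading and trailing invalid
	// runes are dropped entirely.
	fmt.Println(blackfriday.SanitizedAnchorName("Header Links: 101!"))    // header-links-101
	fmt.Println(blackfriday.SanitizedAnchorName("  ...Go & Markdown!  ")) // go-markdown
}
```
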
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go new file mode 100644 index 000000000..a2c3edb69 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/entities.go @@ -0,0 +1,2236 @@ +package blackfriday + +// Extracted from https://html.spec.whatwg.org/multipage/entities.json +var entities = map[string]bool{ + "Æ": true, + "Æ": true, + "&": true, + "&": true, + "Á": true, + "Á": true, + "Ă": true, + "Â": true, + "Â": true, + "А": true, + "𝔄": true, + "À": true, + "À": true, + "Α": true, + "Ā": true, + "⩓": true, + "Ą": true, + "𝔸": true, + "⁡": true, + "Å": true, + "Å": true, + "𝒜": true, + "≔": true, + "Ã": true, + "Ã": true, + "Ä": true, + "Ä": true, + "∖": true, + "⫧": true, + "⌆": true, + "Б": true, + "∵": true, + "ℬ": true, + "Β": true, + "𝔅": true, + "𝔹": true, + "˘": true, + "ℬ": true, + "≎": true, + "Ч": true, + "©": true, + "©": true, + "Ć": true, + "⋒": true, + "ⅅ": true, + "ℭ": true, + "Č": true, + "Ç": true, + "Ç": true, + "Ĉ": true, + "∰": true, + "Ċ": true, + "¸": true, + "·": true, + "ℭ": true, + "Χ": true, + "⊙": true, + "⊖": true, + "⊕": true, + "⊗": true, + "∲": true, + "”": true, + "’": true, + "∷": true, + "⩴": true, + "≡": true, + "∯": true, + "∮": true, + "ℂ": true, + "∐": true, + "∳": true, + "⨯": true, + "𝒞": true, + "⋓": true, + "≍": true, + "ⅅ": true, + "⤑": true, + "Ђ": true, + "Ѕ": true, + "Џ": true, + "‡": true, + "↡": true, + "⫤": true, + "Ď": true, + "Д": true, + "∇": true, + "Δ": true, + "𝔇": true, + "´": true, + "˙": true, + "˝": true, + "`": true, + "˜": true, + "⋄": true, + "ⅆ": true, + "𝔻": true, + "¨": true, + "⃜": true, + "≐": true, + "∯": true, + "¨": true, + "⇓": true, + "⇐": true, + "⇔": true, + "⫤": true, + "⟸": true, + "⟺": true, + "⟹": true, + "⇒": true, + "⊨": true, + "⇑": true, + "⇕": true, + "∥": true, + "↓": true, + "⤓": true, + "⇵": true, + "̑": true, + "⥐": true, + "⥞": true, + "↽": true, + "⥖": true, + "⥟": true, + "⇁": true, + "⥗": true, + "⊤": true, + "↧": true, + "⇓": true, + "𝒟": true, + "Đ": true, + "Ŋ": true, + "Ð": true, + "Ð": true, + "É": true, + "É": true, + "Ě": true, + "Ê": true, + "Ê": true, + "Э": true, + "Ė": true, + "𝔈": true, + "È": true, + "È": true, + "∈": true, + "Ē": true, + "◻": true, + "▫": true, + "Ę": true, + "𝔼": true, + "Ε": true, + "⩵": true, + "≂": true, + "⇌": true, + "ℰ": true, + "⩳": true, + "Η": true, + "Ë": true, + "Ë": true, + "∃": true, + "ⅇ": true, + "Ф": true, + "𝔉": true, + "◼": true, + "▪": true, + "𝔽": true, + "∀": true, + "ℱ": true, + "ℱ": true, + "Ѓ": true, + ">": true, + ">": true, + "Γ": true, + "Ϝ": true, + "Ğ": true, + "Ģ": true, + "Ĝ": true, + "Г": true, + "Ġ": true, + "𝔊": true, + "⋙": true, + "𝔾": true, + "≥": true, + "⋛": true, + "≧": true, + "⪢": true, + "≷": true, + "⩾": true, + "≳": true, + "𝒢": true, + "≫": true, + "Ъ": true, + "ˇ": true, + "^": true, + "Ĥ": true, + "ℌ": true, + "ℋ": true, + "ℍ": true, + "─": true, + "ℋ": true, + "Ħ": true, + "≎": true, + "≏": true, + "Е": true, + "IJ": true, + "Ё": true, + "Í": true, + "Í": true, + "Î": true, + "Î": true, + "И": true, + "İ": true, + "ℑ": true, + "Ì": true, + "Ì": true, + "ℑ": true, + "Ī": true, + "ⅈ": true, + "⇒": true, + "∬": true, + "∫": true, + "⋂": true, + "⁣": true, + "⁢": true, + "Į": true, + "𝕀": true, + "Ι": true, + "ℐ": true, + "Ĩ": true, + "І": true, + "Ï": true, + "Ï": true, + "Ĵ": true, + "Й": true, + "𝔍": true, + "𝕁": true, + "𝒥": true, + "Ј": true, + "Є": true, + "Х": true, + "Ќ": true, + "Κ": true, + "Ķ": true, + "К": true, + "𝔎": true, + 
"𝕂": true, + "𝒦": true, + "Љ": true, + "<": true, + "<": true, + "Ĺ": true, + "Λ": true, + "⟪": true, + "ℒ": true, + "↞": true, + "Ľ": true, + "Ļ": true, + "Л": true, + "⟨": true, + "←": true, + "⇤": true, + "⇆": true, + "⌈": true, + "⟦": true, + "⥡": true, + "⇃": true, + "⥙": true, + "⌊": true, + "↔": true, + "⥎": true, + "⊣": true, + "↤": true, + "⥚": true, + "⊲": true, + "⧏": true, + "⊴": true, + "⥑": true, + "⥠": true, + "↿": true, + "⥘": true, + "↼": true, + "⥒": true, + "⇐": true, + "⇔": true, + "⋚": true, + "≦": true, + "≶": true, + "⪡": true, + "⩽": true, + "≲": true, + "𝔏": true, + "⋘": true, + "⇚": true, + "Ŀ": true, + "⟵": true, + "⟷": true, + "⟶": true, + "⟸": true, + "⟺": true, + "⟹": true, + "𝕃": true, + "↙": true, + "↘": true, + "ℒ": true, + "↰": true, + "Ł": true, + "≪": true, + "⤅": true, + "М": true, + " ": true, + "ℳ": true, + "𝔐": true, + "∓": true, + "𝕄": true, + "ℳ": true, + "Μ": true, + "Њ": true, + "Ń": true, + "Ň": true, + "Ņ": true, + "Н": true, + "​": true, + "​": true, + "​": true, + "​": true, + "≫": true, + "≪": true, + " ": true, + "𝔑": true, + "⁠": true, + " ": true, + "ℕ": true, + "⫬": true, + "≢": true, + "≭": true, + "∦": true, + "∉": true, + "≠": true, + "≂̸": true, + "∄": true, + "≯": true, + "≱": true, + "≧̸": true, + "≫̸": true, + "≹": true, + "⩾̸": true, + "≵": true, + "≎̸": true, + "≏̸": true, + "⋪": true, + "⧏̸": true, + "⋬": true, + "≮": true, + "≰": true, + "≸": true, + "≪̸": true, + "⩽̸": true, + "≴": true, + "⪢̸": true, + "⪡̸": true, + "⊀": true, + "⪯̸": true, + "⋠": true, + "∌": true, + "⋫": true, + "⧐̸": true, + "⋭": true, + "⊏̸": true, + "⋢": true, + "⊐̸": true, + "⋣": true, + "⊂⃒": true, + "⊈": true, + "⊁": true, + "⪰̸": true, + "⋡": true, + "≿̸": true, + "⊃⃒": true, + "⊉": true, + "≁": true, + "≄": true, + "≇": true, + "≉": true, + "∤": true, + "𝒩": true, + "Ñ": true, + "Ñ": true, + "Ν": true, + "Œ": true, + "Ó": true, + "Ó": true, + "Ô": true, + "Ô": true, + "О": true, + "Ő": true, + "𝔒": true, + "Ò": true, + "Ò": true, + "Ō": true, + "Ω": true, + "Ο": true, + "𝕆": true, + "“": true, + "‘": true, + "⩔": true, + "𝒪": true, + "Ø": true, + "Ø": true, + "Õ": true, + "Õ": true, + "⨷": true, + "Ö": true, + "Ö": true, + "‾": true, + "⏞": true, + "⎴": true, + "⏜": true, + "∂": true, + "П": true, + "𝔓": true, + "Φ": true, + "Π": true, + "±": true, + "ℌ": true, + "ℙ": true, + "⪻": true, + "≺": true, + "⪯": true, + "≼": true, + "≾": true, + "″": true, + "∏": true, + "∷": true, + "∝": true, + "𝒫": true, + "Ψ": true, + """: true, + """: true, + "𝔔": true, + "ℚ": true, + "𝒬": true, + "⤐": true, + "®": true, + "®": true, + "Ŕ": true, + "⟫": true, + "↠": true, + "⤖": true, + "Ř": true, + "Ŗ": true, + "Р": true, + "ℜ": true, + "∋": true, + "⇋": true, + "⥯": true, + "ℜ": true, + "Ρ": true, + "⟩": true, + "→": true, + "⇥": true, + "⇄": true, + "⌉": true, + "⟧": true, + "⥝": true, + "⇂": true, + "⥕": true, + "⌋": true, + "⊢": true, + "↦": true, + "⥛": true, + "⊳": true, + "⧐": true, + "⊵": true, + "⥏": true, + "⥜": true, + "↾": true, + "⥔": true, + "⇀": true, + "⥓": true, + "⇒": true, + "ℝ": true, + "⥰": true, + "⇛": true, + "ℛ": true, + "↱": true, + "⧴": true, + "Щ": true, + "Ш": true, + "Ь": true, + "Ś": true, + "⪼": true, + "Š": true, + "Ş": true, + "Ŝ": true, + "С": true, + "𝔖": true, + "↓": true, + "←": true, + "→": true, + "↑": true, + "Σ": true, + "∘": true, + "𝕊": true, + "√": true, + "□": true, + "⊓": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊔": true, + "𝒮": true, + "⋆": true, + "⋐": true, + "⋐": true, + "⊆": true, + "≻": true, + 
"⪰": true, + "≽": true, + "≿": true, + "∋": true, + "∑": true, + "⋑": true, + "⊃": true, + "⊇": true, + "⋑": true, + "Þ": true, + "Þ": true, + "™": true, + "Ћ": true, + "Ц": true, + " ": true, + "Τ": true, + "Ť": true, + "Ţ": true, + "Т": true, + "𝔗": true, + "∴": true, + "Θ": true, + "  ": true, + " ": true, + "∼": true, + "≃": true, + "≅": true, + "≈": true, + "𝕋": true, + "⃛": true, + "𝒯": true, + "Ŧ": true, + "Ú": true, + "Ú": true, + "↟": true, + "⥉": true, + "Ў": true, + "Ŭ": true, + "Û": true, + "Û": true, + "У": true, + "Ű": true, + "𝔘": true, + "Ù": true, + "Ù": true, + "Ū": true, + "_": true, + "⏟": true, + "⎵": true, + "⏝": true, + "⋃": true, + "⊎": true, + "Ų": true, + "𝕌": true, + "↑": true, + "⤒": true, + "⇅": true, + "↕": true, + "⥮": true, + "⊥": true, + "↥": true, + "⇑": true, + "⇕": true, + "↖": true, + "↗": true, + "ϒ": true, + "Υ": true, + "Ů": true, + "𝒰": true, + "Ũ": true, + "Ü": true, + "Ü": true, + "⊫": true, + "⫫": true, + "В": true, + "⊩": true, + "⫦": true, + "⋁": true, + "‖": true, + "‖": true, + "∣": true, + "|": true, + "❘": true, + "≀": true, + " ": true, + "𝔙": true, + "𝕍": true, + "𝒱": true, + "⊪": true, + "Ŵ": true, + "⋀": true, + "𝔚": true, + "𝕎": true, + "𝒲": true, + "𝔛": true, + "Ξ": true, + "𝕏": true, + "𝒳": true, + "Я": true, + "Ї": true, + "Ю": true, + "Ý": true, + "Ý": true, + "Ŷ": true, + "Ы": true, + "𝔜": true, + "𝕐": true, + "𝒴": true, + "Ÿ": true, + "Ж": true, + "Ź": true, + "Ž": true, + "З": true, + "Ż": true, + "​": true, + "Ζ": true, + "ℨ": true, + "ℤ": true, + "𝒵": true, + "á": true, + "á": true, + "ă": true, + "∾": true, + "∾̳": true, + "∿": true, + "â": true, + "â": true, + "´": true, + "´": true, + "а": true, + "æ": true, + "æ": true, + "⁡": true, + "𝔞": true, + "à": true, + "à": true, + "ℵ": true, + "ℵ": true, + "α": true, + "ā": true, + "⨿": true, + "&": true, + "&": true, + "∧": true, + "⩕": true, + "⩜": true, + "⩘": true, + "⩚": true, + "∠": true, + "⦤": true, + "∠": true, + "∡": true, + "⦨": true, + "⦩": true, + "⦪": true, + "⦫": true, + "⦬": true, + "⦭": true, + "⦮": true, + "⦯": true, + "∟": true, + "⊾": true, + "⦝": true, + "∢": true, + "Å": true, + "⍼": true, + "ą": true, + "𝕒": true, + "≈": true, + "⩰": true, + "⩯": true, + "≊": true, + "≋": true, + "'": true, + "≈": true, + "≊": true, + "å": true, + "å": true, + "𝒶": true, + "*": true, + "≈": true, + "≍": true, + "ã": true, + "ã": true, + "ä": true, + "ä": true, + "∳": true, + "⨑": true, + "⫭": true, + "≌": true, + "϶": true, + "‵": true, + "∽": true, + "⋍": true, + "⊽": true, + "⌅": true, + "⌅": true, + "⎵": true, + "⎶": true, + "≌": true, + "б": true, + "„": true, + "∵": true, + "∵": true, + "⦰": true, + "϶": true, + "ℬ": true, + "β": true, + "ℶ": true, + "≬": true, + "𝔟": true, + "⋂": true, + "◯": true, + "⋃": true, + "⨀": true, + "⨁": true, + "⨂": true, + "⨆": true, + "★": true, + "▽": true, + "△": true, + "⨄": true, + "⋁": true, + "⋀": true, + "⤍": true, + "⧫": true, + "▪": true, + "▴": true, + "▾": true, + "◂": true, + "▸": true, + "␣": true, + "▒": true, + "░": true, + "▓": true, + "█": true, + "=⃥": true, + "≡⃥": true, + "⌐": true, + "𝕓": true, + "⊥": true, + "⊥": true, + "⋈": true, + "╗": true, + "╔": true, + "╖": true, + "╓": true, + "═": true, + "╦": true, + "╩": true, + "╤": true, + "╧": true, + "╝": true, + "╚": true, + "╜": true, + "╙": true, + "║": true, + "╬": true, + "╣": true, + "╠": true, + "╫": true, + "╢": true, + "╟": true, + "⧉": true, + "╕": true, + "╒": true, + "┐": true, + "┌": true, + "─": true, + "╥": true, + "╨": true, + "┬": true, + "┴": true, + 
"⊟": true, + "⊞": true, + "⊠": true, + "╛": true, + "╘": true, + "┘": true, + "└": true, + "│": true, + "╪": true, + "╡": true, + "╞": true, + "┼": true, + "┤": true, + "├": true, + "‵": true, + "˘": true, + "¦": true, + "¦": true, + "𝒷": true, + "⁏": true, + "∽": true, + "⋍": true, + "\": true, + "⧅": true, + "⟈": true, + "•": true, + "•": true, + "≎": true, + "⪮": true, + "≏": true, + "≏": true, + "ć": true, + "∩": true, + "⩄": true, + "⩉": true, + "⩋": true, + "⩇": true, + "⩀": true, + "∩︀": true, + "⁁": true, + "ˇ": true, + "⩍": true, + "č": true, + "ç": true, + "ç": true, + "ĉ": true, + "⩌": true, + "⩐": true, + "ċ": true, + "¸": true, + "¸": true, + "⦲": true, + "¢": true, + "¢": true, + "·": true, + "𝔠": true, + "ч": true, + "✓": true, + "✓": true, + "χ": true, + "○": true, + "⧃": true, + "ˆ": true, + "≗": true, + "↺": true, + "↻": true, + "®": true, + "Ⓢ": true, + "⊛": true, + "⊚": true, + "⊝": true, + "≗": true, + "⨐": true, + "⫯": true, + "⧂": true, + "♣": true, + "♣": true, + ":": true, + "≔": true, + "≔": true, + ",": true, + "@": true, + "∁": true, + "∘": true, + "∁": true, + "ℂ": true, + "≅": true, + "⩭": true, + "∮": true, + "𝕔": true, + "∐": true, + "©": true, + "©": true, + "℗": true, + "↵": true, + "✗": true, + "𝒸": true, + "⫏": true, + "⫑": true, + "⫐": true, + "⫒": true, + "⋯": true, + "⤸": true, + "⤵": true, + "⋞": true, + "⋟": true, + "↶": true, + "⤽": true, + "∪": true, + "⩈": true, + "⩆": true, + "⩊": true, + "⊍": true, + "⩅": true, + "∪︀": true, + "↷": true, + "⤼": true, + "⋞": true, + "⋟": true, + "⋎": true, + "⋏": true, + "¤": true, + "¤": true, + "↶": true, + "↷": true, + "⋎": true, + "⋏": true, + "∲": true, + "∱": true, + "⌭": true, + "⇓": true, + "⥥": true, + "†": true, + "ℸ": true, + "↓": true, + "‐": true, + "⊣": true, + "⤏": true, + "˝": true, + "ď": true, + "д": true, + "ⅆ": true, + "‡": true, + "⇊": true, + "⩷": true, + "°": true, + "°": true, + "δ": true, + "⦱": true, + "⥿": true, + "𝔡": true, + "⇃": true, + "⇂": true, + "⋄": true, + "⋄": true, + "♦": true, + "♦": true, + "¨": true, + "ϝ": true, + "⋲": true, + "÷": true, + "÷": true, + "÷": true, + "⋇": true, + "⋇": true, + "ђ": true, + "⌞": true, + "⌍": true, + "$": true, + "𝕕": true, + "˙": true, + "≐": true, + "≑": true, + "∸": true, + "∔": true, + "⊡": true, + "⌆": true, + "↓": true, + "⇊": true, + "⇃": true, + "⇂": true, + "⤐": true, + "⌟": true, + "⌌": true, + "𝒹": true, + "ѕ": true, + "⧶": true, + "đ": true, + "⋱": true, + "▿": true, + "▾": true, + "⇵": true, + "⥯": true, + "⦦": true, + "џ": true, + "⟿": true, + "⩷": true, + "≑": true, + "é": true, + "é": true, + "⩮": true, + "ě": true, + "≖": true, + "ê": true, + "ê": true, + "≕": true, + "э": true, + "ė": true, + "ⅇ": true, + "≒": true, + "𝔢": true, + "⪚": true, + "è": true, + "è": true, + "⪖": true, + "⪘": true, + "⪙": true, + "⏧": true, + "ℓ": true, + "⪕": true, + "⪗": true, + "ē": true, + "∅": true, + "∅": true, + "∅": true, + " ": true, + " ": true, + " ": true, + "ŋ": true, + " ": true, + "ę": true, + "𝕖": true, + "⋕": true, + "⧣": true, + "⩱": true, + "ε": true, + "ε": true, + "ϵ": true, + "≖": true, + "≕": true, + "≂": true, + "⪖": true, + "⪕": true, + "=": true, + "≟": true, + "≡": true, + "⩸": true, + "⧥": true, + "≓": true, + "⥱": true, + "ℯ": true, + "≐": true, + "≂": true, + "η": true, + "ð": true, + "ð": true, + "ë": true, + "ë": true, + "€": true, + "!": true, + "∃": true, + "ℰ": true, + "ⅇ": true, + "≒": true, + "ф": true, + "♀": true, + "ffi": true, + "ff": true, + "ffl": true, + "𝔣": true, + "fi": true, + "fj": true, + "♭": 
true, + "fl": true, + "▱": true, + "ƒ": true, + "𝕗": true, + "∀": true, + "⋔": true, + "⫙": true, + "⨍": true, + "½": true, + "½": true, + "⅓": true, + "¼": true, + "¼": true, + "⅕": true, + "⅙": true, + "⅛": true, + "⅔": true, + "⅖": true, + "¾": true, + "¾": true, + "⅗": true, + "⅜": true, + "⅘": true, + "⅚": true, + "⅝": true, + "⅞": true, + "⁄": true, + "⌢": true, + "𝒻": true, + "≧": true, + "⪌": true, + "ǵ": true, + "γ": true, + "ϝ": true, + "⪆": true, + "ğ": true, + "ĝ": true, + "г": true, + "ġ": true, + "≥": true, + "⋛": true, + "≥": true, + "≧": true, + "⩾": true, + "⩾": true, + "⪩": true, + "⪀": true, + "⪂": true, + "⪄": true, + "⋛︀": true, + "⪔": true, + "𝔤": true, + "≫": true, + "⋙": true, + "ℷ": true, + "ѓ": true, + "≷": true, + "⪒": true, + "⪥": true, + "⪤": true, + "≩": true, + "⪊": true, + "⪊": true, + "⪈": true, + "⪈": true, + "≩": true, + "⋧": true, + "𝕘": true, + "`": true, + "ℊ": true, + "≳": true, + "⪎": true, + "⪐": true, + ">": true, + ">": true, + "⪧": true, + "⩺": true, + "⋗": true, + "⦕": true, + "⩼": true, + "⪆": true, + "⥸": true, + "⋗": true, + "⋛": true, + "⪌": true, + "≷": true, + "≳": true, + "≩︀": true, + "≩︀": true, + "⇔": true, + " ": true, + "½": true, + "ℋ": true, + "ъ": true, + "↔": true, + "⥈": true, + "↭": true, + "ℏ": true, + "ĥ": true, + "♥": true, + "♥": true, + "…": true, + "⊹": true, + "𝔥": true, + "⤥": true, + "⤦": true, + "⇿": true, + "∻": true, + "↩": true, + "↪": true, + "𝕙": true, + "―": true, + "𝒽": true, + "ℏ": true, + "ħ": true, + "⁃": true, + "‐": true, + "í": true, + "í": true, + "⁣": true, + "î": true, + "î": true, + "и": true, + "е": true, + "¡": true, + "¡": true, + "⇔": true, + "𝔦": true, + "ì": true, + "ì": true, + "ⅈ": true, + "⨌": true, + "∭": true, + "⧜": true, + "℩": true, + "ij": true, + "ī": true, + "ℑ": true, + "ℐ": true, + "ℑ": true, + "ı": true, + "⊷": true, + "Ƶ": true, + "∈": true, + "℅": true, + "∞": true, + "⧝": true, + "ı": true, + "∫": true, + "⊺": true, + "ℤ": true, + "⊺": true, + "⨗": true, + "⨼": true, + "ё": true, + "į": true, + "𝕚": true, + "ι": true, + "⨼": true, + "¿": true, + "¿": true, + "𝒾": true, + "∈": true, + "⋹": true, + "⋵": true, + "⋴": true, + "⋳": true, + "∈": true, + "⁢": true, + "ĩ": true, + "і": true, + "ï": true, + "ï": true, + "ĵ": true, + "й": true, + "𝔧": true, + "ȷ": true, + "𝕛": true, + "𝒿": true, + "ј": true, + "є": true, + "κ": true, + "ϰ": true, + "ķ": true, + "к": true, + "𝔨": true, + "ĸ": true, + "х": true, + "ќ": true, + "𝕜": true, + "𝓀": true, + "⇚": true, + "⇐": true, + "⤛": true, + "⤎": true, + "≦": true, + "⪋": true, + "⥢": true, + "ĺ": true, + "⦴": true, + "ℒ": true, + "λ": true, + "⟨": true, + "⦑": true, + "⟨": true, + "⪅": true, + "«": true, + "«": true, + "←": true, + "⇤": true, + "⤟": true, + "⤝": true, + "↩": true, + "↫": true, + "⤹": true, + "⥳": true, + "↢": true, + "⪫": true, + "⤙": true, + "⪭": true, + "⪭︀": true, + "⤌": true, + "❲": true, + "{": true, + "[": true, + "⦋": true, + "⦏": true, + "⦍": true, + "ľ": true, + "ļ": true, + "⌈": true, + "{": true, + "л": true, + "⤶": true, + "“": true, + "„": true, + "⥧": true, + "⥋": true, + "↲": true, + "≤": true, + "←": true, + "↢": true, + "↽": true, + "↼": true, + "⇇": true, + "↔": true, + "⇆": true, + "⇋": true, + "↭": true, + "⋋": true, + "⋚": true, + "≤": true, + "≦": true, + "⩽": true, + "⩽": true, + "⪨": true, + "⩿": true, + "⪁": true, + "⪃": true, + "⋚︀": true, + "⪓": true, + "⪅": true, + "⋖": true, + "⋚": true, + "⪋": true, + "≶": true, + "≲": true, + "⥼": true, + "⌊": true, + "𝔩": true, + "≶": true, + "⪑": true, + 
"↽": true, + "↼": true, + "⥪": true, + "▄": true, + "љ": true, + "≪": true, + "⇇": true, + "⌞": true, + "⥫": true, + "◺": true, + "ŀ": true, + "⎰": true, + "⎰": true, + "≨": true, + "⪉": true, + "⪉": true, + "⪇": true, + "⪇": true, + "≨": true, + "⋦": true, + "⟬": true, + "⇽": true, + "⟦": true, + "⟵": true, + "⟷": true, + "⟼": true, + "⟶": true, + "↫": true, + "↬": true, + "⦅": true, + "𝕝": true, + "⨭": true, + "⨴": true, + "∗": true, + "_": true, + "◊": true, + "◊": true, + "⧫": true, + "(": true, + "⦓": true, + "⇆": true, + "⌟": true, + "⇋": true, + "⥭": true, + "‎": true, + "⊿": true, + "‹": true, + "𝓁": true, + "↰": true, + "≲": true, + "⪍": true, + "⪏": true, + "[": true, + "‘": true, + "‚": true, + "ł": true, + "<": true, + "<": true, + "⪦": true, + "⩹": true, + "⋖": true, + "⋋": true, + "⋉": true, + "⥶": true, + "⩻": true, + "⦖": true, + "◃": true, + "⊴": true, + "◂": true, + "⥊": true, + "⥦": true, + "≨︀": true, + "≨︀": true, + "∺": true, + "¯": true, + "¯": true, + "♂": true, + "✠": true, + "✠": true, + "↦": true, + "↦": true, + "↧": true, + "↤": true, + "↥": true, + "▮": true, + "⨩": true, + "м": true, + "—": true, + "∡": true, + "𝔪": true, + "℧": true, + "µ": true, + "µ": true, + "∣": true, + "*": true, + "⫰": true, + "·": true, + "·": true, + "−": true, + "⊟": true, + "∸": true, + "⨪": true, + "⫛": true, + "…": true, + "∓": true, + "⊧": true, + "𝕞": true, + "∓": true, + "𝓂": true, + "∾": true, + "μ": true, + "⊸": true, + "⊸": true, + "⋙̸": true, + "≫⃒": true, + "≫̸": true, + "⇍": true, + "⇎": true, + "⋘̸": true, + "≪⃒": true, + "≪̸": true, + "⇏": true, + "⊯": true, + "⊮": true, + "∇": true, + "ń": true, + "∠⃒": true, + "≉": true, + "⩰̸": true, + "≋̸": true, + "ʼn": true, + "≉": true, + "♮": true, + "♮": true, + "ℕ": true, + " ": true, + " ": true, + "≎̸": true, + "≏̸": true, + "⩃": true, + "ň": true, + "ņ": true, + "≇": true, + "⩭̸": true, + "⩂": true, + "н": true, + "–": true, + "≠": true, + "⇗": true, + "⤤": true, + "↗": true, + "↗": true, + "≐̸": true, + "≢": true, + "⤨": true, + "≂̸": true, + "∄": true, + "∄": true, + "𝔫": true, + "≧̸": true, + "≱": true, + "≱": true, + "≧̸": true, + "⩾̸": true, + "⩾̸": true, + "≵": true, + "≯": true, + "≯": true, + "⇎": true, + "↮": true, + "⫲": true, + "∋": true, + "⋼": true, + "⋺": true, + "∋": true, + "њ": true, + "⇍": true, + "≦̸": true, + "↚": true, + "‥": true, + "≰": true, + "↚": true, + "↮": true, + "≰": true, + "≦̸": true, + "⩽̸": true, + "⩽̸": true, + "≮": true, + "≴": true, + "≮": true, + "⋪": true, + "⋬": true, + "∤": true, + "𝕟": true, + "¬": true, + "¬": true, + "∉": true, + "⋹̸": true, + "⋵̸": true, + "∉": true, + "⋷": true, + "⋶": true, + "∌": true, + "∌": true, + "⋾": true, + "⋽": true, + "∦": true, + "∦": true, + "⫽⃥": true, + "∂̸": true, + "⨔": true, + "⊀": true, + "⋠": true, + "⪯̸": true, + "⊀": true, + "⪯̸": true, + "⇏": true, + "↛": true, + "⤳̸": true, + "↝̸": true, + "↛": true, + "⋫": true, + "⋭": true, + "⊁": true, + "⋡": true, + "⪰̸": true, + "𝓃": true, + "∤": true, + "∦": true, + "≁": true, + "≄": true, + "≄": true, + "∤": true, + "∦": true, + "⋢": true, + "⋣": true, + "⊄": true, + "⫅̸": true, + "⊈": true, + "⊂⃒": true, + "⊈": true, + "⫅̸": true, + "⊁": true, + "⪰̸": true, + "⊅": true, + "⫆̸": true, + "⊉": true, + "⊃⃒": true, + "⊉": true, + "⫆̸": true, + "≹": true, + "ñ": true, + "ñ": true, + "≸": true, + "⋪": true, + "⋬": true, + "⋫": true, + "⋭": true, + "ν": true, + "#": true, + "№": true, + " ": true, + "⊭": true, + "⤄": true, + "≍⃒": true, + "⊬": true, + "≥⃒": true, + ">⃒": true, + "⧞": true, + "⤂": true, + 
"≤⃒": true, + "<⃒": true, + "⊴⃒": true, + "⤃": true, + "⊵⃒": true, + "∼⃒": true, + "⇖": true, + "⤣": true, + "↖": true, + "↖": true, + "⤧": true, + "Ⓢ": true, + "ó": true, + "ó": true, + "⊛": true, + "⊚": true, + "ô": true, + "ô": true, + "о": true, + "⊝": true, + "ő": true, + "⨸": true, + "⊙": true, + "⦼": true, + "œ": true, + "⦿": true, + "𝔬": true, + "˛": true, + "ò": true, + "ò": true, + "⧁": true, + "⦵": true, + "Ω": true, + "∮": true, + "↺": true, + "⦾": true, + "⦻": true, + "‾": true, + "⧀": true, + "ō": true, + "ω": true, + "ο": true, + "⦶": true, + "⊖": true, + "𝕠": true, + "⦷": true, + "⦹": true, + "⊕": true, + "∨": true, + "↻": true, + "⩝": true, + "ℴ": true, + "ℴ": true, + "ª": true, + "ª": true, + "º": true, + "º": true, + "⊶": true, + "⩖": true, + "⩗": true, + "⩛": true, + "ℴ": true, + "ø": true, + "ø": true, + "⊘": true, + "õ": true, + "õ": true, + "⊗": true, + "⨶": true, + "ö": true, + "ö": true, + "⌽": true, + "∥": true, + "¶": true, + "¶": true, + "∥": true, + "⫳": true, + "⫽": true, + "∂": true, + "п": true, + "%": true, + ".": true, + "‰": true, + "⊥": true, + "‱": true, + "𝔭": true, + "φ": true, + "ϕ": true, + "ℳ": true, + "☎": true, + "π": true, + "⋔": true, + "ϖ": true, + "ℏ": true, + "ℎ": true, + "ℏ": true, + "+": true, + "⨣": true, + "⊞": true, + "⨢": true, + "∔": true, + "⨥": true, + "⩲": true, + "±": true, + "±": true, + "⨦": true, + "⨧": true, + "±": true, + "⨕": true, + "𝕡": true, + "£": true, + "£": true, + "≺": true, + "⪳": true, + "⪷": true, + "≼": true, + "⪯": true, + "≺": true, + "⪷": true, + "≼": true, + "⪯": true, + "⪹": true, + "⪵": true, + "⋨": true, + "≾": true, + "′": true, + "ℙ": true, + "⪵": true, + "⪹": true, + "⋨": true, + "∏": true, + "⌮": true, + "⌒": true, + "⌓": true, + "∝": true, + "∝": true, + "≾": true, + "⊰": true, + "𝓅": true, + "ψ": true, + " ": true, + "𝔮": true, + "⨌": true, + "𝕢": true, + "⁗": true, + "𝓆": true, + "ℍ": true, + "⨖": true, + "?": true, + "≟": true, + """: true, + """: true, + "⇛": true, + "⇒": true, + "⤜": true, + "⤏": true, + "⥤": true, + "∽̱": true, + "ŕ": true, + "√": true, + "⦳": true, + "⟩": true, + "⦒": true, + "⦥": true, + "⟩": true, + "»": true, + "»": true, + "→": true, + "⥵": true, + "⇥": true, + "⤠": true, + "⤳": true, + "⤞": true, + "↪": true, + "↬": true, + "⥅": true, + "⥴": true, + "↣": true, + "↝": true, + "⤚": true, + "∶": true, + "ℚ": true, + "⤍": true, + "❳": true, + "}": true, + "]": true, + "⦌": true, + "⦎": true, + "⦐": true, + "ř": true, + "ŗ": true, + "⌉": true, + "}": true, + "р": true, + "⤷": true, + "⥩": true, + "”": true, + "”": true, + "↳": true, + "ℜ": true, + "ℛ": true, + "ℜ": true, + "ℝ": true, + "▭": true, + "®": true, + "®": true, + "⥽": true, + "⌋": true, + "𝔯": true, + "⇁": true, + "⇀": true, + "⥬": true, + "ρ": true, + "ϱ": true, + "→": true, + "↣": true, + "⇁": true, + "⇀": true, + "⇄": true, + "⇌": true, + "⇉": true, + "↝": true, + "⋌": true, + "˚": true, + "≓": true, + "⇄": true, + "⇌": true, + "‏": true, + "⎱": true, + "⎱": true, + "⫮": true, + "⟭": true, + "⇾": true, + "⟧": true, + "⦆": true, + "𝕣": true, + "⨮": true, + "⨵": true, + ")": true, + "⦔": true, + "⨒": true, + "⇉": true, + "›": true, + "𝓇": true, + "↱": true, + "]": true, + "’": true, + "’": true, + "⋌": true, + "⋊": true, + "▹": true, + "⊵": true, + "▸": true, + "⧎": true, + "⥨": true, + "℞": true, + "ś": true, + "‚": true, + "≻": true, + "⪴": true, + "⪸": true, + "š": true, + "≽": true, + "⪰": true, + "ş": true, + "ŝ": true, + "⪶": true, + "⪺": true, + "⋩": true, + "⨓": true, + "≿": true, + "с": true, + "⋅": true, + 
"⊡": true, + "⩦": true, + "⇘": true, + "⤥": true, + "↘": true, + "↘": true, + "§": true, + "§": true, + ";": true, + "⤩": true, + "∖": true, + "∖": true, + "✶": true, + "𝔰": true, + "⌢": true, + "♯": true, + "щ": true, + "ш": true, + "∣": true, + "∥": true, + "­": true, + "­": true, + "σ": true, + "ς": true, + "ς": true, + "∼": true, + "⩪": true, + "≃": true, + "≃": true, + "⪞": true, + "⪠": true, + "⪝": true, + "⪟": true, + "≆": true, + "⨤": true, + "⥲": true, + "←": true, + "∖": true, + "⨳": true, + "⧤": true, + "∣": true, + "⌣": true, + "⪪": true, + "⪬": true, + "⪬︀": true, + "ь": true, + "/": true, + "⧄": true, + "⌿": true, + "𝕤": true, + "♠": true, + "♠": true, + "∥": true, + "⊓": true, + "⊓︀": true, + "⊔": true, + "⊔︀": true, + "⊏": true, + "⊑": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊐": true, + "⊒": true, + "□": true, + "□": true, + "▪": true, + "▪": true, + "→": true, + "𝓈": true, + "∖": true, + "⌣": true, + "⋆": true, + "☆": true, + "★": true, + "ϵ": true, + "ϕ": true, + "¯": true, + "⊂": true, + "⫅": true, + "⪽": true, + "⊆": true, + "⫃": true, + "⫁": true, + "⫋": true, + "⊊": true, + "⪿": true, + "⥹": true, + "⊂": true, + "⊆": true, + "⫅": true, + "⊊": true, + "⫋": true, + "⫇": true, + "⫕": true, + "⫓": true, + "≻": true, + "⪸": true, + "≽": true, + "⪰": true, + "⪺": true, + "⪶": true, + "⋩": true, + "≿": true, + "∑": true, + "♪": true, + "¹": true, + "¹": true, + "²": true, + "²": true, + "³": true, + "³": true, + "⊃": true, + "⫆": true, + "⪾": true, + "⫘": true, + "⊇": true, + "⫄": true, + "⟉": true, + "⫗": true, + "⥻": true, + "⫂": true, + "⫌": true, + "⊋": true, + "⫀": true, + "⊃": true, + "⊇": true, + "⫆": true, + "⊋": true, + "⫌": true, + "⫈": true, + "⫔": true, + "⫖": true, + "⇙": true, + "⤦": true, + "↙": true, + "↙": true, + "⤪": true, + "ß": true, + "ß": true, + "⌖": true, + "τ": true, + "⎴": true, + "ť": true, + "ţ": true, + "т": true, + "⃛": true, + "⌕": true, + "𝔱": true, + "∴": true, + "∴": true, + "θ": true, + "ϑ": true, + "ϑ": true, + "≈": true, + "∼": true, + " ": true, + "≈": true, + "∼": true, + "þ": true, + "þ": true, + "˜": true, + "×": true, + "×": true, + "⊠": true, + "⨱": true, + "⨰": true, + "∭": true, + "⤨": true, + "⊤": true, + "⌶": true, + "⫱": true, + "𝕥": true, + "⫚": true, + "⤩": true, + "‴": true, + "™": true, + "▵": true, + "▿": true, + "◃": true, + "⊴": true, + "≜": true, + "▹": true, + "⊵": true, + "◬": true, + "≜": true, + "⨺": true, + "⨹": true, + "⧍": true, + "⨻": true, + "⏢": true, + "𝓉": true, + "ц": true, + "ћ": true, + "ŧ": true, + "≬": true, + "↞": true, + "↠": true, + "⇑": true, + "⥣": true, + "ú": true, + "ú": true, + "↑": true, + "ў": true, + "ŭ": true, + "û": true, + "û": true, + "у": true, + "⇅": true, + "ű": true, + "⥮": true, + "⥾": true, + "𝔲": true, + "ù": true, + "ù": true, + "↿": true, + "↾": true, + "▀": true, + "⌜": true, + "⌜": true, + "⌏": true, + "◸": true, + "ū": true, + "¨": true, + "¨": true, + "ų": true, + "𝕦": true, + "↑": true, + "↕": true, + "↿": true, + "↾": true, + "⊎": true, + "υ": true, + "ϒ": true, + "υ": true, + "⇈": true, + "⌝": true, + "⌝": true, + "⌎": true, + "ů": true, + "◹": true, + "𝓊": true, + "⋰": true, + "ũ": true, + "▵": true, + "▴": true, + "⇈": true, + "ü": true, + "ü": true, + "⦧": true, + "⇕": true, + "⫨": true, + "⫩": true, + "⊨": true, + "⦜": true, + "ϵ": true, + "ϰ": true, + "∅": true, + "ϕ": true, + "ϖ": true, + "∝": true, + "↕": true, + "ϱ": true, + "ς": true, + "⊊︀": true, + "⫋︀": true, + "⊋︀": true, + "⫌︀": true, + "ϑ": true, + "⊲": true, + "⊳": true, + "в": true, 
+ "⊢": true, + "∨": true, + "⊻": true, + "≚": true, + "⋮": true, + "|": true, + "|": true, + "𝔳": true, + "⊲": true, + "⊂⃒": true, + "⊃⃒": true, + "𝕧": true, + "∝": true, + "⊳": true, + "𝓋": true, + "⫋︀": true, + "⊊︀": true, + "⫌︀": true, + "⊋︀": true, + "⦚": true, + "ŵ": true, + "⩟": true, + "∧": true, + "≙": true, + "℘": true, + "𝔴": true, + "𝕨": true, + "℘": true, + "≀": true, + "≀": true, + "𝓌": true, + "⋂": true, + "◯": true, + "⋃": true, + "▽": true, + "𝔵": true, + "⟺": true, + "⟷": true, + "ξ": true, + "⟸": true, + "⟵": true, + "⟼": true, + "⋻": true, + "⨀": true, + "𝕩": true, + "⨁": true, + "⨂": true, + "⟹": true, + "⟶": true, + "𝓍": true, + "⨆": true, + "⨄": true, + "△": true, + "⋁": true, + "⋀": true, + "ý": true, + "ý": true, + "я": true, + "ŷ": true, + "ы": true, + "¥": true, + "¥": true, + "𝔶": true, + "ї": true, + "𝕪": true, + "𝓎": true, + "ю": true, + "ÿ": true, + "ÿ": true, + "ź": true, + "ž": true, + "з": true, + "ż": true, + "ℨ": true, + "ζ": true, + "𝔷": true, + "ж": true, + "⇝": true, + "𝕫": true, + "𝓏": true, + "‍": true, + "‌": true, +} diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go new file mode 100644 index 000000000..6ab60102c --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/esc.go @@ -0,0 +1,70 @@ +package blackfriday + +import ( + "html" + "io" +) + +var htmlEscaper = [256][]byte{ + '&': []byte("&"), + '<': []byte("<"), + '>': []byte(">"), + '"': []byte("""), +} + +func escapeHTML(w io.Writer, s []byte) { + escapeEntities(w, s, false) +} + +func escapeAllHTML(w io.Writer, s []byte) { + escapeEntities(w, s, true) +} + +func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) { + var start, end int + for end < len(s) { + escSeq := htmlEscaper[s[end]] + if escSeq != nil { + isEntity, entityEnd := nodeIsEntity(s, end) + if isEntity && !escapeValidEntities { + w.Write(s[start : entityEnd+1]) + start = entityEnd + 1 + } else { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } + } + end++ + } + if start < len(s) && end <= len(s) { + w.Write(s[start:end]) + } +} + +func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) { + isEntity = false + endEntityPos = end + 1 + + if s[end] == '&' { + for endEntityPos < len(s) { + if s[endEntityPos] == ';' { + if entities[string(s[end:endEntityPos+1])] { + isEntity = true + break + } + } + if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' { + break + } + endEntityPos++ + } + } + + return isEntity, endEntityPos +} + +func escLink(w io.Writer, text []byte) { + unesc := html.UnescapeString(string(text)) + escapeHTML(w, []byte(unesc)) +} diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go new file mode 100644 index 000000000..cb4f26e30 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/html.go @@ -0,0 +1,952 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// HTMLFlags control optional behavior of HTML renderer. +type HTMLFlags int + +// HTML renderer configuration options. 
+const ( + HTMLFlagsNone HTMLFlags = 0 + SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks + SkipImages // Skip embedded images + SkipLinks // Skip all links + Safelink // Only link to trusted protocols + NofollowLinks // Only link with rel="nofollow" + NoreferrerLinks // Only link with rel="noreferrer" + NoopenerLinks // Only link with rel="noopener" + HrefTargetBlank // Add a blank target + CompletePage // Generate a complete HTML page + UseXHTML // Generate XHTML output instead of HTML + FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source + Smartypants // Enable smart punctuation substitutions + SmartypantsFractions // Enable smart fractions (with Smartypants) + SmartypantsDashes // Enable smart dashes (with Smartypants) + SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants) + SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering + SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants) + TOC // Generate a table of contents +) + +var ( + htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag) +) + +const ( + htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" + + processingInstruction + "|" + declaration + "|" + cdata + ")" + closeTag = "]" + openTag = "<" + tagName + attribute + "*" + "\\s*/?>" + attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)" + attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")" + attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")" + attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*" + cdata = "" + declaration = "]*>" + doubleQuotedValue = "\"[^\"]*\"" + htmlComment = "|" + processingInstruction = "[<][?].*?[?][>]" + singleQuotedValue = "'[^']*'" + tagName = "[A-Za-z][A-Za-z0-9-]*" + unquotedValue = "[^\"'=<>`\\x00-\\x20]+" +) + +// HTMLRendererParameters is a collection of supplementary parameters tweaking +// the behavior of various parts of HTML renderer. +type HTMLRendererParameters struct { + // Prepend this text to each relative URL. + AbsolutePrefix string + // Add this text to each footnote anchor, to ensure uniqueness. + FootnoteAnchorPrefix string + // Show this text inside the
tag for a footnote return link, if the + // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string + // [return] is used. + FootnoteReturnLinkContents string + // If set, add this text to the front of each Heading ID, to ensure + // uniqueness. + HeadingIDPrefix string + // If set, add this text to the back of each Heading ID, to ensure uniqueness. + HeadingIDSuffix string + // Increase heading levels: if the offset is 1,
<h1> becomes <h2>
etc. + // Negative offset is also valid. + // Resulting levels are clipped between 1 and 6. + HeadingLevelOffset int + + Title string // Document title (used if CompletePage is set) + CSS string // Optional CSS file URL (used if CompletePage is set) + Icon string // Optional icon file URL (used if CompletePage is set) + + Flags HTMLFlags // Flags allow customizing this renderer's behavior +} + +// HTMLRenderer is a type that implements the Renderer interface for HTML output. +// +// Do not create this directly, instead use the NewHTMLRenderer function. +type HTMLRenderer struct { + HTMLRendererParameters + + closeTag string // how to end singleton tags: either " />" or ">" + + // Track heading IDs to prevent ID collision in a single generation. + headingIDs map[string]int + + lastOutputLen int + disableTags int + + sr *SPRenderer +} + +const ( + xhtmlClose = " />" + htmlClose = ">" +) + +// NewHTMLRenderer creates and configures an HTMLRenderer object, which +// satisfies the Renderer interface. +func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { + // configure the rendering engine + closeTag := htmlClose + if params.Flags&UseXHTML != 0 { + closeTag = xhtmlClose + } + + if params.FootnoteReturnLinkContents == "" { + // U+FE0E is VARIATION SELECTOR-15. + // It suppresses automatic emoji presentation of the preceding + // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. + params.FootnoteReturnLinkContents = "↩\ufe0e" + } + + return &HTMLRenderer{ + HTMLRendererParameters: params, + + closeTag: closeTag, + headingIDs: make(map[string]int), + + sr: NewSmartypantsRenderer(params.Flags), + } +} + +func isHTMLTag(tag []byte, tagname string) bool { + found, _ := findHTMLTagPos(tag, tagname) + return found +} + +// Look for a character, but ignore it when it's in any kind of quotes, it +// might be JavaScript +func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { + inSingleQuote := false + inDoubleQuote := false + inGraveQuote := false + i := start + for i < len(html) { + switch { + case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: + return i + case html[i] == '\'': + inSingleQuote = !inSingleQuote + case html[i] == '"': + inDoubleQuote = !inDoubleQuote + case html[i] == '`': + inGraveQuote = !inGraveQuote + } + i++ + } + return start +} + +func findHTMLTagPos(tag []byte, tagname string) (bool, int) { + i := 0 + if i < len(tag) && tag[0] != '<' { + return false, -1 + } + i++ + i = skipSpace(tag, i) + + if i < len(tag) && tag[i] == '/' { + i++ + } + + i = skipSpace(tag, i) + j := 0 + for ; i < len(tag); i, j = i+1, j+1 { + if j >= len(tagname) { + break + } + + if strings.ToLower(string(tag[i]))[0] != tagname[j] { + return false, -1 + } + } + + if i == len(tag) { + return false, -1 + } + + rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') + if rightAngle >= i { + return true, rightAngle + } + + return false, -1 +} + +func skipSpace(tag []byte, i int) int { + for i < len(tag) && isspace(tag[i]) { + i++ + } + return i +} + +func isRelativeLink(link []byte) (yes bool) { + // a tag begin with '#' + if link[0] == '#' { + return true + } + + // link begin with '/' but not '//', the second maybe a protocol relative link + if len(link) >= 2 && link[0] == '/' && link[1] != '/' { + return true + } + + // only the root '/' + if len(link) == 1 && link[0] == '/' { + return true + } + + // current directory : begin with "./" + if bytes.HasPrefix(link, []byte("./")) { + return true + } + + // parent directory : begin with "../" + if 
bytes.HasPrefix(link, []byte("../")) { + return true + } + + return false +} + +func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string { + for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { + tmp := fmt.Sprintf("%s-%d", id, count+1) + + if _, tmpFound := r.headingIDs[tmp]; !tmpFound { + r.headingIDs[id] = count + 1 + id = tmp + } else { + id = id + "-1" + } + } + + if _, found := r.headingIDs[id]; !found { + r.headingIDs[id] = 0 + } + + return id +} + +func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte { + if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { + newDest := r.AbsolutePrefix + if link[0] != '/' { + newDest += "/" + } + newDest += string(link) + return []byte(newDest) + } + return link +} + +func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string { + if isRelativeLink(link) { + return attrs + } + val := []string{} + if flags&NofollowLinks != 0 { + val = append(val, "nofollow") + } + if flags&NoreferrerLinks != 0 { + val = append(val, "noreferrer") + } + if flags&NoopenerLinks != 0 { + val = append(val, "noopener") + } + if flags&HrefTargetBlank != 0 { + attrs = append(attrs, "target=\"_blank\"") + } + if len(val) == 0 { + return attrs + } + attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) + return append(attrs, attr) +} + +func isMailto(link []byte) bool { + return bytes.HasPrefix(link, []byte("mailto:")) +} + +func needSkipLink(flags HTMLFlags, dest []byte) bool { + if flags&SkipLinks != 0 { + return true + } + return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) +} + +func isSmartypantable(node *Node) bool { + pt := node.Parent.Type + return pt != Link && pt != CodeBlock && pt != Code +} + +func appendLanguageAttr(attrs []string, info []byte) []string { + if len(info) == 0 { + return attrs + } + endOfLang := bytes.IndexAny(info, "\t ") + if endOfLang < 0 { + endOfLang = len(info) + } + return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang])) +} + +func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) { + w.Write(name) + if len(attrs) > 0 { + w.Write(spaceBytes) + w.Write([]byte(strings.Join(attrs, " "))) + } + w.Write(gtBytes) + r.lastOutputLen = 1 +} + +func footnoteRef(prefix string, node *Node) []byte { + urlFrag := prefix + string(slugify(node.Destination)) + anchor := fmt.Sprintf(`%d`, urlFrag, node.NoteID) + return []byte(fmt.Sprintf(`%s`, urlFrag, anchor)) +} + +func footnoteItem(prefix string, slug []byte) []byte { + return []byte(fmt.Sprintf(`
  • `, prefix, slug)) +} + +func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte { + const format = ` %s` + return []byte(fmt.Sprintf(format, prefix, slug, returnLink)) +} + +func itemOpenCR(node *Node) bool { + if node.Prev == nil { + return false + } + ld := node.Parent.ListData + return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0 +} + +func skipParagraphTags(node *Node) bool { + grandparent := node.Parent.Parent + if grandparent == nil || grandparent.Type != List { + return false + } + tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0 + return grandparent.Type == List && tightOrTerm +} + +func cellAlignment(align CellAlignFlags) string { + switch align { + case TableAlignmentLeft: + return "left" + case TableAlignmentRight: + return "right" + case TableAlignmentCenter: + return "center" + default: + return "" + } +} + +func (r *HTMLRenderer) out(w io.Writer, text []byte) { + if r.disableTags > 0 { + w.Write(htmlTagRe.ReplaceAll(text, []byte{})) + } else { + w.Write(text) + } + r.lastOutputLen = len(text) +} + +func (r *HTMLRenderer) cr(w io.Writer) { + if r.lastOutputLen > 0 { + r.out(w, nlBytes) + } +} + +var ( + nlBytes = []byte{'\n'} + gtBytes = []byte{'>'} + spaceBytes = []byte{' '} +) + +var ( + brTag = []byte("
    ") + brXHTMLTag = []byte("
    ") + emTag = []byte("") + emCloseTag = []byte("") + strongTag = []byte("") + strongCloseTag = []byte("") + delTag = []byte("") + delCloseTag = []byte("") + ttTag = []byte("") + ttCloseTag = []byte("") + aTag = []byte("") + preTag = []byte("
    ")
    +	preCloseTag        = []byte("
    ") + codeTag = []byte("") + codeCloseTag = []byte("") + pTag = []byte("

    ") + pCloseTag = []byte("

    ") + blockquoteTag = []byte("
    ") + blockquoteCloseTag = []byte("
    ") + hrTag = []byte("
    ") + hrXHTMLTag = []byte("
    ") + ulTag = []byte("
      ") + ulCloseTag = []byte("
    ") + olTag = []byte("
      ") + olCloseTag = []byte("
    ") + dlTag = []byte("
    ") + dlCloseTag = []byte("
    ") + liTag = []byte("
  • ") + liCloseTag = []byte("
  • ") + ddTag = []byte("
    ") + ddCloseTag = []byte("
    ") + dtTag = []byte("
    ") + dtCloseTag = []byte("
    ") + tableTag = []byte("") + tableCloseTag = []byte("
    ") + tdTag = []byte("") + thTag = []byte("") + theadTag = []byte("") + theadCloseTag = []byte("") + tbodyTag = []byte("") + tbodyCloseTag = []byte("") + trTag = []byte("") + trCloseTag = []byte("") + h1Tag = []byte("") + h2Tag = []byte("") + h3Tag = []byte("") + h4Tag = []byte("") + h5Tag = []byte("") + h6Tag = []byte("") + + footnotesDivBytes = []byte("\n
    \n\n") + footnotesCloseDivBytes = []byte("\n
    \n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. +func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := 
tdCloseTag + if node.IsHeader { + openTag = thTag + closeTag = thCloseTag + } + if entering { + align := cellAlignment(node.Align) + if align != "" { + attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) + } + if node.Prev == nil { + r.cr(w) + } + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + r.cr(w) + } + case TableHead: + if entering { + r.cr(w) + r.out(w, theadTag) + } else { + r.out(w, theadCloseTag) + r.cr(w) + } + case TableBody: + if entering { + r.cr(w) + r.out(w, tbodyTag) + // XXX: this is to adhere to a rather silly test. Should fix test. + if node.FirstChild == nil { + r.cr(w) + } + } else { + r.out(w, tbodyCloseTag) + r.cr(w) + } + case TableRow: + if entering { + r.cr(w) + r.out(w, trTag) + } else { + r.out(w, trCloseTag) + r.cr(w) + } + default: + panic("Unknown node type " + node.Type.String()) + } + return GoToNext +} + +// RenderHeader writes HTML document preamble and TOC if requested. +func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { + r.writeDocumentHeader(w) + if r.Flags&TOC != 0 { + r.writeTOC(w, ast) + } +} + +// RenderFooter writes HTML document footer. +func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { + if r.Flags&CompletePage == 0 { + return + } + io.WriteString(w, "\n\n\n") +} + +func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { + if r.Flags&CompletePage == 0 { + return + } + ending := "" + if r.Flags&UseXHTML != 0 { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + ending = " /" + } else { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + } + io.WriteString(w, "\n") + io.WriteString(w, " ") + if r.Flags&Smartypants != 0 { + r.sr.Process(w, []byte(r.Title)) + } else { + escapeHTML(w, []byte(r.Title)) + } + io.WriteString(w, "\n") + io.WriteString(w, " \n") + io.WriteString(w, " \n") + if r.CSS != "" { + io.WriteString(w, " \n") + } + if r.Icon != "" { + io.WriteString(w, " \n") + } + io.WriteString(w, "\n") + io.WriteString(w, "\n\n") +} + +func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { + buf := bytes.Buffer{} + + inHeading := false + tocLevel := 0 + headingCount := 0 + + ast.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Heading && !node.HeadingData.IsTitleblock { + inHeading = entering + if entering { + node.HeadingID = fmt.Sprintf("toc_%d", headingCount) + if node.Level == tocLevel { + buf.WriteString("\n\n
  • ") + } else if node.Level < tocLevel { + for node.Level < tocLevel { + tocLevel-- + buf.WriteString("
  • \n") + } + buf.WriteString("\n\n
  • ") + } else { + for node.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go new file mode 100644 index 000000000..d45bd9417 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -0,0 +1,1228 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse inline elements. +// + +package blackfriday + +import ( + "bytes" + "regexp" + "strconv" +) + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) + + // https://www.w3.org/TR/html5/syntax.html#character-references + // highest unicode code point in 17 planes (2^20): 1,114,112d = + // 7 dec digits or 6 hex digits + // named entity references can be 2-31 characters with stuff like < + // at one end and ∳ at the other. There + // are also sometimes numbers at the end, although this isn't inherent + // in the specification; there are never numbers anywhere else in + // current character references, though; see ¾ and ▒, etc. + // https://www.w3.org/TR/html5/syntax.html#named-character-references + // + // entity := "&" (named group | number ref) ";" + // named group := [a-zA-Z]{2,31}[0-9]{0,2} + // number ref := "#" (dec ref | hex ref) + // dec ref := [0-9]{1,7} + // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} + htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) +) + +// Functions to parse text within a block +// Each function returns the number of chars taken care of +// data is the complete block being rendered +// offset is the number of valid chars before the current cursor + +func (p *Markdown) inline(currBlock *Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + for end < len(data) { + handler := p.inlineCallback[data[end]] + if handler != nil { + if consumed, node := handler(p, data, end); consumed == 0 { + // No action from the callback. + end++ + } else { + // Copy inactive chars into the output. + currBlock.AppendChild(text(data[beg:end])) + if node != nil { + currBlock.AppendChild(node) + } + // Skip past whatever the callback used. 
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' 
|| data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
+ } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil + } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
+ } + node.AppendChild(text(stripMailto(link))) + return end, node + } + } else { + htmlTag := NewNode(HTMLSpan) + htmlTag.Literal = data[:end] + return end, htmlTag + } + } + + return end, nil +} + +// '\\' backslash escape +var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") + +func escape(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + if len(data) > 1 { + if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { + return 2, NewNode(Hardbreak) + } + if bytes.IndexByte(escapeChars, data[1]) < 0 { + return 0, nil + } + + return 2, text(data[1:2]) + } + + return 2, nil +} + +func unescapeText(ob *bytes.Buffer, src []byte) { + i := 0 + for i < len(src) { + org := i + for i < len(src) && src[i] != '\\' { + i++ + } + + if i > org { + ob.Write(src[org:i]) + } + + if i+1 >= len(src) { + break + } + + ob.WriteByte(src[i+1]) + i += 2 + } +} + +// '&' escaped when it doesn't belong to an entity +// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; +func entity(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + end := 1 + + if end < len(data) && data[end] == '#' { + end++ + } + + for end < len(data) && isalnum(data[end]) { + end++ + } + + if end < len(data) && data[end] == ';' { + end++ // real entity + } else { + return 0, nil // lone '&' + } + + ent := data[:end] + // undo & escaping or it will be converted to &amp; by another + // escaper in the renderer + if bytes.Equal(ent, []byte("&")) { + ent = []byte{'&'} + } + + return end, text(ent) +} + +func linkEndsWithEntity(data []byte, linkEnd int) bool { + entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) + return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd +} + +// hasPrefixCaseInsensitive is a custom implementation of +// strings.HasPrefix(strings.ToLower(s), prefix) +// we rolled our own because ToLower pulls in a huge machinery of lowercasing +// anything from Unicode and that's very slow. Since this func will only be +// used on ASCII protocol prefixes, we can take shortcuts. 
+func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. 
+ * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + // skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 000000000..58d2e4538 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). 
The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. + notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. 
+ Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. +type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. +func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. 
+// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. 
+// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. +// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//

//	<p><a href="/url/" title="tooltip title">link</a></p>
    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? 
(space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. +func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
+// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). +func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 000000000..04e6050ce --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. +const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip
<p>
    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. +type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. 
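// As a small illustration (a sketch only; the literal content is arbitrary),
// a caller could assemble a tiny tree by hand with NewNode and AppendChild:
//
//	doc := NewNode(Document)
//	para := NewNode(Paragraph)
//	doc.AppendChild(para)
//	txt := NewNode(Text)
//	txt.Literal = []byte("hello")
//	para.AppendChild(txt)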
+func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. +func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. +// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. 
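// For example, a caller that only needs heading levels from a parsed document
// could walk the tree like this (a minimal sketch; the input variable and the
// chosen extensions are illustrative):
//
//	ast := New(WithExtensions(CommonExtensions)).Parse(input)
//	ast.Walk(func(node *Node, entering bool) WalkStatus {
//		if entering && node.Type == Heading {
//			fmt.Printf("heading level %d\n", node.Level)
//		}
//		return GoToNext
//	})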
+func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 000000000..3a220e942 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" 
] is probably a close + *isOpen = false + case /* isnormal(previousChar) && */ isspace(nextChar): + // [a" ] this is one of the easy cases + *isOpen = false + case previousChar == 0 && ispunct(nextChar): + // ["!] hmm... could be ["$1.95] or ["!...] + *isOpen = false + case isspace(previousChar) && ispunct(nextChar): + // [ "!] looks more like [ "$1.95] + *isOpen = true + case ispunct(previousChar) && ispunct(nextChar): + // [!"!] context is not any help here, so toggle + *isOpen = !*isOpen + case /* isnormal(previousChar) && */ ispunct(nextChar): + // [a"!] is probably a close + *isOpen = false + case previousChar == 0 /* && isnormal(nextChar) */ : + // ["a] is probably an open + *isOpen = true + case isspace(previousChar) /* && isnormal(nextChar) */ : + // [ "a] this is one of the easy cases + *isOpen = true + case ispunct(previousChar) /* && isnormal(nextChar) */ : + // [!"a] is probably an open + *isOpen = true + default: + // [a'b] maybe a contraction? + *isOpen = false + } + + // Note that with the limited lookahead, this non-breaking + // space will also be appended to single double quotes. + if addNBSP && !*isOpen { + out.WriteString(" ") + } + + out.WriteByte('&') + if *isOpen { + out.WriteByte('l') + } else { + out.WriteByte('r') + } + out.WriteByte(quote) + out.WriteString("quo;") + + if addNBSP && *isOpen { + out.WriteString(" ") + } + + return true +} + +func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + t1 := tolower(text[1]) + + if t1 == '\'' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { + out.WriteString("’") + return 0 + } + + if len(text) >= 3 { + t2 := tolower(text[2]) + + if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && + (len(text) < 4 || wordBoundary(text[3])) { + out.WriteString("’") + return 0 + } + } + } + + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { + return 0 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 { + t1 := tolower(text[1]) + t2 := tolower(text[2]) + + if t1 == 'c' && t2 == ')' { + out.WriteString("©") + return 2 + } + + if t1 == 'r' && t2 == ')' { + out.WriteString("®") + return 2 + } + + if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { + out.WriteString("™") + return 3 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + if text[1] == '-' { + out.WriteString("—") + return 1 + } + + if wordBoundary(previousChar) && wordBoundary(text[1]) { + out.WriteString("–") + return 0 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '-' && text[2] == '-' { + out.WriteString("—") + return 2 + } + if len(text) >= 2 && text[1] == '-' { + out.WriteString("–") + return 1 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { + if bytes.HasPrefix(text, []byte(""")) 
{ + nextChar := byte(0) + if len(text) >= 7 { + nextChar = text[6] + } + if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { + return 5 + } + } + + if bytes.HasPrefix(text, []byte("�")) { + return 3 + } + + out.WriteByte('&') + return 0 +} + +func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { + var quote byte = 'd' + if angledQuotes { + quote = 'a' + } + + return func(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) + } +} + +func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '.' && text[2] == '.' { + out.WriteString("…") + return 2 + } + + if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { + out.WriteString("…") + return 4 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 && text[1] == '`' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b + // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) + // and avoid changing dates like 1/23/2005 into fractions. + numEnd := 0 + for len(text) > numEnd && isdigit(text[numEnd]) { + numEnd++ + } + if numEnd == 0 { + out.WriteByte(text[0]) + return 0 + } + denStart := numEnd + 1 + if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { + denStart = numEnd + 3 + } else if len(text) < numEnd+2 || text[numEnd] != '/' { + out.WriteByte(text[0]) + return 0 + } + denEnd := denStart + for len(text) > denEnd && isdigit(text[denEnd]) { + denEnd++ + } + if denEnd == denStart { + out.WriteByte(text[0]) + return 0 + } + if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { + out.WriteString("") + out.Write(text[:numEnd]) + out.WriteString("") + out.Write(text[denStart:denEnd]) + out.WriteString("") + return denEnd - 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + if text[0] == '1' && text[1] == '/' && text[2] == '2' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { + out.WriteString("½") + return 2 + } + } + + if text[0] == '1' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { + out.WriteString("¼") + return 2 + } + } + + if text[0] == '3' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { + out.WriteString("¾") + return 2 + } + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { + nextChar := byte(0) + if len(text) > 
1 { + nextChar = text[1] + } + if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { + out.WriteString(""") + } + + return 0 +} + +func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') +} + +func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') +} + +func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { + i := 0 + + for i < len(text) && text[i] != '>' { + i++ + } + + out.Write(text[:i+1]) + return i +} + +type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int + +// NewSmartypantsRenderer constructs a Smartypants renderer object. +func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { + var ( + r SPRenderer + + smartAmpAngled = r.smartAmp(true, false) + smartAmpAngledNBSP = r.smartAmp(true, true) + smartAmpRegular = r.smartAmp(false, false) + smartAmpRegularNBSP = r.smartAmp(false, true) + + addNBSP = flags&SmartypantsQuotesNBSP != 0 + ) + + if flags&SmartypantsAngledQuotes == 0 { + r.callbacks['"'] = r.smartDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpRegular + } else { + r.callbacks['&'] = smartAmpRegularNBSP + } + } else { + r.callbacks['"'] = r.smartAngledDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpAngled + } else { + r.callbacks['&'] = smartAmpAngledNBSP + } + } + r.callbacks['\''] = r.smartSingleQuote + r.callbacks['('] = r.smartParens + if flags&SmartypantsDashes != 0 { + if flags&SmartypantsLatexDashes == 0 { + r.callbacks['-'] = r.smartDash + } else { + r.callbacks['-'] = r.smartDashLatex + } + } + r.callbacks['.'] = r.smartPeriod + if flags&SmartypantsFractions == 0 { + r.callbacks['1'] = r.smartNumber + r.callbacks['3'] = r.smartNumber + } else { + for ch := '1'; ch <= '9'; ch++ { + r.callbacks[ch] = r.smartNumberGeneric + } + } + r.callbacks['<'] = r.smartLeftAngle + r.callbacks['`'] = r.smartBacktick + return &r +} + +// Process is the entry point of the Smartypants renderer. 
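// Used directly, outside the HTML renderer, it looks roughly like this
// (a sketch; the flag combination and the buffer name are illustrative):
//
//	sp := NewSmartypantsRenderer(SmartypantsFractions | SmartypantsDashes)
//	var out bytes.Buffer
//	sp.Process(&out, []byte(`"quotes" -- and 1/2 fractions`))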
+func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/vendor/github.com/safchain/ethtool/.gitignore b/vendor/github.com/safchain/ethtool/.gitignore new file mode 100644 index 000000000..db6cadffd --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.gitignore @@ -0,0 +1,27 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Skip compiled example binary file +/example/example diff --git a/vendor/github.com/safchain/ethtool/.golangci.yml b/vendor/github.com/safchain/ethtool/.golangci.yml new file mode 100644 index 000000000..77ccf927e --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.golangci.yml @@ -0,0 +1,14 @@ +linters: + disable: + - gosimple + - unused + enable: + - gci + - gofmt + - misspell +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/safchain/ethtool) diff --git a/vendor/github.com/safchain/ethtool/.yamllint b/vendor/github.com/safchain/ethtool/.yamllint new file mode 100644 index 000000000..9862c5f78 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.yamllint @@ -0,0 +1,7 @@ +--- +extends: default + +rules: + document-start: disable + truthy: + check-keys: false diff --git a/vendor/github.com/safchain/ethtool/LICENSE b/vendor/github.com/safchain/ethtool/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/vendor/github.com/safchain/ethtool/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/safchain/ethtool/Makefile b/vendor/github.com/safchain/ethtool/Makefile new file mode 100644 index 000000000..67d2da395 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/Makefile @@ -0,0 +1,4 @@ +all: build + +build: + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build diff --git a/vendor/github.com/safchain/ethtool/README.md b/vendor/github.com/safchain/ethtool/README.md new file mode 100644 index 000000000..e44367582 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/README.md @@ -0,0 +1,55 @@ +# ethtool go package # + +![Build Status](https://github.com/safchain/ethtool/actions/workflows/unittests.yml/badge.svg) +[![GoDoc](https://godoc.org/github.com/safchain/ethtool?status.svg)](https://godoc.org/github.com/safchain/ethtool) + + +The ethtool package aims to provide a library that provides easy access to the Linux SIOCETHTOOL ioctl operations. 
It can be used to retrieve information from a network device such as statistics, driver related information or even the peer of a VETH interface. + +# Installation + +```shell +go get github.com/safchain/ethtool +``` + +# How to use + +```go +package main + +import ( + "fmt" + + "github.com/safchain/ethtool" +) + +func main() { + ethHandle, err := ethtool.NewEthtool() + if err != nil { + panic(err.Error()) + } + defer ethHandle.Close() + + // Retrieve tx from eth0 + stats, err := ethHandle.Stats("eth0") + if err != nil { + panic(err.Error()) + } + fmt.Printf("TX: %d\n", stats["tx_bytes"]) + + // Retrieve peer index of a veth interface + stats, err = ethHandle.Stats("veth0") + if err != nil { + panic(err.Error()) + } + fmt.Printf("Peer Index: %d\n", stats["peer_ifindex"]) +} +``` + +## LICENSE ## + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/vendor/github.com/safchain/ethtool/ethtool.go b/vendor/github.com/safchain/ethtool/ethtool.go new file mode 100644 index 000000000..42fc34520 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool.go @@ -0,0 +1,1012 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +// The ethtool package aims to provide a library that provides easy access +// to the Linux SIOCETHTOOL ioctl operations. It can be used to retrieve information +// from a network device such as statistics, driver related information or even +// the peer of a VETH interface. +package ethtool + +import ( + "bytes" + "encoding/hex" + "fmt" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Maximum size of an interface name +const ( + IFNAMSIZ = 16 +) + +// ioctl ethtool request +const ( + SIOCETHTOOL = 0x8946 +) + +// ethtool stats related constants. +const ( + ETH_GSTRING_LEN = 32 + ETH_SS_STATS = 1 + ETH_SS_PRIV_FLAGS = 2 + ETH_SS_FEATURES = 4 + + // CMD supported + ETHTOOL_GSET = 0x00000001 /* Get settings. */ + ETHTOOL_SSET = 0x00000002 /* Set settings. */ + ETHTOOL_GDRVINFO = 0x00000003 /* Get driver info. */ + ETHTOOL_GMSGLVL = 0x00000007 /* Get driver message level */ + ETHTOOL_SMSGLVL = 0x00000008 /* Set driver msg level. */ + + /* Get link status for host, i.e. whether the interface *and* the + * physical port (if there is one) are up (ethtool_value). 
*/ + ETHTOOL_GLINK = 0x0000000a + ETHTOOL_GCOALESCE = 0x0000000e /* Get coalesce config */ + ETHTOOL_GRINGPARAM = 0x00000010 /* Get ring parameters */ + ETHTOOL_SRINGPARAM = 0x00000011 /* Set ring parameters. */ + ETHTOOL_GPAUSEPARAM = 0x00000012 /* Get pause parameters */ + ETHTOOL_SPAUSEPARAM = 0x00000013 /* Set pause parameters. */ + ETHTOOL_GSTRINGS = 0x0000001b /* Get specified string set */ + ETHTOOL_GSTATS = 0x0000001d /* Get NIC-specific statistics */ + ETHTOOL_GPERMADDR = 0x00000020 /* Get permanent hardware address */ + ETHTOOL_GFLAGS = 0x00000025 /* Get flags bitmap(ethtool_value) */ + ETHTOOL_GPFLAGS = 0x00000027 /* Get driver-private flags bitmap */ + ETHTOOL_SPFLAGS = 0x00000028 /* Set driver-private flags bitmap */ + ETHTOOL_GSSET_INFO = 0x00000037 /* Get string set info */ + ETHTOOL_GFEATURES = 0x0000003a /* Get device offload settings */ + ETHTOOL_SFEATURES = 0x0000003b /* Change device offload settings */ + ETHTOOL_GCHANNELS = 0x0000003c /* Get no of channels */ + ETHTOOL_SCHANNELS = 0x0000003d /* Set no of channels */ + ETHTOOL_GET_TS_INFO = 0x00000041 /* Get time stamping and PHC info */ + ETHTOOL_GMODULEINFO = 0x00000042 /* Get plug-in module information */ + ETHTOOL_GMODULEEEPROM = 0x00000043 /* Get plug-in module eeprom */ +) + +// MAX_GSTRINGS maximum number of stats entries that ethtool can +// retrieve currently. +const ( + MAX_GSTRINGS = 32768 + MAX_FEATURE_BLOCKS = (MAX_GSTRINGS + 32 - 1) / 32 + EEPROM_LEN = 640 + PERMADDR_LEN = 32 +) + +// ethtool sset_info related constants +const ( + MAX_SSET_INFO = 64 +) + +var supportedCapabilities = []struct { + name string + mask uint64 + speed uint64 +}{ + {"10baseT_Half", unix.ETHTOOL_LINK_MODE_10baseT_Half_BIT, 10_000_000}, + {"10baseT_Full", unix.ETHTOOL_LINK_MODE_10baseT_Full_BIT, 10_000_000}, + {"100baseT_Half", unix.ETHTOOL_LINK_MODE_100baseT_Half_BIT, 100_000_000}, + {"100baseT_Full", unix.ETHTOOL_LINK_MODE_100baseT_Full_BIT, 100_000_000}, + {"1000baseT_Half", unix.ETHTOOL_LINK_MODE_1000baseT_Half_BIT, 1_000_000_000}, + {"1000baseT_Full", unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1_000_000_000}, + {"10000baseT_Full", unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 10_000_000_000}, + {"2500baseT_Full", unix.ETHTOOL_LINK_MODE_2500baseT_Full_BIT, 2_500_000_000}, + {"1000baseKX_Full", unix.ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 1_000_000_000}, + {"10000baseKX_Full", unix.ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 10_000_000_000}, + {"10000baseKR_Full", unix.ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 10_000_000_000}, + {"10000baseR_FEC", unix.ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 10_000_000_000}, + {"20000baseMLD2_Full", unix.ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, 20_000_000_000}, + {"20000baseKR2_Full", unix.ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 20_000_000_000}, + {"40000baseKR4_Full", unix.ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 40_000_000_000}, + {"40000baseCR4_Full", unix.ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 40_000_000_000}, + {"40000baseSR4_Full", unix.ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 40_000_000_000}, + {"40000baseLR4_Full", unix.ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 40_000_000_000}, + {"56000baseKR4_Full", unix.ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 56_000_000_000}, + {"56000baseCR4_Full", unix.ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 56_000_000_000}, + {"56000baseSR4_Full", unix.ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 56_000_000_000}, + {"56000baseLR4_Full", unix.ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 56_000_000_000}, + {"25000baseCR_Full", unix.ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 
25_000_000_000}, +} + +type ifreq struct { + ifr_name [IFNAMSIZ]byte + ifr_data uintptr +} + +// following structures comes from uapi/linux/ethtool.h +type ethtoolSsetInfo struct { + cmd uint32 + reserved uint32 + sset_mask uint64 + data [MAX_SSET_INFO]uint32 +} + +type ethtoolGetFeaturesBlock struct { + available uint32 + requested uint32 + active uint32 + never_changed uint32 +} + +type ethtoolGfeatures struct { + cmd uint32 + size uint32 + blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock +} + +type ethtoolSetFeaturesBlock struct { + valid uint32 + requested uint32 +} + +type ethtoolSfeatures struct { + cmd uint32 + size uint32 + blocks [MAX_FEATURE_BLOCKS]ethtoolSetFeaturesBlock +} + +type ethtoolDrvInfo struct { + cmd uint32 + driver [32]byte + version [32]byte + fw_version [32]byte + bus_info [32]byte + erom_version [32]byte + reserved2 [12]byte + n_priv_flags uint32 + n_stats uint32 + testinfo_len uint32 + eedump_len uint32 + regdump_len uint32 +} + +// DrvInfo contains driver information +// ethtool.h v3.5: struct ethtool_drvinfo +type DrvInfo struct { + Cmd uint32 + Driver string + Version string + FwVersion string + BusInfo string + EromVersion string + Reserved2 string + NPrivFlags uint32 + NStats uint32 + TestInfoLen uint32 + EedumpLen uint32 + RegdumpLen uint32 +} + +// Channels contains the number of channels for a given interface. +type Channels struct { + Cmd uint32 + MaxRx uint32 + MaxTx uint32 + MaxOther uint32 + MaxCombined uint32 + RxCount uint32 + TxCount uint32 + OtherCount uint32 + CombinedCount uint32 +} + +// Coalesce is a coalesce config for an interface +type Coalesce struct { + Cmd uint32 + RxCoalesceUsecs uint32 + RxMaxCoalescedFrames uint32 + RxCoalesceUsecsIrq uint32 + RxMaxCoalescedFramesIrq uint32 + TxCoalesceUsecs uint32 + TxMaxCoalescedFrames uint32 + TxCoalesceUsecsIrq uint32 + TxMaxCoalescedFramesIrq uint32 + StatsBlockCoalesceUsecs uint32 + UseAdaptiveRxCoalesce uint32 + UseAdaptiveTxCoalesce uint32 + PktRateLow uint32 + RxCoalesceUsecsLow uint32 + RxMaxCoalescedFramesLow uint32 + TxCoalesceUsecsLow uint32 + TxMaxCoalescedFramesLow uint32 + PktRateHigh uint32 + RxCoalesceUsecsHigh uint32 + RxMaxCoalescedFramesHigh uint32 + TxCoalesceUsecsHigh uint32 + TxMaxCoalescedFramesHigh uint32 + RateSampleInterval uint32 +} + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = (1 << 0) + SOF_TIMESTAMPING_TX_SOFTWARE = (1 << 1) + SOF_TIMESTAMPING_RX_HARDWARE = (1 << 2) + SOF_TIMESTAMPING_RX_SOFTWARE = (1 << 3) + SOF_TIMESTAMPING_SOFTWARE = (1 << 4) + SOF_TIMESTAMPING_SYS_HARDWARE = (1 << 5) + SOF_TIMESTAMPING_RAW_HARDWARE = (1 << 6) + SOF_TIMESTAMPING_OPT_ID = (1 << 7) + SOF_TIMESTAMPING_TX_SCHED = (1 << 8) + SOF_TIMESTAMPING_TX_ACK = (1 << 9) + SOF_TIMESTAMPING_OPT_CMSG = (1 << 10) + SOF_TIMESTAMPING_OPT_TSONLY = (1 << 11) + SOF_TIMESTAMPING_OPT_STATS = (1 << 12) + SOF_TIMESTAMPING_OPT_PKTINFO = (1 << 13) + SOF_TIMESTAMPING_OPT_TX_SWHW = (1 << 14) + SOF_TIMESTAMPING_BIND_PHC = (1 << 15) +) + +const ( + /* + * No outgoing packet will need hardware time stamping; + * should a packet arrive which asks for it, no hardware + * time stamping will be done. + */ + HWTSTAMP_TX_OFF = iota + + /* + * Enables hardware time stamping for outgoing packets; + * the sender of the packet decides which are to be + * time stamped by setting %SOF_TIMESTAMPING_TX_SOFTWARE + * before sending the packet. + */ + HWTSTAMP_TX_ON + + /* + * Enables time stamping for outgoing packets just as + * HWTSTAMP_TX_ON does, but also enables time stamp insertion + * directly into Sync packets. 
In this case, transmitted Sync + * packets will not received a time stamp via the socket error + * queue. + */ + HWTSTAMP_TX_ONESTEP_SYNC + + /* + * Same as HWTSTAMP_TX_ONESTEP_SYNC, but also enables time + * stamp insertion directly into PDelay_Resp packets. In this + * case, neither transmitted Sync nor PDelay_Resp packets will + * receive a time stamp via the socket error queue. + */ + HWTSTAMP_TX_ONESTEP_P2P +) + +const ( + HWTSTAMP_FILTER_NONE = iota /* time stamp no incoming packet at all */ + HWTSTAMP_FILTER_ALL /* time stamp any incoming packet */ + HWTSTAMP_FILTER_SOME /* return value: time stamp all packets requested plus some others */ + HWTSTAMP_FILTER_PTP_V1_L4_EVENT /* PTP v1, UDP, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V1_L4_SYNC /* PTP v1, UDP, Sync packet */ + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ /* PTP v1, UDP, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_L4_EVENT /* PTP v2, UDP, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_L4_SYNC /* PTP v2, UDP, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ /* PTP v2, UDP, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_L2_EVENT /* 802.AS1, Ethernet, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_L2_SYNC /* 802.AS1, Ethernet, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ /* 802.AS1, Ethernet, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_EVENT /* PTP v2/802.AS1, any layer, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_SYNC /* PTP v2/802.AS1, any layer, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ /* PTP v2/802.AS1, any layer, Delay_req packet */ + HWTSTAMP_FILTER_NTP_ALL /* NTP, UDP, all versions and packet modes */ +) + +type TimestampingInformation struct { + Cmd uint32 + SoTimestamping uint32 /* SOF_TIMESTAMPING_* bitmask */ + PhcIndex int32 + TxTypes uint32 /* HWTSTAMP_TX_* */ + txReserved [3]uint32 + RxFilters uint32 /* HWTSTAMP_FILTER_ */ + rxReserved [3]uint32 +} + +type ethtoolGStrings struct { + cmd uint32 + string_set uint32 + len uint32 + data [MAX_GSTRINGS * ETH_GSTRING_LEN]byte +} + +type ethtoolStats struct { + cmd uint32 + n_stats uint32 + data [MAX_GSTRINGS]uint64 +} + +type ethtoolEeprom struct { + cmd uint32 + magic uint32 + offset uint32 + len uint32 + data [EEPROM_LEN]byte +} + +type ethtoolModInfo struct { + cmd uint32 + tpe uint32 + eeprom_len uint32 + reserved [8]uint32 +} + +type ethtoolLink struct { + cmd uint32 + data uint32 +} + +type ethtoolPermAddr struct { + cmd uint32 + size uint32 + data [PERMADDR_LEN]byte +} + +// Ring is a ring config for an interface +type Ring struct { + Cmd uint32 + RxMaxPending uint32 + RxMiniMaxPending uint32 + RxJumboMaxPending uint32 + TxMaxPending uint32 + RxPending uint32 + RxMiniPending uint32 + RxJumboPending uint32 + TxPending uint32 +} + +// Pause is a pause config for an interface +type Pause struct { + Cmd uint32 + Autoneg uint32 + RxPause uint32 + TxPause uint32 +} + +type Ethtool struct { + fd int +} + +// Convert zero-terminated array of chars (string in C) to a Go string. +func goString(s []byte) string { + strEnd := bytes.IndexByte(s, 0) + if strEnd == -1 { + return string(s[:]) + } + return string(s[:strEnd]) +} + +// DriverName returns the driver name of the given interface name. +func (e *Ethtool) DriverName(intf string) (string, error) { + info, err := e.getDriverInfo(intf) + if err != nil { + return "", err + } + return goString(info.driver[:]), nil +} + +// BusInfo returns the bus information of the given interface name. 
+func (e *Ethtool) BusInfo(intf string) (string, error) { + info, err := e.getDriverInfo(intf) + if err != nil { + return "", err + } + return goString(info.bus_info[:]), nil +} + +// ModuleEeprom returns Eeprom information of the given interface name. +func (e *Ethtool) ModuleEeprom(intf string) ([]byte, error) { + eeprom, _, err := e.getModuleEeprom(intf) + if err != nil { + return nil, err + } + + return eeprom.data[:eeprom.len], nil +} + +// ModuleEeprom returns Eeprom information of the given interface name. +func (e *Ethtool) ModuleEepromHex(intf string) (string, error) { + eeprom, _, err := e.getModuleEeprom(intf) + if err != nil { + return "", err + } + + return hex.EncodeToString(eeprom.data[:eeprom.len]), nil +} + +// DriverInfo returns driver information of the given interface name. +func (e *Ethtool) DriverInfo(intf string) (DrvInfo, error) { + i, err := e.getDriverInfo(intf) + if err != nil { + return DrvInfo{}, err + } + + drvInfo := DrvInfo{ + Cmd: i.cmd, + Driver: goString(i.driver[:]), + Version: goString(i.version[:]), + FwVersion: goString(i.fw_version[:]), + BusInfo: goString(i.bus_info[:]), + EromVersion: goString(i.erom_version[:]), + Reserved2: goString(i.reserved2[:]), + NPrivFlags: i.n_priv_flags, + NStats: i.n_stats, + TestInfoLen: i.testinfo_len, + EedumpLen: i.eedump_len, + RegdumpLen: i.regdump_len, + } + + return drvInfo, nil +} + +// GetChannels returns the number of channels for the given interface name. +func (e *Ethtool) GetChannels(intf string) (Channels, error) { + channels, err := e.getChannels(intf) + if err != nil { + return Channels{}, err + } + + return channels, nil +} + +// SetChannels sets the number of channels for the given interface name and +// returns the new number of channels. +func (e *Ethtool) SetChannels(intf string, channels Channels) (Channels, error) { + channels, err := e.setChannels(intf, channels) + if err != nil { + return Channels{}, err + } + + return channels, nil +} + +// GetCoalesce returns the coalesce config for the given interface name. +func (e *Ethtool) GetCoalesce(intf string) (Coalesce, error) { + coalesce, err := e.getCoalesce(intf) + if err != nil { + return Coalesce{}, err + } + return coalesce, nil +} + +// GetTimestampingInformation returns the PTP timestamping information for the given interface name. +func (e *Ethtool) GetTimestampingInformation(intf string) (TimestampingInformation, error) { + ts, err := e.getTimestampingInformation(intf) + if err != nil { + return TimestampingInformation{}, err + } + return ts, nil +} + +// PermAddr returns permanent address of the given interface name. 
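+// Added usage sketch (editorial illustration, not upstream documentation;
+// "eth0" is an assumed interface name and e is a handle obtained from
+// NewEthtool): an all-zero permanent address is reported as the empty string.
+//
+//	mac, err := e.PermAddr("eth0") // e.g. "52:54:00:12:34:56"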
+func (e *Ethtool) PermAddr(intf string) (string, error) { + permAddr, err := e.getPermAddr(intf) + if err != nil { + return "", err + } + + if permAddr.data[0] == 0 && permAddr.data[1] == 0 && + permAddr.data[2] == 0 && permAddr.data[3] == 0 && + permAddr.data[4] == 0 && permAddr.data[5] == 0 { + return "", nil + } + + return fmt.Sprintf("%x:%x:%x:%x:%x:%x", + permAddr.data[0:1], + permAddr.data[1:2], + permAddr.data[2:3], + permAddr.data[3:4], + permAddr.data[4:5], + permAddr.data[5:6], + ), nil +} + +func (e *Ethtool) ioctl(intf string, data uintptr) error { + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: data, + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return ep + } + + return nil +} + +func (e *Ethtool) getDriverInfo(intf string) (ethtoolDrvInfo, error) { + drvinfo := ethtoolDrvInfo{ + cmd: ETHTOOL_GDRVINFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&drvinfo))); err != nil { + return ethtoolDrvInfo{}, err + } + + return drvinfo, nil +} + +func (e *Ethtool) getChannels(intf string) (Channels, error) { + channels := Channels{ + Cmd: ETHTOOL_GCHANNELS, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&channels))); err != nil { + return Channels{}, err + } + + return channels, nil +} + +func (e *Ethtool) setChannels(intf string, channels Channels) (Channels, error) { + channels.Cmd = ETHTOOL_SCHANNELS + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&channels))); err != nil { + return Channels{}, err + } + + return channels, nil +} + +func (e *Ethtool) getCoalesce(intf string) (Coalesce, error) { + coalesce := Coalesce{ + Cmd: ETHTOOL_GCOALESCE, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&coalesce))); err != nil { + return Coalesce{}, err + } + + return coalesce, nil +} + +func (e *Ethtool) getTimestampingInformation(intf string) (TimestampingInformation, error) { + ts := TimestampingInformation{ + Cmd: ETHTOOL_GET_TS_INFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ts))); err != nil { + return TimestampingInformation{}, err + } + + return ts, nil +} + +func (e *Ethtool) getPermAddr(intf string) (ethtoolPermAddr, error) { + permAddr := ethtoolPermAddr{ + cmd: ETHTOOL_GPERMADDR, + size: PERMADDR_LEN, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&permAddr))); err != nil { + return ethtoolPermAddr{}, err + } + + return permAddr, nil +} + +func (e *Ethtool) getModuleEeprom(intf string) (ethtoolEeprom, ethtoolModInfo, error) { + modInfo := ethtoolModInfo{ + cmd: ETHTOOL_GMODULEINFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&modInfo))); err != nil { + return ethtoolEeprom{}, ethtoolModInfo{}, err + } + + eeprom := ethtoolEeprom{ + cmd: ETHTOOL_GMODULEEEPROM, + len: modInfo.eeprom_len, + offset: 0, + } + + if modInfo.eeprom_len > EEPROM_LEN { + return ethtoolEeprom{}, ethtoolModInfo{}, fmt.Errorf("eeprom size: %d is larger than buffer size: %d", modInfo.eeprom_len, EEPROM_LEN) + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&eeprom))); err != nil { + return ethtoolEeprom{}, ethtoolModInfo{}, err + } + + return eeprom, modInfo, nil +} + +// GetRing retrieves ring parameters of the given interface name. 
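+// Added usage sketch (editorial illustration, not upstream documentation;
+// "eth0" is an assumed interface name and the driver must support
+// ETHTOOL_SRINGPARAM for the set call to succeed):
+//
+//	e, err := ethtool.NewEthtool()
+//	if err != nil {
+//		return err
+//	}
+//	defer e.Close()
+//	ring, err := e.GetRing("eth0")
+//	if err != nil {
+//		return err
+//	}
+//	ring.RxPending = ring.RxMaxPending // grow the RX ring to its advertised maximum
+//	ring, err = e.SetRing("eth0", ring)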
+func (e *Ethtool) GetRing(intf string) (Ring, error) { + ring := Ring{ + Cmd: ETHTOOL_GRINGPARAM, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ring))); err != nil { + return Ring{}, err + } + + return ring, nil +} + +// SetRing sets ring parameters of the given interface name. +func (e *Ethtool) SetRing(intf string, ring Ring) (Ring, error) { + ring.Cmd = ETHTOOL_SRINGPARAM + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ring))); err != nil { + return Ring{}, err + } + + return ring, nil +} + +// GetPause retrieves pause parameters of the given interface name. +func (e *Ethtool) GetPause(intf string) (Pause, error) { + pause := Pause{ + Cmd: ETHTOOL_GPAUSEPARAM, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&pause))); err != nil { + return Pause{}, err + } + + return pause, nil +} + +// SetPause sets pause parameters of the given interface name. +func (e *Ethtool) SetPause(intf string, pause Pause) (Pause, error) { + pause.Cmd = ETHTOOL_SPAUSEPARAM + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&pause))); err != nil { + return Pause{}, err + } + + return pause, nil +} + +func isFeatureBitSet(blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock, index uint) bool { + return (blocks)[index/32].active&(1<<(index%32)) != 0 +} + +func setFeatureBit(blocks *[MAX_FEATURE_BLOCKS]ethtoolSetFeaturesBlock, index uint, value bool) { + blockIndex, bitIndex := index/32, index%32 + + blocks[blockIndex].valid |= 1 << bitIndex + + if value { + blocks[blockIndex].requested |= 1 << bitIndex + } else { + blocks[blockIndex].requested &= ^(1 << bitIndex) + } +} + +func (e *Ethtool) getNames(intf string, mask int) (map[string]uint, error) { + ssetInfo := ethtoolSsetInfo{ + cmd: ETHTOOL_GSSET_INFO, + sset_mask: 1 << mask, + data: [MAX_SSET_INFO]uint32{}, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ssetInfo))); err != nil { + return nil, err + } + + /* we only read data on first index because single bit was set in sset_mask(0x10) */ + length := ssetInfo.data[0] + if length == 0 { + return map[string]uint{}, nil + } else if length > MAX_GSTRINGS { + return nil, fmt.Errorf("ethtool currently doesn't support more than %d entries, received %d", MAX_GSTRINGS, length) + } + + gstrings := ethtoolGStrings{ + cmd: ETHTOOL_GSTRINGS, + string_set: uint32(mask), + len: length, + data: [MAX_GSTRINGS * ETH_GSTRING_LEN]byte{}, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&gstrings))); err != nil { + return nil, err + } + + result := make(map[string]uint) + for i := 0; i != int(length); i++ { + b := gstrings.data[i*ETH_GSTRING_LEN : i*ETH_GSTRING_LEN+ETH_GSTRING_LEN] + key := goString(b) + if key != "" { + result[key] = uint(i) + } + } + + return result, nil +} + +// FeatureNames shows supported features by their name. +func (e *Ethtool) FeatureNames(intf string) (map[string]uint, error) { + return e.getNames(intf, ETH_SS_FEATURES) +} + +// Features retrieves features of the given interface name. 
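+// Added usage sketch (editorial illustration, not upstream documentation;
+// "eth0" and the feature key are assumptions, real keys come from
+// FeatureNames, i.e. the kernel's ETH_SS_FEATURES string set; e is a handle
+// from NewEthtool):
+//
+//	features, err := e.Features("eth0")
+//	if err == nil && !features["rx-checksum"] {
+//		err = e.Change("eth0", map[string]bool{"rx-checksum": true})
+//	}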
+func (e *Ethtool) Features(intf string) (map[string]bool, error) {
+	names, err := e.FeatureNames(intf)
+	if err != nil {
+		return nil, err
+	}
+
+	length := uint32(len(names))
+	if length == 0 {
+		return map[string]bool{}, nil
+	}
+
+	features := ethtoolGfeatures{
+		cmd:  ETHTOOL_GFEATURES,
+		size: (length + 32 - 1) / 32,
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&features))); err != nil {
+		return nil, err
+	}
+
+	result := make(map[string]bool, length)
+	for key, index := range names {
+		result[key] = isFeatureBitSet(features.blocks, index)
+	}
+
+	return result, nil
+}
+
+// Change requests a change in the given device's features.
+func (e *Ethtool) Change(intf string, config map[string]bool) error {
+	names, err := e.FeatureNames(intf)
+	if err != nil {
+		return err
+	}
+
+	length := uint32(len(names))
+
+	features := ethtoolSfeatures{
+		cmd:  ETHTOOL_SFEATURES,
+		size: (length + 32 - 1) / 32,
+	}
+
+	for key, value := range config {
+		if index, ok := names[key]; ok {
+			setFeatureBit(&features.blocks, index, value)
+		} else {
+			return fmt.Errorf("unsupported feature %q", key)
+		}
+	}
+
+	return e.ioctl(intf, uintptr(unsafe.Pointer(&features)))
+}
+
+// PrivFlagsNames shows supported private flags by their name.
+func (e *Ethtool) PrivFlagsNames(intf string) (map[string]uint, error) {
+	return e.getNames(intf, ETH_SS_PRIV_FLAGS)
+}
+
+// PrivFlags retrieves private flags of the given interface name.
+func (e *Ethtool) PrivFlags(intf string) (map[string]bool, error) {
+	names, err := e.PrivFlagsNames(intf)
+	if err != nil {
+		return nil, err
+	}
+
+	length := uint32(len(names))
+	if length == 0 {
+		return map[string]bool{}, nil
+	}
+
+	var val ethtoolLink
+	val.cmd = ETHTOOL_GPFLAGS
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&val))); err != nil {
+		return nil, err
+	}
+
+	result := make(map[string]bool, length)
+	for name, mask := range names {
+		result[name] = val.data&(1<<mask) != 0
+	}
+
+	return result, nil
+}
+
+// Stats retrieves stats of the given interface name.
+func (e *Ethtool) Stats(intf string) (map[string]uint64, error) {
+	drvinfo := ethtoolDrvInfo{
+		cmd: ETHTOOL_GDRVINFO,
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&drvinfo))); err != nil {
+		return nil, err
+	}
+
+	if drvinfo.n_stats*ETH_GSTRING_LEN > MAX_GSTRINGS*ETH_GSTRING_LEN {
+		return nil, fmt.Errorf("ethtool currently doesn't support more than %d entries, received %d", MAX_GSTRINGS, drvinfo.n_stats)
+	}
+
+	gstrings := ethtoolGStrings{
+		cmd:        ETHTOOL_GSTRINGS,
+		string_set: ETH_SS_STATS,
+		len:        drvinfo.n_stats,
+		data:       [MAX_GSTRINGS * ETH_GSTRING_LEN]byte{},
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&gstrings))); err != nil {
+		return nil, err
+	}
+
+	stats := ethtoolStats{
+		cmd:     ETHTOOL_GSTATS,
+		n_stats: drvinfo.n_stats,
+		data:    [MAX_GSTRINGS]uint64{},
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&stats))); err != nil {
+		return nil, err
+	}
+
+	result := make(map[string]uint64)
+	for i := 0; i != int(drvinfo.n_stats); i++ {
+		b := gstrings.data[i*ETH_GSTRING_LEN : i*ETH_GSTRING_LEN+ETH_GSTRING_LEN]
+		strEnd := strings.Index(string(b), "\x00")
+		if strEnd == -1 {
+			strEnd = ETH_GSTRING_LEN
+		}
+		key := string(b[:strEnd])
+		if len(key) != 0 {
+			result[key] = stats.data[i]
+		}
+	}
+
+	return result, nil
+}
+
+// Close closes the ethtool handler
+func (e *Ethtool) Close() {
+	unix.Close(e.fd)
+}
+
+// NewEthtool returns a new ethtool handler
+func NewEthtool() (*Ethtool, error) {
+	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Ethtool{
+		fd: int(fd),
+	}, nil
+}
+
+// BusInfo returns bus information of the given interface name.
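+// Editorial note (not upstream text): BusInfo, DriverName, Stats and PermAddr
+// below are one-shot conveniences; each opens a temporary handle with
+// NewEthtool, runs a single query and closes the handle again. Callers issuing
+// many queries can reuse a single *Ethtool handle instead, e.g.
+//
+//	drv, err := ethtool.DriverName("eth0") // one-shot; "eth0" is an assumed interface name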
+func BusInfo(intf string) (string, error) { + e, err := NewEthtool() + if err != nil { + return "", err + } + defer e.Close() + return e.BusInfo(intf) +} + +// DriverName returns the driver name of the given interface name. +func DriverName(intf string) (string, error) { + e, err := NewEthtool() + if err != nil { + return "", err + } + defer e.Close() + return e.DriverName(intf) +} + +// Stats retrieves stats of the given interface name. +func Stats(intf string) (map[string]uint64, error) { + e, err := NewEthtool() + if err != nil { + return nil, err + } + defer e.Close() + return e.Stats(intf) +} + +// PermAddr returns permanent address of the given interface name. +func PermAddr(intf string) (string, error) { + e, err := NewEthtool() + if err != nil { + return "", err + } + defer e.Close() + return e.PermAddr(intf) +} + +func supportedSpeeds(mask uint64) (ret []struct { + name string + mask uint64 + speed uint64 +}) { + for _, mode := range supportedCapabilities { + if ((1 << mode.mask) & mask) != 0 { + ret = append(ret, mode) + } + } + return ret +} + +// SupportedLinkModes returns the names of the link modes supported by the interface. +func SupportedLinkModes(mask uint64) []string { + var ret []string + for _, mode := range supportedSpeeds(mask) { + ret = append(ret, mode.name) + } + return ret +} + +// SupportedSpeed returns the maximum capacity of this interface. +func SupportedSpeed(mask uint64) uint64 { + var ret uint64 + for _, mode := range supportedSpeeds(mask) { + if mode.speed > ret { + ret = mode.speed + } + } + return ret +} diff --git a/vendor/github.com/safchain/ethtool/ethtool_cmd.go b/vendor/github.com/safchain/ethtool/ethtool_cmd.go new file mode 100644 index 000000000..e94d6dd89 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool_cmd.go @@ -0,0 +1,208 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +// Package ethtool aims to provide a library giving a simple access to the +// Linux SIOCETHTOOL ioctl operations. It can be used to retrieve informations +// from a network device like statistics, driver related informations or +// even the peer of a VETH interface. 
+package ethtool + +import ( + "math" + "reflect" + "unsafe" + + "golang.org/x/sys/unix" +) + +type EthtoolCmd struct { /* ethtool.c: struct ethtool_cmd */ + Cmd uint32 + Supported uint32 + Advertising uint32 + Speed uint16 + Duplex uint8 + Port uint8 + Phy_address uint8 + Transceiver uint8 + Autoneg uint8 + Mdio_support uint8 + Maxtxpkt uint32 + Maxrxpkt uint32 + Speed_hi uint16 + Eth_tp_mdix uint8 + Reserved2 uint8 + Lp_advertising uint32 + Reserved [2]uint32 +} + +// CmdGet returns the interface settings in the receiver struct +// and returns speed +func (ecmd *EthtoolCmd) CmdGet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer e.Close() + return e.CmdGet(ecmd, intf) +} + +// CmdSet sets and returns the settings in the receiver struct +// and returns speed +func (ecmd *EthtoolCmd) CmdSet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer e.Close() + return e.CmdSet(ecmd, intf) +} + +func (f *EthtoolCmd) reflect(retv *map[string]uint64) { + val := reflect.ValueOf(f).Elem() + + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + + t := valueField.Interface() + // tt := reflect.TypeOf(t) + // fmt.Printf(" t %T %v tt %T %v\n", t, t, tt, tt) + switch tt := t.(type) { + case uint32: + // fmt.Printf(" t is uint32\n") + (*retv)[typeField.Name] = uint64(tt) + case uint16: + (*retv)[typeField.Name] = uint64(tt) + case uint8: + (*retv)[typeField.Name] = uint64(tt) + case int32: + (*retv)[typeField.Name] = uint64(tt) + case int16: + (*retv)[typeField.Name] = uint64(tt) + case int8: + (*retv)[typeField.Name] = uint64(tt) + default: + (*retv)[typeField.Name+"_unknown_type"] = 0 + } + + // tag := typeField.Tag + // fmt.Printf("Field Name: %s,\t Field Value: %v,\t Tag Value: %s\n", + // typeField.Name, valueField.Interface(), tag.Get("tag_name")) + } +} + +// CmdGet returns the interface settings in the receiver struct +// and returns speed +func (e *Ethtool) CmdGet(ecmd *EthtoolCmd, intf string) (uint32, error) { + ecmd.Cmd = ETHTOOL_GSET + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(ecmd)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, ep + } + + var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) | + (uint32(ecmd.Speed) & 0xffff) + if speedval == math.MaxUint16 { + speedval = math.MaxUint32 + } + + return speedval, nil +} + +// CmdSet sets and returns the settings in the receiver struct +// and returns speed +func (e *Ethtool) CmdSet(ecmd *EthtoolCmd, intf string) (uint32, error) { + ecmd.Cmd = ETHTOOL_SSET + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(ecmd)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, unix.Errno(ep) + } + + var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) | + (uint32(ecmd.Speed) & 0xffff) + if speedval == math.MaxUint16 { + speedval = math.MaxUint32 + } + + return speedval, nil +} + +// CmdGetMapped returns the interface settings in a map +func (e *Ethtool) CmdGetMapped(intf string) (map[string]uint64, error) { + ecmd := EthtoolCmd{ + Cmd: ETHTOOL_GSET, + } + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: 
uintptr(unsafe.Pointer(&ecmd)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return nil, ep + } + + result := make(map[string]uint64) + + // ref https://gist.github.com/drewolson/4771479 + // Golang Reflection Example + ecmd.reflect(&result) + + var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) | + (uint32(ecmd.Speed) & 0xffff) + result["speed"] = uint64(speedval) + + return result, nil +} + +func CmdGetMapped(intf string) (map[string]uint64, error) { + e, err := NewEthtool() + if err != nil { + return nil, err + } + defer e.Close() + return e.CmdGetMapped(intf) +} diff --git a/vendor/github.com/safchain/ethtool/ethtool_msglvl.go b/vendor/github.com/safchain/ethtool/ethtool_msglvl.go new file mode 100644 index 000000000..1f6e338cf --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool_msglvl.go @@ -0,0 +1,114 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +// Package ethtool aims to provide a library giving a simple access to the +// Linux SIOCETHTOOL ioctl operations. It can be used to retrieve informations +// from a network device like statistics, driver related informations or +// even the peer of a VETH interface. +package ethtool + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +type ethtoolValue struct { /* ethtool.c: struct ethtool_value */ + cmd uint32 + data uint32 +} + +// MsglvlGet returns the msglvl of the given interface. +func (e *Ethtool) MsglvlGet(intf string) (uint32, error) { + edata := ethtoolValue{ + cmd: ETHTOOL_GMSGLVL, + } + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(&edata)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, ep + } + + return edata.data, nil +} + +// MsglvlSet returns the read-msglvl, post-set-msglvl of the given interface. +func (e *Ethtool) MsglvlSet(intf string, valset uint32) (uint32, uint32, error) { + edata := ethtoolValue{ + cmd: ETHTOOL_GMSGLVL, + } + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(&edata)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, 0, ep + } + + readval := edata.data + + edata.cmd = ETHTOOL_SMSGLVL + edata.data = valset + + _, _, ep = unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, 0, ep + } + + return readval, edata.data, nil +} + +// MsglvlGet returns the msglvl of the given interface. 
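+// Added usage sketch (editorial illustration, not upstream documentation;
+// "eth0" and the message-level bitmask are assumptions):
+//
+//	old, now, err := ethtool.MsglvlSet("eth0", 0x0007) // returns the previous and the newly set msglvl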
+func MsglvlGet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer e.Close() + return e.MsglvlGet(intf) +} + +// MsglvlSet returns the read-msglvl, post-set-msglvl of the given interface. +func MsglvlSet(intf string, valset uint32) (uint32, uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, 0, err + } + defer e.Close() + return e.MsglvlSet(intf, valset) +} diff --git a/vendor/github.com/urfave/cli/v2/.flake8 b/vendor/github.com/urfave/cli/v2/.flake8 new file mode 100644 index 000000000..6deafc261 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/vendor/github.com/urfave/cli/v2/.gitignore b/vendor/github.com/urfave/cli/v2/.gitignore new file mode 100644 index 000000000..2d5e149b4 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.gitignore @@ -0,0 +1,7 @@ +*.coverprofile +*.orig +node_modules/ +vendor +.idea +internal/*/built-example +coverage.txt diff --git a/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..41ba294f6 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting Dan Buch at dan@meatballhat.com. All complaints will be +reviewed and investigated and will result in a response that is deemed necessary +and appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details of +specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/vendor/github.com/urfave/cli/v2/LICENSE b/vendor/github.com/urfave/cli/v2/LICENSE new file mode 100644 index 000000000..42a597e29 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Jeremy Saenz & Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/urfave/cli/v2/README.md b/vendor/github.com/urfave/cli/v2/README.md new file mode 100644 index 000000000..c9237fbc6 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/README.md @@ -0,0 +1,66 @@ +cli +=== + +[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![codecov](https://codecov.io/gh/urfave/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/cli) + +cli is a simple, fast, and fun package for building command line apps in Go. The +goal is to enable developers to write fast and distributable command line +applications in an expressive way. + +## Usage Documentation + +Usage documentation exists for each major version. Don't know what version you're on? You're probably using the version from the `master` branch, which is currently `v2`. 
+ +- `v2` - [./docs/v2/manual.md](./docs/v2/manual.md) +- `v1` - [./docs/v1/manual.md](./docs/v1/manual.md) + +## Installation + +Make sure you have a working Go environment. Go version 1.11+ is supported. [See the install instructions for Go](http://golang.org/doc/install.html). + +Go Modules are strongly recommended when using this package. [See the go blog guide on using Go Modules](https://blog.golang.org/using-go-modules). + +### Using `v2` releases + +``` +$ GO111MODULE=on go get github.com/urfave/cli/v2 +``` + +```go +... +import ( + "github.com/urfave/cli/v2" // imports as package "cli" +) +... +``` + +### Using `v1` releases + +``` +$ GO111MODULE=on go get github.com/urfave/cli +``` + +```go +... +import ( + "github.com/urfave/cli" +) +... +``` + +### GOPATH + +Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can +be easily used: +``` +export PATH=$PATH:$GOPATH/bin +``` + +### Supported platforms + +cli is tested against multiple versions of Go on Linux, and against the latest +released version of Go on OS X and Windows. This project uses Github Actions for +builds. For more build info, please look at the [./.github/workflows/cli.yml](https://github.com/urfave/cli/blob/master/.github/workflows/cli.yml). diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go new file mode 100644 index 000000000..d0c8f84e2 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/app.go @@ -0,0 +1,542 @@ +package cli + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "time" +) + +var ( + changeLogURL = "https://github.com/urfave/cli/blob/master/docs/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + errInvalidActionType = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) +) + +// App is the main structure of a cli application. It is recommended that +// an app be created with the cli.NewApp() function +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Description of the program argument format. + ArgsUsage string + // Version of the program + Version string + // Description of the program + Description string + // List of commands to execute + Commands []*Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag. + // Ignored if HideHelp is true. 
+ HideHelpCommand bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // categories contains the categorized commands and is populated on app startup + categories CommandCategories + // An action to execute when the shell completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The action to execute when no subcommands are specified + Action ActionFunc + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if an usage error occurs + OnUsageError OnUsageErrorFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []*Author + // Copyright of the binary if any + Copyright string + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // Execute this function to handle ExitErrors. If not provided, HandleExitCoder is provided to + // function as a default, so this is optional. + ExitErrHandler ExitErrHandlerFunc + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. + ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomAppHelpTemplate string + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + didSetup bool +} + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. +func NewApp() *App { + return &App{ + Name: filepath.Base(os.Args[0]), + HelpName: filepath.Base(os.Args[0]), + Usage: "A new cli application", + UsageText: "", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Writer: os.Stdout, + } +} + +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. 
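+// Added reference sketch (editorial illustration, not upstream documentation):
+// a minimal application that relies on Setup being invoked implicitly by Run.
+// The program name, usage string and action body are placeholders.
+//
+//	app := &cli.App{
+//		Name:  "greet",
+//		Usage: "print a greeting",
+//		Action: func(c *cli.Context) error {
+//			fmt.Println("hello", c.Args().First())
+//			return nil
+//		},
+//	}
+//	if err := app.Run(os.Args); err != nil {
+//		log.Fatal(err)
+//	}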
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Name == "" { + a.Name = filepath.Base(os.Args[0]) + } + + if a.HelpName == "" { + a.HelpName = filepath.Base(os.Args[0]) + } + + if a.Usage == "" { + a.Usage = "A new cli application" + } + + if a.Version == "" { + a.HideVersion = true + } + + if a.BashComplete == nil { + a.BashComplete = DefaultAppComplete + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + if a.Compiled == (time.Time{}) { + a.Compiled = compileTime() + } + + if a.Writer == nil { + a.Writer = os.Stdout + } + + var newCommands []*Command + + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCommands = append(newCommands, c) + } + a.Commands = newCommands + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + if !a.HideHelpCommand { + a.appendCommand(helpCommand) + } + + if HelpFlag != nil { + a.appendFlag(HelpFlag) + } + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = newCommandCategories() + for _, command := range a.Commands { + a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories.(*commandCategories)) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } + + if a.Writer == nil { + a.Writer = os.Stdout + } +} + +func (a *App) newFlagSet() (*flag.FlagSet, error) { + return flagSet(a.Name, a.Flags) +} + +func (a *App) useShortOptionHandling() bool { + return a.UseShortOptionHandling +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + return a.RunContext(context.Background(), arguments) +} + +// RunContext is like Run except it takes a Context that will be +// passed to its commands and sub-commands. Through this, you can +// propagate timeouts and cancellation requests +func (a *App) RunContext(ctx context.Context, arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. 
this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + + set, err := a.newFlagSet() + if err != nil { + return err + } + + err = parseIter(set, a, arguments[1:], shellComplete) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, &Context{Context: ctx}) + if nerr != nil { + _, _ = fmt.Fprintln(a.Writer, nerr) + _ = ShowAppHelp(context) + return nerr + } + context.shellComplete = shellComplete + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err := a.OnUsageError(context, err, false) + a.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + _ = ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + _ = ShowAppHelp(context) + return nil + } + + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) + return nil + } + + cerr := checkRequiredFlags(a.Flags, context) + if cerr != nil { + _ = ShowAppHelp(context) + return cerr + } + + if a.After != nil { + defer func() { + if afterErr := a.After(context); afterErr != nil { + if err != nil { + err = newMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + _, _ = fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) + _ = ShowAppHelp(context) + a.handleExitCoder(context, beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + // Run default Action + err = a.Action(context) + + a.handleExitCoder(context, err) + return err +} + +// RunAndExitOnError calls .Run() and exits non-zero if an error was returned +// +// Deprecated: instead you should return an error that fulfills cli.ExitCoder +// to cli.App.Run. 
This will cause the application to exit with the given eror +// code in the cli.ExitCoder +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(a.errWriter(), err) + OsExiter(1) + } +} + +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + // Setup also handles HideHelp and HideHelpCommand + a.Setup() + + var newCmds []*Command + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + set, err := a.newFlagSet() + if err != nil { + return err + } + + err = parseIter(set, a, ctx.Args().Tail(), ctx.shellComplete) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, ctx) + + if nerr != nil { + _, _ = fmt.Fprintln(a.Writer, nerr) + _, _ = fmt.Fprintln(a.Writer) + if len(a.Commands) > 0 { + _ = ShowSubcommandHelp(context) + } else { + _ = ShowCommandHelp(ctx, context.Args().First()) + } + return nerr + } + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err = a.OnUsageError(context, err, true) + a.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + _ = ShowSubcommandHelp(context) + return err + } + + if len(a.Commands) > 0 { + if checkSubcommandHelp(context) { + return nil + } + } else { + if checkCommandHelp(ctx, context.Args().First()) { + return nil + } + } + + cerr := checkRequiredFlags(a.Flags, context) + if cerr != nil { + _ = ShowSubcommandHelp(context) + return cerr + } + + if a.After != nil { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + a.handleExitCoder(context, err) + if err != nil { + err = newMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + a.handleExitCoder(context, beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + err = a.Action(context) + + a.handleExitCoder(context, err) + return err +} + +// Command returns the named command on App. Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return c + } + } + + return nil +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []CommandCategory { + ret := []CommandCategory{} + for _, category := range a.categories.Categories() { + if visible := func() CommandCategory { + if len(category.VisibleCommands()) > 0 { + return category + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []*Command { + var ret []*Command + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + +func (a *App) errWriter() io.Writer { + // When the app ErrWriter is nil use the package level one. 
+ if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + +func (a *App) appendFlag(fl Flag) { + if !hasFlag(a.Flags, fl) { + a.Flags = append(a.Flags, fl) + } +} + +func (a *App) appendCommand(c *Command) { + if !hasCommand(a.Commands, c) { + a.Commands = append(a.Commands, c) + } +} + +func (a *App) handleExitCoder(context *Context, err error) { + if a.ExitErrHandler != nil { + a.ExitErrHandler(context, err) + } else { + HandleExitCoder(err) + } +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a *Author) String() string { + e := "" + if a.Email != "" { + e = " <" + a.Email + ">" + } + + return fmt.Sprintf("%v%v", a.Name, e) +} + +// HandleAction attempts to figure out which Action signature was used. If +// it's an ActionFunc or a func with the legacy signature for Action, the func +// is run! +func HandleAction(action interface{}, context *Context) (err error) { + switch a := action.(type) { + case ActionFunc: + return a(context) + case func(*Context) error: + return a(context) + case func(*Context): // deprecated function signature + a(context) + return nil + } + + return errInvalidActionType +} diff --git a/vendor/github.com/urfave/cli/v2/args.go b/vendor/github.com/urfave/cli/v2/args.go new file mode 100644 index 000000000..bd65c17bd --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/args.go @@ -0,0 +1,54 @@ +package cli + +type Args interface { + // Get returns the nth argument, or else a blank string + Get(n int) string + // First returns the first argument, or else a blank string + First() string + // Tail returns the rest of the arguments (not the first one) + // or else an empty string slice + Tail() []string + // Len returns the length of the wrapped slice + Len() int + // Present checks if there are any arguments present + Present() bool + // Slice returns a copy of the internal slice + Slice() []string +} + +type args []string + +func (a *args) Get(n int) string { + if len(*a) > n { + return (*a)[n] + } + return "" +} + +func (a *args) First() string { + return a.Get(0) +} + +func (a *args) Tail() []string { + if a.Len() >= 2 { + tail := []string((*a)[1:]) + ret := make([]string, len(tail)) + copy(ret, tail) + return ret + } + return []string{} +} + +func (a *args) Len() int { + return len(*a) +} + +func (a *args) Present() bool { + return a.Len() != 0 +} + +func (a *args) Slice() []string { + ret := make([]string, len(*a)) + copy(ret, *a) + return ret +} diff --git a/vendor/github.com/urfave/cli/v2/category.go b/vendor/github.com/urfave/cli/v2/category.go new file mode 100644 index 000000000..867e3908c --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/category.go @@ -0,0 +1,79 @@ +package cli + +// CommandCategories interface allows for category manipulation +type CommandCategories interface { + // AddCommand adds a command to a category, creating a new category if necessary. 
+ AddCommand(category string, command *Command) + // categories returns a copy of the category slice + Categories() []CommandCategory +} + +type commandCategories []*commandCategory + +func newCommandCategories() CommandCategories { + ret := commandCategories([]*commandCategory{}) + return &ret +} + +func (c *commandCategories) Less(i, j int) bool { + return lexicographicLess((*c)[i].Name(), (*c)[j].Name()) +} + +func (c *commandCategories) Len() int { + return len(*c) +} + +func (c *commandCategories) Swap(i, j int) { + (*c)[i], (*c)[j] = (*c)[j], (*c)[i] +} + +func (c *commandCategories) AddCommand(category string, command *Command) { + for _, commandCategory := range []*commandCategory(*c) { + if commandCategory.name == category { + commandCategory.commands = append(commandCategory.commands, command) + return + } + } + newVal := append(*c, + &commandCategory{name: category, commands: []*Command{command}}) + *c = newVal +} + +func (c *commandCategories) Categories() []CommandCategory { + ret := make([]CommandCategory, len(*c)) + for i, cat := range *c { + ret[i] = cat + } + return ret +} + +// CommandCategory is a category containing commands. +type CommandCategory interface { + // Name returns the category name string + Name() string + // VisibleCommands returns a slice of the Commands with Hidden=false + VisibleCommands() []*Command +} + +type commandCategory struct { + name string + commands []*Command +} + +func (c *commandCategory) Name() string { + return c.name +} + +func (c *commandCategory) VisibleCommands() []*Command { + if c.commands == nil { + c.commands = []*Command{} + } + + var ret []*Command + for _, command := range c.commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/urfave/cli/v2/cli.go b/vendor/github.com/urfave/cli/v2/cli.go new file mode 100644 index 000000000..62a5bc22d --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/cli.go @@ -0,0 +1,23 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. cli is designed to be easy to understand and write, the most simple +// cli application can be written as follows: +// func main() { +// (&cli.App{}).Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// func main() { +// app := &cli.App{ +// Name: "greet", +// Usage: "say a greeting", +// Action: func(c *cli.Context) error { +// fmt.Println("Greetings") +// return nil +// }, +// } +// +// app.Run(os.Args) +// } +package cli + +//go:generate go run flag-gen/main.go flag-gen/assets_vfsdata.go diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go new file mode 100644 index 000000000..95840f32e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/command.go @@ -0,0 +1,301 @@ +package cli + +import ( + "flag" + "fmt" + "sort" + "strings" +) + +// Command is a subcommand for a cli.App. 
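+// A minimal sketch of how a Command is typically declared and wired into an
+// App; the "add" command name and its output below are illustrative, not part
+// of this package:
+//
+//	cmd := &cli.Command{
+//		Name:    "add",
+//		Aliases: []string{"a"},
+//		Usage:   "add a task to the list",
+//		Action: func(c *cli.Context) error {
+//			fmt.Println("added:", c.Args().First())
+//			return nil
+//		},
+//	}
+//	app := &cli.App{Name: "tasks", Commands: []*cli.Command{cmd}}
+//	_ = app.Run(os.Args)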
+type Command struct { + // The name of the command + Name string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action ActionFunc + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands []*Command + // List of flags to parse + Flags []Flag + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag + // Ignored if HideHelp is true. + HideHelpCommand bool + // Boolean to hide this command from help or completion + Hidden bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomHelpTemplate string +} + +type Commands []*Command + +type CommandsByName []*Command + +func (c CommandsByName) Len() int { + return len(c) +} + +func (c CommandsByName) Less(i, j int) bool { + return lexicographicLess(c[i].Name, c[j].Name) +} + +func (c CommandsByName) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// FullName returns the full name of the command. 
+// For subcommands this ensures that parent commands are part of the command path +func (c *Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c *Command) Run(ctx *Context) (err error) { + if len(c.Subcommands) > 0 { + return c.startApp(ctx) + } + + if !c.HideHelp && HelpFlag != nil { + // append help to flags + c.appendFlag(HelpFlag) + } + + if ctx.App.UseShortOptionHandling { + c.UseShortOptionHandling = true + } + + set, err := c.parseFlags(ctx.Args(), ctx.shellComplete) + + context := NewContext(ctx.App, set, ctx) + context.Command = c + if checkCommandCompletions(context, c.Name) { + return nil + } + + if err != nil { + if c.OnUsageError != nil { + err = c.OnUsageError(context, err, false) + context.App.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) + _, _ = fmt.Fprintln(context.App.Writer) + _ = ShowCommandHelp(context, c.Name) + return err + } + + if checkCommandHelp(context, c.Name) { + return nil + } + + cerr := checkRequiredFlags(c.Flags, context) + if cerr != nil { + _ = ShowCommandHelp(context, c.Name) + return cerr + } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + context.App.handleExitCoder(context, err) + if err != nil { + err = newMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err = c.Before(context) + if err != nil { + _ = ShowCommandHelp(context, c.Name) + context.App.handleExitCoder(context, err) + return err + } + } + + if c.Action == nil { + c.Action = helpSubcommand.Action + } + + context.Command = c + err = c.Action(context) + + if err != nil { + context.App.handleExitCoder(context, err) + } + return err +} + +func (c *Command) newFlagSet() (*flag.FlagSet, error) { + return flagSet(c.Name, c.Flags) +} + +func (c *Command) useShortOptionHandling() bool { + return c.UseShortOptionHandling +} + +func (c *Command) parseFlags(args Args, shellComplete bool) (*flag.FlagSet, error) { + set, err := c.newFlagSet() + if err != nil { + return nil, err + } + + if c.SkipFlagParsing { + return set, set.Parse(append([]string{"--"}, args.Tail()...)) + } + + err = parseIter(set, c, args.Tail(), shellComplete) + if err != nil { + return nil, err + } + + err = normalizeFlags(c.Flags, set) + if err != nil { + return nil, err + } + + return set, nil +} + +// Names returns the names including short names and aliases. +func (c *Command) Names() []string { + return append([]string{c.Name}, c.Aliases...) 
+} + +// HasName returns true if Command.Name matches given name +func (c *Command) HasName(name string) bool { + for _, n := range c.Names() { + if n == name { + return true + } + } + return false +} + +func (c *Command) startApp(ctx *Context) error { + app := &App{ + Metadata: ctx.App.Metadata, + Name: fmt.Sprintf("%s %s", ctx.App.Name, c.Name), + } + + if c.HelpName == "" { + app.HelpName = c.HelpName + } else { + app.HelpName = app.Name + } + + app.Usage = c.Usage + app.Description = c.Description + app.ArgsUsage = c.ArgsUsage + + // set CommandNotFound + app.CommandNotFound = ctx.App.CommandNotFound + app.CustomAppHelpTemplate = c.CustomHelpTemplate + + // set the flags and commands + app.Commands = c.Subcommands + app.Flags = c.Flags + app.HideHelp = c.HideHelp + app.HideHelpCommand = c.HideHelpCommand + + app.Version = ctx.App.Version + app.HideVersion = ctx.App.HideVersion + app.Compiled = ctx.App.Compiled + app.Writer = ctx.App.Writer + app.ErrWriter = ctx.App.ErrWriter + app.ExitErrHandler = ctx.App.ExitErrHandler + app.UseShortOptionHandling = ctx.App.UseShortOptionHandling + + app.categories = newCommandCategories() + for _, command := range c.Subcommands { + app.categories.AddCommand(command.Category, command) + } + + sort.Sort(app.categories.(*commandCategories)) + + // bash completion + app.EnableBashCompletion = ctx.App.EnableBashCompletion + if c.BashComplete != nil { + app.BashComplete = c.BashComplete + } + + // set the actions + app.Before = c.Before + app.After = c.After + if c.Action != nil { + app.Action = c.Action + } else { + app.Action = helpSubcommand.Action + } + app.OnUsageError = c.OnUsageError + + for index, cc := range app.Commands { + app.Commands[index].commandNamePath = []string{c.Name, cc.Name} + } + + return app.RunAsSubcommand(ctx) +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c *Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} + +func (c *Command) appendFlag(fl Flag) { + if !hasFlag(c.Flags, fl) { + c.Flags = append(c.Flags, fl) + } +} + +func hasCommand(commands []*Command, command *Command) bool { + for _, existing := range commands { + if command == existing { + return true + } + } + + return false +} diff --git a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go new file mode 100644 index 000000000..74ed51912 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/context.go @@ -0,0 +1,273 @@ +package cli + +import ( + "context" + "errors" + "flag" + "fmt" + "strings" +) + +// Context is a type that is passed through to +// each Handler action in a cli application. Context +// can be used to retrieve context-specific args and +// parsed command-line options. +type Context struct { + context.Context + App *App + Command *Command + shellComplete bool + flagSet *flag.FlagSet + parentContext *Context +} + +// NewContext creates a new context. For use in when invoking an App or Command action. +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + c := &Context{App: app, flagSet: set, parentContext: parentCtx} + if parentCtx != nil { + c.Context = parentCtx.Context + c.shellComplete = parentCtx.shellComplete + if parentCtx.flagSet == nil { + parentCtx.flagSet = &flag.FlagSet{} + } + } + + c.Command = &Command{} + + if c.Context == nil { + c.Context = context.Background() + } + + return c +} + +// NumFlags returns the number of flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() +} + +// Set sets a context flag to a value. 
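+// For example (flag name illustrative), an Action can force a value and read
+// it back through the typed accessors:
+//
+//	_ = c.Set("verbose", "true")
+//	ok := c.Bool("verbose") // true, assuming a "verbose" BoolFlag is defined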
+func (c *Context) Set(name, value string) error { + return c.flagSet.Set(name, value) +} + +// IsSet determines if the flag was actually set +func (c *Context) IsSet(name string) bool { + if fs := lookupFlagSet(name, c); fs != nil { + if fs := lookupFlagSet(name, c); fs != nil { + isSet := false + fs.Visit(func(f *flag.Flag) { + if f.Name == name { + isSet = true + } + }) + if isSet { + return true + } + } + + f := lookupFlag(name, c) + if f == nil { + return false + } + + return f.IsSet() + } + + return false +} + +// LocalFlagNames returns a slice of flag names used in this context. +func (c *Context) LocalFlagNames() []string { + var names []string + c.flagSet.Visit(makeFlagNameVisitor(&names)) + return names +} + +// FlagNames returns a slice of flag names used by the this context and all of +// its parent contexts. +func (c *Context) FlagNames() []string { + var names []string + for _, ctx := range c.Lineage() { + ctx.flagSet.Visit(makeFlagNameVisitor(&names)) + } + return names +} + +// Lineage returns *this* context and all of its ancestor contexts in order from +// child to parent +func (c *Context) Lineage() []*Context { + var lineage []*Context + + for cur := c; cur != nil; cur = cur.parentContext { + lineage = append(lineage, cur) + } + + return lineage +} + +// Value returns the value of the flag corresponding to `name` +func (c *Context) Value(name string) interface{} { + return c.flagSet.Lookup(name).Value.(flag.Getter).Get() +} + +// Args returns the command line arguments associated with the context. +func (c *Context) Args() Args { + ret := args(c.flagSet.Args()) + return &ret +} + +// NArg returns the number of the command line arguments. +func (c *Context) NArg() int { + return c.Args().Len() +} + +func lookupFlag(name string, ctx *Context) Flag { + for _, c := range ctx.Lineage() { + if c.Command == nil { + continue + } + + for _, f := range c.Command.Flags { + for _, n := range f.Names() { + if n == name { + return f + } + } + } + } + + if ctx.App != nil { + for _, f := range ctx.App.Flags { + for _, n := range f.Names() { + if n == name { + return f + } + } + } + } + + return nil +} + +func lookupFlagSet(name string, ctx *Context) *flag.FlagSet { + for _, c := range ctx.Lineage() { + if f := c.flagSet.Lookup(name); f != nil { + return c.flagSet + } + } + + return nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case Serializer: + _ = set.Set(name, ff.Value.(Serializer).Serialize()) + default: + _ = set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := f.Names() + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} + +func makeFlagNameVisitor(names *[]string) func(*flag.Flag) { + return func(f *flag.Flag) { + nameParts := strings.Split(f.Name, ",") + name := strings.TrimSpace(nameParts[0]) + + for _, part := range nameParts { + part = strings.TrimSpace(part) + if len(part) > len(name) { + name = part + } + } + + if name != "" { + *names = 
append(*names, name) + } + } +} + +type requiredFlagsErr interface { + error + getMissingFlags() []string +} + +type errRequiredFlags struct { + missingFlags []string +} + +func (e *errRequiredFlags) Error() string { + numberOfMissingFlags := len(e.missingFlags) + if numberOfMissingFlags == 1 { + return fmt.Sprintf("Required flag %q not set", e.missingFlags[0]) + } + joinedMissingFlags := strings.Join(e.missingFlags, ", ") + return fmt.Sprintf("Required flags %q not set", joinedMissingFlags) +} + +func (e *errRequiredFlags) getMissingFlags() []string { + return e.missingFlags +} + +func checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr { + var missingFlags []string + for _, f := range flags { + if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() { + var flagPresent bool + var flagName string + + for _, key := range f.Names() { + if len(key) > 1 { + flagName = key + } + + if context.IsSet(strings.TrimSpace(key)) { + flagPresent = true + } + } + + if !flagPresent && flagName != "" { + missingFlags = append(missingFlags, flagName) + } + } + } + + if len(missingFlags) != 0 { + return &errRequiredFlags{missingFlags: missingFlags} + } + + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/docs.go b/vendor/github.com/urfave/cli/v2/docs.go new file mode 100644 index 000000000..dc16fc82d --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/docs.go @@ -0,0 +1,148 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" + "text/template" + + "github.com/cpuguy83/go-md2man/v2/md2man" +) + +// ToMarkdown creates a markdown string for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToMarkdown() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +// ToMan creates a man page string for the `*App` +// The function errors if either parsing or writing of the string fails. 
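+// A rough usage sketch, writing the output to a hypothetical "app.1" file:
+//
+//	man, err := app.ToMan()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = os.WriteFile("app.1", []byte(man), 0o644)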
+func (a *App) ToMan() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w); err != nil { + return "", err + } + man := md2man.Render(w.Bytes()) + return string(man), nil +} + +type cliTemplate struct { + App *App + Commands []string + GlobalArgs []string + SynopsisArgs []string +} + +func (a *App) writeDocTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(MarkdownDocTemplate) + if err != nil { + return err + } + return t.ExecuteTemplate(w, name, &cliTemplate{ + App: a, + Commands: prepareCommands(a.Commands, 0), + GlobalArgs: prepareArgsWithValues(a.VisibleFlags()), + SynopsisArgs: prepareArgsSynopsis(a.VisibleFlags()), + }) +} + +func prepareCommands(commands []*Command, level int) []string { + var coms []string + for _, command := range commands { + if command.Hidden { + continue + } + usage := "" + if command.Usage != "" { + usage = command.Usage + } + + prepared := fmt.Sprintf("%s %s\n\n%s\n", + strings.Repeat("#", level+2), + strings.Join(command.Names(), ", "), + usage, + ) + + flags := prepareArgsWithValues(command.Flags) + if len(flags) > 0 { + prepared += fmt.Sprintf("\n%s", strings.Join(flags, "\n")) + } + + coms = append(coms, prepared) + + // recursevly iterate subcommands + if len(command.Subcommands) > 0 { + coms = append( + coms, + prepareCommands(command.Subcommands, level+1)..., + ) + } + } + + return coms +} + +func prepareArgsWithValues(flags []Flag) []string { + return prepareFlags(flags, ", ", "**", "**", `""`, true) +} + +func prepareArgsSynopsis(flags []Flag) []string { + return prepareFlags(flags, "|", "[", "]", "[value]", false) +} + +func prepareFlags( + flags []Flag, + sep, opener, closer, value string, + addDetails bool, +) []string { + args := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + modifiedArg := opener + + for _, s := range flag.Names() { + trimmed := strings.TrimSpace(s) + if len(modifiedArg) > len(opener) { + modifiedArg += sep + } + if len(trimmed) > 1 { + modifiedArg += fmt.Sprintf("--%s", trimmed) + } else { + modifiedArg += fmt.Sprintf("-%s", trimmed) + } + } + modifiedArg += closer + if flag.TakesValue() { + modifiedArg += fmt.Sprintf("=%s", value) + } + + if addDetails { + modifiedArg += flagDetails(flag) + } + + args = append(args, modifiedArg+"\n") + + } + sort.Strings(args) + return args +} + +// flagDetails returns a string containing the flags metadata +func flagDetails(flag DocGenerationFlag) string { + description := flag.GetUsage() + value := flag.GetValue() + if value != "" { + description += " (default: " + value + ")" + } + return ": " + description +} diff --git a/vendor/github.com/urfave/cli/v2/errors.go b/vendor/github.com/urfave/cli/v2/errors.go new file mode 100644 index 000000000..344b4361e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/errors.go @@ -0,0 +1,131 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError interface { + error + // Errors returns a copy of the errors slice + Errors() []error +} + +// NewMultiError creates a new MultiError. Pass in one or more errors. 
+func newMultiError(err ...error) MultiError { + ret := multiError(err) + return &ret +} + +type multiError []error + +// Error implements the error interface. +func (m *multiError) Error() string { + errs := make([]string, len(*m)) + for i, err := range *m { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +// Errors returns a copy of the errors slice +func (m *multiError) Errors() []error { + errs := make([]error, len(*m)) + for _, err := range *m { + errs = append(errs, err) + } + return errs +} + +// ErrorFormatter is the interface that will suitably format the error output +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +type exitError struct { + exitCode int + message interface{} +} + +// NewExitError makes a new *exitError +func NewExitError(message interface{}, exitCode int) ExitCoder { + return Exit(message, exitCode) +} + +// Exit wraps a message and exit code into an ExitCoder suitable for handling by +// HandleExitCoder +func Exit(message interface{}, exitCode int) ExitCoder { + return &exitError{ + message: message, + exitCode: exitCode, + } +} + +func (ee *exitError) Error() string { + return fmt.Sprintf("%v", ee.message) +} + +func (ee *exitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if +// so prints the error to stderr (if it is non-empty) and calls OsExiter with the +// given exit code. If the given error is a MultiError, then this func is +// called on all members of the Errors slice and calls OsExiter with the last exit code. +func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + _, _ = fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + _, _ = fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + code := handleMultiError(multiErr) + OsExiter(code) + return + } +} + +func handleMultiError(multiErr MultiError) int { + code := 1 + for _, merr := range multiErr.Errors() { + if multiErr2, ok := merr.(MultiError); ok { + code = handleMultiError(multiErr2) + } else if merr != nil { + fmt.Fprintln(ErrWriter, merr) + if exitErr, ok := merr.(ExitCoder); ok { + code = exitErr.ExitCode() + } + } + } + return code +} diff --git a/vendor/github.com/urfave/cli/v2/fish.go b/vendor/github.com/urfave/cli/v2/fish.go new file mode 100644 index 000000000..67122c9fe --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/fish.go @@ -0,0 +1,192 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// ToFishCompletion creates a fish completion string for the `*App` +// The function errors if either parsing or writing of the string fails. 
+func (a *App) ToFishCompletion() (string, error) { + var w bytes.Buffer + if err := a.writeFishCompletionTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +type fishCompletionTemplate struct { + App *App + Completions []string + AllCommands []string +} + +func (a *App) writeFishCompletionTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(FishCompletionTemplate) + if err != nil { + return err + } + allCommands := []string{} + + // Add global flags + completions := a.prepareFishFlags(a.VisibleFlags(), allCommands) + + // Add help flag + if !a.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, allCommands)..., + ) + } + + // Add version flag + if !a.HideVersion { + completions = append( + completions, + a.prepareFishFlags([]Flag{VersionFlag}, allCommands)..., + ) + } + + // Add commands and their flags + completions = append( + completions, + a.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})..., + ) + + return t.ExecuteTemplate(w, name, &fishCompletionTemplate{ + App: a, + Completions: completions, + AllCommands: allCommands, + }) +} + +func (a *App) prepareFishCommands(commands []*Command, allCommands *[]string, previousCommands []string) []string { + completions := []string{} + for _, command := range commands { + if command.Hidden { + continue + } + + var completion strings.Builder + completion.WriteString(fmt.Sprintf( + "complete -r -c %s -n '%s' -a '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + strings.Join(command.Names(), " "), + )) + + if command.Usage != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(command.Usage))) + } + + if !command.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, command.Names())..., + ) + } + + *allCommands = append(*allCommands, command.Names()...) 
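+		// emit the completion entry for the command itself, then one entry per documented flag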
+ completions = append(completions, completion.String()) + completions = append( + completions, + a.prepareFishFlags(command.Flags, command.Names())..., + ) + + // recursevly iterate subcommands + if len(command.Subcommands) > 0 { + completions = append( + completions, + a.prepareFishCommands( + command.Subcommands, allCommands, command.Names(), + )..., + ) + } + } + + return completions +} + +func (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string { + completions := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + + completion := &strings.Builder{} + completion.WriteString(fmt.Sprintf( + "complete -c %s -n '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + )) + + fishAddFileFlag(f, completion) + + for idx, opt := range flag.Names() { + if idx == 0 { + completion.WriteString(fmt.Sprintf( + " -l %s", strings.TrimSpace(opt), + )) + } else { + completion.WriteString(fmt.Sprintf( + " -s %s", strings.TrimSpace(opt), + )) + + } + } + + if flag.TakesValue() { + completion.WriteString(" -r") + } + + if flag.GetUsage() != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(flag.GetUsage()))) + } + + completions = append(completions, completion.String()) + } + + return completions +} + +func fishAddFileFlag(flag Flag, completion *strings.Builder) { + switch f := flag.(type) { + case *GenericFlag: + if f.TakesFile { + return + } + case *StringFlag: + if f.TakesFile { + return + } + case *StringSliceFlag: + if f.TakesFile { + return + } + } + completion.WriteString(" -f") +} + +func (a *App) fishSubcommandHelper(allCommands []string) string { + fishHelper := fmt.Sprintf("__fish_%s_no_subcommand", a.Name) + if len(allCommands) > 0 { + fishHelper = fmt.Sprintf( + "__fish_seen_subcommand_from %s", + strings.Join(allCommands, " "), + ) + } + return fishHelper + +} + +func escapeSingleQuotes(input string) string { + return strings.Replace(input, `'`, `\'`, -1) +} diff --git a/vendor/github.com/urfave/cli/v2/flag.go b/vendor/github.com/urfave/cli/v2/flag.go new file mode 100644 index 000000000..ad97c2d05 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag.go @@ -0,0 +1,388 @@ +package cli + +import ( + "flag" + "fmt" + "io/ioutil" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "syscall" + "time" +) + +const defaultPlaceholder = "value" + +var ( + slPfx = fmt.Sprintf("sl:::%d:::", time.Now().UTC().UnixNano()) + + commaWhitespace = regexp.MustCompile("[, ]+.*") +) + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag Flag = &BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag Flag = &BoolFlag{ + Name: "version", + Aliases: []string{"v"}, + Usage: "print the version", +} + +// HelpFlag prints the help for all commands and subcommands. +// Set to nil to disable the flag. The subcommand +// will still be added unless HideHelp or HideHelpCommand is set to true. +var HelpFlag Flag = &BoolFlag{ + Name: "help", + Aliases: []string{"h"}, + Usage: "show help", +} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// Serializer is used to circumvent the limitations of flag.FlagSet.Set +type Serializer interface { + Serialize() string +} + +// FlagNamePrefixer converts a full flag name and its placeholder into the help +// message flag prefix. 
This is used by the default FlagStringer. +var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames + +// FlagEnvHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. +var FlagEnvHinter FlagEnvHintFunc = withEnvHint + +// FlagFileHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. +var FlagFileHinter FlagFileHintFunc = withFileHint + +// FlagsByName is a slice of Flag. +type FlagsByName []Flag + +func (f FlagsByName) Len() int { + return len(f) +} + +func (f FlagsByName) Less(i, j int) bool { + if len(f[j].Names()) == 0 { + return false + } else if len(f[i].Names()) == 0 { + return true + } + return lexicographicLess(f[i].Names()[0], f[j].Names()[0]) +} + +func (f FlagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// Flag is a common interface related to parsing flags in cli. +// For more advanced flag parsing techniques, it is recommended that +// this interface be implemented. +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) error + Names() []string + IsSet() bool +} + +// RequiredFlag is an interface that allows us to mark flags as required +// it allows flags required flags to be backwards compatible with the Flag interface +type RequiredFlag interface { + Flag + + IsRequired() bool +} + +// DocGenerationFlag is an interface that allows documentation generation for the flag +type DocGenerationFlag interface { + Flag + + // TakesValue returns true if the flag takes a value, otherwise false + TakesValue() bool + + // GetUsage returns the usage string for the flag + GetUsage() string + + // GetValue returns the flags value as string representation and an empty + // string if the flag takes no value at all. + GetValue() string +} + +func flagSet(name string, flags []Flag) (*flag.FlagSet, error) { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + if err := f.Apply(set); err != nil { + return nil, err + } + } + set.SetOutput(ioutil.Discard) + return set, nil +} + +func visibleFlags(fl []Flag) []Flag { + var visible []Flag + for _, f := range fl { + field := flagValue(f).FieldByName("Hidden") + if !field.IsValid() || !field.Bool() { + visible = append(visible, f) + } + } + return visible +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +// Returns the placeholder, if any, and the unquoted usage string. 
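+// For example, the usage string "Load configuration from `FILE`" yields the
+// placeholder "FILE" and the unquoted usage "Load configuration from FILE".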
+func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(names []string, placeholder string) string { + var prefixed string + for i, name := range names { + if name == "" { + continue + } + + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(names)-1 { + prefixed += ", " + } + } + return prefixed +} + +func withEnvHint(envVars []string, str string) string { + envText := "" + if envVars != nil && len(envVars) > 0 { + prefix := "$" + suffix := "" + sep := ", $" + if runtime.GOOS == "windows" { + prefix = "%" + suffix = "%" + sep = "%, %" + } + + envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(envVars, sep), suffix) + } + return str + envText +} + +func flagNames(name string, aliases []string) []string { + var ret []string + + for _, part := range append([]string{name}, aliases...) { + // v1 -> v2 migration warning zone: + // Strip off anything after the first found comma or space, which + // *hopefully* makes it a tiny bit more obvious that unexpected behavior is + // caused by using the v1 form of stringly typed "Name". + ret = append(ret, commaWhitespace.ReplaceAllString(part, "")) + } + + return ret +} + +func flagStringSliceField(f Flag, name string) []string { + fv := flagValue(f) + field := fv.FieldByName(name) + + if field.IsValid() { + return field.Interface().([]string) + } + + return []string{} +} + +func withFileHint(filePath, str string) string { + fileText := "" + if filePath != "" { + fileText = fmt.Sprintf(" [%s]", filePath) + } + return str + fileText +} + +func flagValue(f Flag) reflect.Value { + fv := reflect.ValueOf(f) + for fv.Kind() == reflect.Ptr { + fv = reflect.Indirect(fv) + } + return fv +} + +func formatDefault(format string) string { + return " (default: " + format + ")" +} + +func stringifyFlag(f Flag) string { + fv := flagValue(f) + + switch f := f.(type) { + case *IntSliceFlag: + return withEnvHint(flagStringSliceField(f, "EnvVars"), + stringifyIntSliceFlag(f)) + case *Int64SliceFlag: + return withEnvHint(flagStringSliceField(f, "EnvVars"), + stringifyInt64SliceFlag(f)) + case *Float64SliceFlag: + return withEnvHint(flagStringSliceField(f, "EnvVars"), + stringifyFloat64SliceFlag(f)) + case *StringSliceFlag: + return withEnvHint(flagStringSliceField(f, "EnvVars"), + stringifyStringSliceFlag(f)) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + val := fv.FieldByName("Value") + if val.IsValid() { + needsPlaceholder = val.Kind() != reflect.Bool + defaultValueString = fmt.Sprintf(formatDefault("%v"), val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(formatDefault("%q"), val.String()) + } + } + + helpText := fv.FieldByName("DefaultText") + if helpText.IsValid() && helpText.String() != "" { + needsPlaceholder = val.Kind() != reflect.Bool + defaultValueString = fmt.Sprintf(formatDefault("%s"), helpText.String()) + } + + if defaultValueString == formatDefault("") { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + usageWithDefault := strings.TrimSpace(usage + defaultValueString) + + return 
withEnvHint(flagStringSliceField(f, "EnvVars"), + fmt.Sprintf("%s\t%s", prefixedNames(f.Names(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f *IntSliceFlag) string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.Itoa(i)) + } + } + + return stringifySliceFlag(f.Usage, f.Names(), defaultVals) +} + +func stringifyInt64SliceFlag(f *Int64SliceFlag) string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatInt(i, 10)) + } + } + + return stringifySliceFlag(f.Usage, f.Names(), defaultVals) +} + +func stringifyFloat64SliceFlag(f *Float64SliceFlag) string { + var defaultVals []string + + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), ".")) + } + } + + return stringifySliceFlag(f.Usage, f.Names(), defaultVals) +} + +func stringifyStringSliceFlag(f *StringSliceFlag) string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, strconv.Quote(s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Names(), defaultVals) +} + +func stringifySliceFlag(usage string, names, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(formatDefault("%s"), strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", prefixedNames(names, placeholder), usageWithDefault) +} + +func hasFlag(flags []Flag, fl Flag) bool { + for _, existing := range flags { + if fl == existing { + return true + } + } + + return false +} + +func flagFromEnvOrFile(envVars []string, filePath string) (val string, ok bool) { + for _, envVar := range envVars { + envVar = strings.TrimSpace(envVar) + if val, ok := syscall.Getenv(envVar); ok { + return val, true + } + } + for _, fileVar := range strings.Split(filePath, ",") { + if data, err := ioutil.ReadFile(fileVar); err == nil { + return string(data), true + } + } + return "", false +} diff --git a/vendor/github.com/urfave/cli/v2/flag_bool.go b/vendor/github.com/urfave/cli/v2/flag_bool.go new file mode 100644 index 000000000..bc9ea35d0 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_bool.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value bool + DefaultText string + Destination *bool + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *BoolFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *BoolFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *BoolFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *BoolFlag) IsRequired() bool { + return f.Required +} + +// 
TakesValue returns true of the flag takes a value, otherwise false +func (f *BoolFlag) TakesValue() bool { + return false +} + +// GetUsage returns the usage string for the flag +func (f *BoolFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *BoolFlag) GetValue() string { + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *BoolFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valBool, err := strconv.ParseBool(val) + + if err != nil { + return fmt.Errorf("could not parse %q as bool value for flag %s: %s", val, f.Name, err) + } + + f.Value = valBool + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.BoolVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Bool(name, f.Value, f.Usage) + } + + return nil +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (c *Context) Bool(name string) bool { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} diff --git a/vendor/github.com/urfave/cli/v2/flag_duration.go b/vendor/github.com/urfave/cli/v2/flag_duration.go new file mode 100644 index 000000000..22a2e6720 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_duration.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) +type DurationFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value time.Duration + DefaultText string + Destination *time.Duration + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *DurationFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *DurationFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *DurationFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *DurationFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *DurationFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *DurationFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
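+// As an illustrative sketch, a flag declared as
+//
+//	&cli.DurationFlag{Name: "timeout", Value: 5 * time.Second, EnvVars: []string{"APP_TIMEOUT"}}
+//
+// reports "5s" here unless the APP_TIMEOUT environment variable overrides the
+// default.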
+func (f *DurationFlag) GetValue() string { + return f.Value.String() +} + +// Apply populates the flag given the flag set and environment +func (f *DurationFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valDuration, err := time.ParseDuration(val) + + if err != nil { + return fmt.Errorf("could not parse %q as duration value for flag %s: %s", val, f.Name, err) + } + + f.Value = valDuration + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Duration(name, f.Value, f.Usage) + } + return nil +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (c *Context) Duration(name string) time.Duration { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_float64.go b/vendor/github.com/urfave/cli/v2/flag_float64.go new file mode 100644 index 000000000..91c778c87 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_float64.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value float64 + DefaultText string + Destination *float64 + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Float64Flag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Float64Flag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *Float64Flag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Float64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Float64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Float64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Float64Flag) GetValue() string { + return fmt.Sprintf("%f", f.Value) +} + +// Apply populates the flag given the flag set and environment +func (f *Float64Flag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valFloat, err := strconv.ParseFloat(val, 10) + + if err != nil { + return fmt.Errorf("could not parse %q as float64 value for flag %s: %s", val, f.Name, err) + } + + f.Value = valFloat + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Float64(name, f.Value, f.Usage) + } + + return nil +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (c *Context) Float64(name string) float64 { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_float64_slice.go b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go new file mode 100644 index 000000000..706ee6cd4 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go @@ -0,0 +1,163 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Float64Slice wraps []float64 to satisfy flag.Value +type Float64Slice struct { + slice []float64 + hasBeenSet bool +} + +// NewFloat64Slice makes a *Float64Slice with default values +func NewFloat64Slice(defaults ...float64) *Float64Slice { + return &Float64Slice{slice: append([]float64{}, defaults...)} +} + +// Set parses the value into a float64 and appends it to the list of values +func (f *Float64Slice) Set(value string) error { + if !f.hasBeenSet { + f.slice = []float64{} + f.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &f.slice) + f.hasBeenSet = true + return nil + } + + tmp, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + + f.slice = append(f.slice, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Float64Slice) String() string { + return fmt.Sprintf("%#v", f.slice) +} + +// Serialize allows Float64Slice to fulfill Serializer +func (f *Float64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(f.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of float64s set by this flag +func (f *Float64Slice) Value() []float64 { + return f.slice +} + +// Get returns the slice of float64s set by this flag +func (f *Float64Slice) Get() interface{} { + return *f +} + +// Float64SliceFlag is a flag with type *Float64Slice +type Float64SliceFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value *Float64Slice + DefaultText string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Float64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Float64SliceFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names 
of the flag +func (f *Float64SliceFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Float64SliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true if the flag takes a value, otherwise false +func (f *Float64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Float64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Float64SliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + f.Value = &Float64Slice{} + + for _, s := range strings.Split(val, ",") { + if err := f.Value.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as float64 slice value for flag %s: %s", f.Value, f.Name, err) + } + } + + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Value == nil { + f.Value = &Float64Slice{} + } + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// Float64Slice looks up the value of a local Float64SliceFlag, returns +// nil if not found +func (c *Context) Float64Slice(name string) []float64 { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupFloat64Slice(name, fs) + } + return nil +} + +func lookupFloat64Slice(name string, set *flag.FlagSet) []float64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := f.Value.(*Float64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_generic.go b/vendor/github.com/urfave/cli/v2/flag_generic.go new file mode 100644 index 000000000..b0c8ff44d --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_generic.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" +) + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value Generic + DefaultText string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *GenericFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *GenericFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *GenericFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *GenericFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *GenericFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *GenericFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *GenericFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + if err := f.Value.Set(val); err != nil { + return fmt.Errorf("could not parse %q as value for flag %s: %s", val, f.Name, err) + } + + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (c *Context) Generic(name string) interface{} { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value, error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int.go b/vendor/github.com/urfave/cli/v2/flag_int.go new file mode 100644 index 000000000..ac39d4a9e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value int + DefaultText string + Destination *int + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *IntFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *IntFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *IntFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *IntFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *IntFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *IntFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *IntFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +func (f *IntFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valInt, err := strconv.ParseInt(val, 0, 64) + + if err != nil { + return fmt.Errorf("could not parse %q as int value for flag %s: %s", val, f.Name, err) + } + + f.Value = int(valInt) + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Int(name, f.Value, f.Usage) + } + + return nil +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (c *Context) Int(name string) int { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int64.go b/vendor/github.com/urfave/cli/v2/flag_int64.go new file mode 100644 index 000000000..e09991269 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int64.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value int64 + DefaultText string + Destination *int64 + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Int64Flag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Int64Flag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *Int64Flag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Int64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Int64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Int64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Int64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +func (f *Int64Flag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valInt, err := strconv.ParseInt(val, 0, 64) + + if err != nil { + return fmt.Errorf("could not parse %q as int value for flag %s: %s", val, f.Name, err) + } + + f.Value = valInt + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Int64(name, f.Value, f.Usage) + } + return nil +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (c *Context) Int64(name string) int64 { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int64_slice.go b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go new file mode 100644 index 000000000..6c7fd9376 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go @@ -0,0 +1,159 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Int64Slice wraps []int64 to satisfy flag.Value +type Int64Slice struct { + slice []int64 + hasBeenSet bool +} + +// NewInt64Slice makes an *Int64Slice with default values +func NewInt64Slice(defaults ...int64) *Int64Slice { + return &Int64Slice{slice: append([]int64{}, defaults...)} +} + +// Set parses the value into an integer and appends it to the list of values +func (i *Int64Slice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []int64{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + tmp, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, tmp) + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (i *Int64Slice) String() string { + return fmt.Sprintf("%#v", i.slice) +} + +// Serialize allows Int64Slice to fulfill Serializer +func (i *Int64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *Int64Slice) Value() []int64 { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *Int64Slice) Get() interface{} { + return *i +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value *Int64Slice + DefaultText string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Int64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *Int64SliceFlag) Names() []string { + return flagNames(f.Name, 
f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Int64SliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Int64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Int64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Int64SliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + f.Value = &Int64Slice{} + + for _, s := range strings.Split(val, ",") { + if err := f.Value.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as int64 slice value for flag %s: %s", val, f.Name, err) + } + } + + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (c *Context) Int64Slice(name string) []int64 { + return lookupInt64Slice(name, c.flagSet) +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := f.Value.(*Int64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int_slice.go b/vendor/github.com/urfave/cli/v2/flag_int_slice.go new file mode 100644 index 000000000..4e0afc021 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int_slice.go @@ -0,0 +1,173 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// IntSlice wraps []int to satisfy flag.Value +type IntSlice struct { + slice []int + hasBeenSet bool +} + +// NewIntSlice makes an *IntSlice with default values +func NewIntSlice(defaults ...int) *IntSlice { + return &IntSlice{slice: append([]int{}, defaults...)} +} + +// TODO: Consistently have specific Set function for Int64 and Float64 ? 
+// SetInt directly adds an integer to the list of values +func (i *IntSlice) SetInt(value int) { + if !i.hasBeenSet { + i.slice = []int{} + i.hasBeenSet = true + } + + i.slice = append(i.slice, value) +} + +// Set parses the value into an integer and appends it to the list of values +func (i *IntSlice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []int{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + tmp, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, int(tmp)) + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (i *IntSlice) String() string { + return fmt.Sprintf("%#v", i.slice) +} + +// Serialize allows IntSlice to fulfill Serializer +func (i *IntSlice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *IntSlice) Value() []int { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *IntSlice) Get() interface{} { + return *i +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value *IntSlice + DefaultText string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *IntSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *IntSliceFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *IntSliceFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *IntSliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *IntSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f IntSliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *IntSliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *IntSliceFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + f.Value = &IntSlice{} + + for _, s := range strings.Split(val, ",") { + if err := f.Value.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as int slice value for flag %s: %s", val, f.Name, err) + } + } + + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Value == nil { + f.Value = &IntSlice{} + } + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (c *Context) IntSlice(name string) []int { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupIntSlice(name, c.flagSet) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + if slice, ok := f.Value.(*IntSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_path.go b/vendor/github.com/urfave/cli/v2/flag_path.go new file mode 100644 index 000000000..8070dc4b0 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_path.go @@ -0,0 +1,95 @@ +package cli + +import "flag" + +type PathFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value string + DefaultText string + Destination *string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *PathFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *PathFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *PathFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *PathFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *PathFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *PathFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *PathFlag) GetValue() string { + return f.Value +} + +// Apply populates the flag given the flag set and environment +func (f *PathFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + f.Value = val + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.String(name, f.Value, f.Usage) + } + + return nil +} + +// Path looks up the value of a local PathFlag, returns +// "" if not found +func (c *Context) Path(name string) string { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupPath(name, fs) + } + + return "" +} + +func lookupPath(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} diff --git a/vendor/github.com/urfave/cli/v2/flag_string.go b/vendor/github.com/urfave/cli/v2/flag_string.go new file mode 100644 index 000000000..400bb532e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_string.go @@ -0,0 +1,95 @@ +package cli + +import "flag" + +// StringFlag is a flag with type string +type StringFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value string + DefaultText string + Destination *string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *StringFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *StringFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *StringFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *StringFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *StringFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *StringFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *StringFlag) GetValue() string { + return f.Value +} + +// Apply populates the flag given the flag set and environment +func (f *StringFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + f.Value = val + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.String(name, f.Value, f.Usage) + } + + return nil +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (c *Context) String(name string) string { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} diff --git a/vendor/github.com/urfave/cli/v2/flag_string_slice.go b/vendor/github.com/urfave/cli/v2/flag_string_slice.go new file mode 100644 index 000000000..ac363bf60 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_string_slice.go @@ -0,0 +1,171 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strings" +) + +// StringSlice wraps a []string to satisfy flag.Value +type StringSlice struct { + slice []string + hasBeenSet bool +} + +// NewStringSlice creates a *StringSlice with default values +func NewStringSlice(defaults ...string) *StringSlice { + return &StringSlice{slice: append([]string{}, defaults...)} +} + +// Set appends the string value to the list of values +func (s *StringSlice) Set(value string) error { + if !s.hasBeenSet { + s.slice = []string{} + s.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &s.slice) + s.hasBeenSet = true + return nil + } + + s.slice = append(s.slice, value) + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (s *StringSlice) String() string { + return fmt.Sprintf("%s", s.slice) +} + +// Serialize allows StringSlice to fulfill Serializer +func (s *StringSlice) Serialize() string { + jsonBytes, _ := json.Marshal(s.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of strings set by this flag +func (s *StringSlice) Value() []string { + return s.slice +} + +// Get returns the slice of strings set by this flag +func (s *StringSlice) Get() interface{} { + return *s +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value *StringSlice + DefaultText string + HasBeenSet bool + Destination *StringSlice +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *StringSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *StringSliceFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *StringSliceFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *StringSliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f 
*StringSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *StringSliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *StringSliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *StringSliceFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + f.Value = &StringSlice{} + destination := f.Value + if f.Destination != nil { + destination = f.Destination + } + + for _, s := range strings.Split(val, ",") { + if err := destination.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as string value for flag %s: %s", val, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + destination.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Value == nil { + f.Value = &StringSlice{} + } + + if f.Destination != nil { + set.Var(f.Destination, name, f.Usage) + continue + } + + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (c *Context) StringSlice(name string) []string { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + if slice, ok := f.Value.(*StringSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_timestamp.go b/vendor/github.com/urfave/cli/v2/flag_timestamp.go new file mode 100644 index 000000000..9fac1d1e2 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_timestamp.go @@ -0,0 +1,152 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// Timestamp wrap to satisfy golang's flag interface. 
+type Timestamp struct {
+	timestamp  *time.Time
+	hasBeenSet bool
+	layout     string
+}
+
+// Timestamp constructor
+func NewTimestamp(timestamp time.Time) *Timestamp {
+	return &Timestamp{timestamp: &timestamp}
+}
+
+// Set the timestamp value directly
+func (t *Timestamp) SetTimestamp(value time.Time) {
+	if !t.hasBeenSet {
+		t.timestamp = &value
+		t.hasBeenSet = true
+	}
+}
+
+// Set the timestamp string layout for future parsing
+func (t *Timestamp) SetLayout(layout string) {
+	t.layout = layout
+}
+
+// Parses the string value to timestamp
+func (t *Timestamp) Set(value string) error {
+	timestamp, err := time.Parse(t.layout, value)
+	if err != nil {
+		return err
+	}
+
+	t.timestamp = &timestamp
+	t.hasBeenSet = true
+	return nil
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (t *Timestamp) String() string {
+	return fmt.Sprintf("%#v", t.timestamp)
+}
+
+// Value returns the timestamp value stored in the flag
+func (t *Timestamp) Value() *time.Time {
+	return t.timestamp
+}
+
+// Get returns the flag structure
+func (t *Timestamp) Get() interface{} {
+	return *t
+}
+
+// TimestampFlag is a flag with type time
+type TimestampFlag struct {
+	Name        string
+	Aliases     []string
+	Usage       string
+	EnvVars     []string
+	FilePath    string
+	Required    bool
+	Hidden      bool
+	Layout      string
+	Value       *Timestamp
+	DefaultText string
+	HasBeenSet  bool
+}
+
+// IsSet returns whether or not the flag has been set through env or file
+func (f *TimestampFlag) IsSet() bool {
+	return f.HasBeenSet
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f *TimestampFlag) String() string {
+	return FlagStringer(f)
+}
+
+// Names returns the names of the flag
+func (f *TimestampFlag) Names() []string {
+	return flagNames(f.Name, f.Aliases)
+}
+
+// IsRequired returns whether or not the flag is required
+func (f *TimestampFlag) IsRequired() bool {
+	return f.Required
+}
+
+// TakesValue returns true of the flag takes a value, otherwise false
+func (f *TimestampFlag) TakesValue() bool {
+	return true
+}
+
+// GetUsage returns the usage string for the flag
+func (f *TimestampFlag) GetUsage() string {
+	return f.Usage
+}
+
+// GetValue returns the flags value as string representation and an empty
+// string if the flag takes no value at all.
+func (f *TimestampFlag) GetValue() string { + if f.Value != nil { + return f.Value.timestamp.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *TimestampFlag) Apply(set *flag.FlagSet) error { + if f.Layout == "" { + return fmt.Errorf("timestamp Layout is required") + } + f.Value = &Timestamp{} + f.Value.SetLayout(f.Layout) + + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if err := f.Value.Set(val); err != nil { + return fmt.Errorf("could not parse %q as timestamp value for flag %s: %s", val, f.Name, err) + } + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(f.Value, name, f.Usage) + } + return nil +} + +// Timestamp gets the timestamp from a flag name +func (c *Context) Timestamp(name string) *time.Time { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupTimestamp(name, fs) + } + return nil +} + +// Fetches the timestamp value from the local timestampWrap +func lookupTimestamp(name string, set *flag.FlagSet) *time.Time { + f := set.Lookup(name) + if f != nil { + return (f.Value.(*Timestamp)).Value() + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint.go b/vendor/github.com/urfave/cli/v2/flag_uint.go new file mode 100644 index 000000000..2e5e76b0e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value uint + DefaultText string + Destination *uint + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *UintFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *UintFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *UintFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *UintFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *UintFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *UintFlag) GetUsage() string { + return f.Usage +} + +// Apply populates the flag given the flag set and environment +func (f *UintFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valInt, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %q as uint value for flag %s: %s", val, f.Name, err) + } + + f.Value = uint(valInt) + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Uint(name, f.Value, f.Usage) + } + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *UintFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (c *Context) Uint(name string) uint { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64.go b/vendor/github.com/urfave/cli/v2/flag_uint64.go new file mode 100644 index 000000000..8fc3289d8 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint64.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value uint64 + DefaultText string + Destination *uint64 + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Uint64Flag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Uint64Flag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *Uint64Flag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Uint64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Uint64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Uint64Flag) GetUsage() string { + return f.Usage +} + +// Apply populates the flag given the flag set and environment +func (f *Uint64Flag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valInt, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %q as uint64 value for flag %s: %s", val, f.Name, err) + } + + f.Value = valInt + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Uint64(name, f.Value, f.Usage) + } + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Uint64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (c *Context) Uint64(name string) uint64 { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/funcs.go b/vendor/github.com/urfave/cli/v2/funcs.go new file mode 100644 index 000000000..474c48faf --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/funcs.go @@ -0,0 +1,44 @@ +package cli + +// BashCompleteFunc is an action to execute when the shell completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error + +// ExitErrHandlerFunc is executed if provided in order to handle exitError values +// returned by Actions and Before/After functions. +type ExitErrHandlerFunc func(context *Context, err error) + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. +type FlagStringFunc func(Flag) string + +// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix +// text for a flag's full name. +type FlagNamePrefixFunc func(fullName []string, placeholder string) string + +// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help +// with the environment variable details. +type FlagEnvHintFunc func(envVars []string, str string) string + +// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help +// with the file path details. 
+type FlagFileHintFunc func(filePath, str string) string diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go new file mode 100644 index 000000000..c1e974a48 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/help.go @@ -0,0 +1,368 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" + "unicode/utf8" +) + +var helpCommand = &Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + _ = ShowAppHelp(c) + return nil + }, +} + +var helpSubcommand = &Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + return ShowSubcommandHelp(c) + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +// Prints help for the App or Command with custom template function. +type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) + +// HelpPrinter is a function that writes the help output. If not set explicitly, +// this calls HelpPrinterCustom using only the default template functions. +// +// If custom logic for printing help is required, this function can be +// overridden. If the ExtraInfo field is defined on an App, this function +// should not be modified, as HelpPrinterCustom will be used directly in order +// to capture the extra information. +var HelpPrinter helpPrinter = printHelp + +// HelpPrinterCustom is a function that writes the help output. It is used as +// the default implementation of HelpPrinter, and may be called directly if +// the ExtraInfo field is set on an App. +var HelpPrinterCustom helpPrinterCustom = printHelpCustom + +// VersionPrinter prints the version for the App +var VersionPrinter = printVersion + +// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code. +func ShowAppHelpAndExit(c *Context, exitCode int) { + _ = ShowAppHelp(c) + os.Exit(exitCode) +} + +// ShowAppHelp is an action that displays the help. 
+func ShowAppHelp(c *Context) error { + template := c.App.CustomAppHelpTemplate + if template == "" { + template = AppHelpTemplate + } + + if c.App.ExtraInfo == nil { + HelpPrinter(c.App.Writer, template, c.App) + return nil + } + + customAppData := func() map[string]interface{} { + return map[string]interface{}{ + "ExtraInfo": c.App.ExtraInfo, + } + } + HelpPrinterCustom(c.App.Writer, template, c.App, customAppData()) + + return nil +} + +// DefaultAppComplete prints the list of subcommands as the default app completion method +func DefaultAppComplete(c *Context) { + DefaultCompleteWithFlags(nil)(c) +} + +func printCommandSuggestions(commands []*Command, writer io.Writer) { + for _, command := range commands { + if command.Hidden { + continue + } + if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage) + } + } else { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s\n", name) + } + } + } +} + +func cliArgContains(flagName string) bool { + for _, name := range strings.Split(flagName, ",") { + name = strings.TrimSpace(name) + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 + } + flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + for _, a := range os.Args { + if a == flag { + return true + } + } + } + return false +} + +func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) { + cur := strings.TrimPrefix(lastArg, "-") + cur = strings.TrimPrefix(cur, "-") + for _, flag := range flags { + if bflag, ok := flag.(*BoolFlag); ok && bflag.Hidden { + continue + } + for _, name := range flag.Names() { + name = strings.TrimSpace(name) + // this will get total count utf8 letters in flag name + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 // resuse this count to generate single - or -- in flag completion + } + // if flag name has more than one utf8 letter and last argument in cli has -- prefix then + // skip flag completion for short flags example -v or -x + if strings.HasPrefix(lastArg, "--") && count == 1 { + continue + } + // match if last argument matches this flag and it is not repeated + if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(name) { + flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + _, _ = fmt.Fprintln(writer, flagCompletion) + } + } + } +} + +func DefaultCompleteWithFlags(cmd *Command) func(c *Context) { + return func(c *Context) { + if len(os.Args) > 2 { + lastArg := os.Args[len(os.Args)-2] + if strings.HasPrefix(lastArg, "-") { + printFlagSuggestions(lastArg, c.App.Flags, c.App.Writer) + if cmd != nil { + printFlagSuggestions(lastArg, cmd.Flags, c.App.Writer) + } + return + } + } + if cmd != nil { + printCommandSuggestions(cmd.Subcommands, c.App.Writer) + } else { + printCommandSuggestions(c.App.Commands, c.App.Writer) + } + } +} + +// ShowCommandHelpAndExit - exits with code after showing help +func ShowCommandHelpAndExit(c *Context, command string, code int) { + _ = ShowCommandHelp(c, command) + os.Exit(code) +} + +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { + // show the subcommand help for a command with subcommands + if command == "" { + HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) + return nil + } + + for _, c := range ctx.App.Commands { + if c.HasName(command) { + templ := c.CustomHelpTemplate + if templ == "" { + templ = CommandHelpTemplate + } + + 
HelpPrinter(ctx.App.Writer, templ, c) + + return nil + } + } + + if ctx.App.CommandNotFound == nil { + return Exit(fmt.Sprintf("No help topic for '%v'", command), 3) + } + + ctx.App.CommandNotFound(ctx, command) + return nil +} + +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + if c == nil { + return nil + } + + if c.Command != nil { + return ShowCommandHelp(c, c.Command.Name) + } + + return ShowCommandHelp(c, "") +} + +// ShowVersion prints the version number of the App +func ShowVersion(c *Context) { + VersionPrinter(c) +} + +func printVersion(c *Context) { + _, _ = fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) +} + +// ShowCompletions prints the lists of commands within a given context +func ShowCompletions(c *Context) { + a := c.App + if a != nil && a.BashComplete != nil { + a.BashComplete(c) + } +} + +// ShowCommandCompletions prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.App.Command(command) + if c != nil { + if c.BashComplete != nil { + c.BashComplete(ctx) + } else { + DefaultCompleteWithFlags(c)(ctx) + } + } + +} + +// printHelpCustom is the default implementation of HelpPrinterCustom. +// +// The customFuncs map will be combined with a default template.FuncMap to +// allow using arbitrary functions in template rendering. +func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) { + funcMap := template.FuncMap{ + "join": strings.Join, + } + for key, value := range customFuncs { + funcMap[key] = value + } + + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + + err := t.Execute(w, data) + if err != nil { + // If the writer is closed, t.Execute will fail, and there's nothing + // we can do to recover. 
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return + } + _ = w.Flush() +} + +func printHelp(out io.Writer, templ string, data interface{}) { + HelpPrinterCustom(out, templ, data, nil) +} + +func checkVersion(c *Context) bool { + found := false + for _, name := range VersionFlag.Names() { + if c.Bool(name) { + found = true + } + } + return found +} + +func checkHelp(c *Context) bool { + found := false + for _, name := range HelpFlag.Names() { + if c.Bool(name) { + found = true + } + } + return found +} + +func checkCommandHelp(c *Context, name string) bool { + if c.Bool("h") || c.Bool("help") { + _ = ShowCommandHelp(c, name) + return true + } + + return false +} + +func checkSubcommandHelp(c *Context) bool { + if c.Bool("h") || c.Bool("help") { + _ = ShowSubcommandHelp(c) + return true + } + + return false +} + +func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { + if !a.EnableBashCompletion { + return false, arguments + } + + pos := len(arguments) - 1 + lastArg := arguments[pos] + + if lastArg != "--generate-bash-completion" { + return false, arguments + } + + return true, arguments[:pos] +} + +func checkCompletions(c *Context) bool { + if !c.shellComplete { + return false + } + + if args := c.Args(); args.Present() { + name := args.First() + if cmd := c.App.Command(name); cmd != nil { + // let the command handle the completion + return false + } + } + + ShowCompletions(c) + return true +} + +func checkCommandCompletions(c *Context, name string) bool { + if !c.shellComplete { + return false + } + + ShowCommandCompletions(c, name) + return true +} diff --git a/vendor/github.com/urfave/cli/v2/parse.go b/vendor/github.com/urfave/cli/v2/parse.go new file mode 100644 index 000000000..7df17296a --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/parse.go @@ -0,0 +1,94 @@ +package cli + +import ( + "flag" + "strings" +) + +type iterativeParser interface { + newFlagSet() (*flag.FlagSet, error) + useShortOptionHandling() bool +} + +// To enable short-option handling (e.g., "-it" vs "-i -t") we have to +// iteratively catch parsing errors. This way we achieve LR parsing without +// transforming any arguments. Otherwise, there is no way we can discriminate +// combined short options from common arguments that should be left untouched. +// Pass `shellComplete` to continue parsing options on failure during shell +// completion when, the user-supplied options may be incomplete. +func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComplete bool) error { + for { + err := set.Parse(args) + if !ip.useShortOptionHandling() || err == nil { + if shellComplete { + return nil + } + return err + } + + errStr := err.Error() + trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: -") + if errStr == trimmed { + return err + } + + // regenerate the initial args with the split short opts + argsWereSplit := false + for i, arg := range args { + // skip args that are not part of the error message + if name := strings.TrimLeft(arg, "-"); name != trimmed { + continue + } + + // if we can't split, the error was accurate + shortOpts := splitShortOptions(set, arg) + if len(shortOpts) == 1 { + return err + } + + // swap current argument with the split version + args = append(args[:i], append(shortOpts, args[i+1:]...)...) 
+ argsWereSplit = true + break + } + + // This should be an impossible to reach code path, but in case the arg + // splitting failed to happen, this will prevent infinite loops + if !argsWereSplit { + return err + } + + // Since custom parsing failed, replace the flag set before retrying + newSet, err := ip.newFlagSet() + if err != nil { + return err + } + *set = *newSet + } +} + +func splitShortOptions(set *flag.FlagSet, arg string) []string { + shortFlagsExist := func(s string) bool { + for _, c := range s[1:] { + if f := set.Lookup(string(c)); f == nil { + return false + } + } + return true + } + + if !isSplittable(arg) || !shortFlagsExist(arg) { + return []string{arg} + } + + separated := make([]string, 0, len(arg)-1) + for _, flagChar := range arg[1:] { + separated = append(separated, "-"+string(flagChar)) + } + + return separated +} + +func isSplittable(flagArg string) bool { + return strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2 +} diff --git a/vendor/github.com/urfave/cli/v2/sort.go b/vendor/github.com/urfave/cli/v2/sort.go new file mode 100644 index 000000000..23d1c2f77 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/sort.go @@ -0,0 +1,29 @@ +package cli + +import "unicode" + +// lexicographicLess compares strings alphabetically considering case. +func lexicographicLess(i, j string) bool { + iRunes := []rune(i) + jRunes := []rune(j) + + lenShared := len(iRunes) + if lenShared > len(jRunes) { + lenShared = len(jRunes) + } + + for index := 0; index < lenShared; index++ { + ir := iRunes[index] + jr := jRunes[index] + + if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr { + return lir < ljr + } + + if ir != jr { + return ir < jr + } + } + + return i < j +} diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go new file mode 100644 index 000000000..aee3e0494 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/template.go @@ -0,0 +1,120 @@ +package cli + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var AppHelpTemplate = `NAME: + {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if len .Authors}} + +AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +GLOBAL OPTIONS: + {{range $index, $option := .VisibleFlags}}{{if $index}} + {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{.Copyright}}{{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
+var CommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +var MarkdownDocTemplate = `% {{ .App.Name }} 8 + +# NAME + +{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} + +# SYNOPSIS + +{{ .App.Name }} +{{ if .SynopsisArgs }} +` + "```" + ` +{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` +{{ end }}{{ if .App.UsageText }} +# DESCRIPTION + +{{ .App.UsageText }} +{{ end }} +**Usage**: + +` + "```" + ` +{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] +` + "```" + ` +{{ if .GlobalArgs }} +# GLOBAL OPTIONS +{{ range $v := .GlobalArgs }} +{{ $v }}{{ end }} +{{ end }}{{ if .Commands }} +# COMMANDS +{{ range $v := .Commands }} +{{ $v }}{{ end }}{{ end }}` + +var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion + +function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' + for i in (commandline -opc) + if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} + return 1 + end + end + return 0 +end + +{{ range $v := .Completions }}{{ $v }} +{{ end }}` diff --git a/vendor/github.com/vishvananda/netlink/.gitignore b/vendor/github.com/vishvananda/netlink/.gitignore index 9f11b755a..66f8fb502 100644 --- a/vendor/github.com/vishvananda/netlink/.gitignore +++ b/vendor/github.com/vishvananda/netlink/.gitignore @@ -1 +1,2 @@ .idea/ +.vscode/ diff --git a/vendor/github.com/vishvananda/netlink/.travis.yml b/vendor/github.com/vishvananda/netlink/.travis.yml deleted file mode 100644 index 7d14af4d6..000000000 --- a/vendor/github.com/vishvananda/netlink/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go -go: - - "1.10.x" - - "1.11.x" - - "1.12.x" -before_script: - # make sure we keep path in tact when we sudo - - sudo sed -i -e 's/^Defaults\tsecure_path.*$//' /etc/sudoers - # modprobe ip_gre or else the first gre device can't be deleted - - sudo modprobe ip_gre - # modprobe nf_conntrack for the conntrack testing - - sudo modprobe nf_conntrack - - sudo modprobe nf_conntrack_netlink - - sudo modprobe nf_conntrack_ipv4 - - sudo modprobe nf_conntrack_ipv6 - - sudo modprobe sch_hfsc -install: - - go get github.com/vishvananda/netns -go_import_path: github.com/vishvananda/netlink diff --git a/vendor/github.com/vishvananda/netlink/README.md b/vendor/github.com/vishvananda/netlink/README.md index 
a88e2f418..0128bc67d 100644 --- a/vendor/github.com/vishvananda/netlink/README.md +++ b/vendor/github.com/vishvananda/netlink/README.md @@ -1,6 +1,6 @@ # netlink - netlink library for go # -[![Build Status](https://travis-ci.org/vishvananda/netlink.png?branch=master)](https://travis-ci.org/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink) +![Build Status](https://github.com/vishvananda/netlink/actions/workflows/main.yml/badge.svg) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink) The netlink package provides a simple netlink library for go. Netlink is the interface a user-space program in linux uses to communicate with diff --git a/vendor/github.com/vishvananda/netlink/addr.go b/vendor/github.com/vishvananda/netlink/addr.go index f08c95696..653f540db 100644 --- a/vendor/github.com/vishvananda/netlink/addr.go +++ b/vendor/github.com/vishvananda/netlink/addr.go @@ -17,6 +17,7 @@ type Addr struct { Broadcast net.IP PreferedLft int ValidLft int + LinkIndex int } // String returns $ip/$netmask $label diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go index 28746d5af..218ab2379 100644 --- a/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -11,9 +11,6 @@ import ( "golang.org/x/sys/unix" ) -// IFA_FLAGS is a u32 attribute. -const IFA_FLAGS = 0x8 - // AddrAdd will add an IP address to a link device. // // Equivalent to: `ip addr add $addr dev $link` @@ -77,17 +74,19 @@ func (h *Handle) AddrDel(link Link, addr *Addr) error { } func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error { - base := link.Attrs() - if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) { - return fmt.Errorf("label must begin with interface name") - } - h.ensureIndex(base) - family := nl.GetIPFamily(addr.IP) - msg := nl.NewIfAddrmsg(family) - msg.Index = uint32(base.Index) msg.Scope = uint8(addr.Scope) + if link == nil { + msg.Index = uint32(addr.LinkIndex) + } else { + base := link.Attrs() + if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) { + return fmt.Errorf("label must begin with interface name") + } + h.ensureIndex(base) + msg.Index = uint32(base.Index) + } mask := addr.Mask if addr.Peer != nil { mask = addr.Peer.Mask @@ -125,7 +124,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error } else { b := make([]byte, 4) native.PutUint32(b, uint32(addr.Flags)) - flagsData := nl.NewRtAttr(IFA_FLAGS, b) + flagsData := nl.NewRtAttr(unix.IFA_FLAGS, b) req.AddData(flagsData) } } @@ -156,10 +155,10 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error // value should be "forever". To compensate for that, only add the attributes if at least one of the values is // non-zero, which means the caller has explicitly set them if addr.ValidLft > 0 || addr.PreferedLft > 0 { - cachedata := nl.IfaCacheInfo{ - IfaValid: uint32(addr.ValidLft), - IfaPrefered: uint32(addr.PreferedLft), - } + cachedata := nl.IfaCacheInfo{unix.IfaCacheinfo{ + Valid: uint32(addr.ValidLft), + Prefered: uint32(addr.PreferedLft), + }} req.AddData(nl.NewRtAttr(unix.IFA_CACHEINFO, cachedata.Serialize())) } @@ -179,7 +178,7 @@ func AddrList(link Link, family int) ([]Addr, error) { // The list can be filtered by link and ip family. 
func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) - msg := nl.NewIfInfomsg(family) + msg := nl.NewIfAddrmsg(family) req.AddData(msg) msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) @@ -196,12 +195,12 @@ func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { var res []Addr for _, m := range msgs { - addr, msgFamily, ifindex, err := parseAddr(m) + addr, msgFamily, err := parseAddr(m) if err != nil { return res, err } - if link != nil && ifindex != indexFilter { + if link != nil && addr.LinkIndex != indexFilter { // Ignore messages from other interfaces continue } @@ -216,11 +215,11 @@ func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { return res, nil } -func parseAddr(m []byte) (addr Addr, family, index int, err error) { +func parseAddr(m []byte) (addr Addr, family int, err error) { msg := nl.DeserializeIfAddrmsg(m) family = -1 - index = -1 + addr.LinkIndex = -1 attrs, err1 := nl.ParseRouteAttr(m[msg.Len():]) if err1 != nil { @@ -229,7 +228,7 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { } family = int(msg.Family) - index = int(msg.Index) + addr.LinkIndex = int(msg.Index) var local, dst *net.IPNet for _, attr := range attrs { @@ -254,12 +253,12 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { addr.Broadcast = attr.Value case unix.IFA_LABEL: addr.Label = string(attr.Value[:len(attr.Value)-1]) - case IFA_FLAGS: + case unix.IFA_FLAGS: addr.Flags = int(native.Uint32(attr.Value[0:4])) - case nl.IFA_CACHEINFO: + case unix.IFA_CACHEINFO: ci := nl.DeserializeIfaCacheInfo(attr.Value) - addr.PreferedLft = int(ci.IfaPrefered) - addr.ValidLft = int(ci.IfaValid) + addr.PreferedLft = int(ci.Prefered) + addr.ValidLft = int(ci.Valid) } } @@ -271,7 +270,7 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { // But obviously, as there are IPv6 PtP addresses, too, // IFA_LOCAL should also be handled for IPv6. if local != nil { - if family == FAMILY_V4 && local.IP.Equal(dst.IP) { + if family == FAMILY_V4 && dst != nil && local.IP.Equal(dst.IP) { addr.IPNet = dst } else { addr.IPNet = local @@ -299,22 +298,24 @@ type AddrUpdate struct { // AddrSubscribe takes a chan down which notifications will be sent // when addresses change. Close the 'done' chan to stop subscription. func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0) + return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) } // AddrSubscribeAt works like AddrSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0) + return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) } // AddrSubscribeOptions contains a set of options to use with // AddrSubscribeWithOptions. 
type AddrSubscribeOptions struct { - Namespace *netns.NsHandle - ErrorCallback func(error) - ListExisting bool - ReceiveBufferSize int + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval } // AddrSubscribeWithOptions work like AddrSubscribe but enable to @@ -325,26 +326,33 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize) + return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) } -func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int) error { +func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvBufForce bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) if err != nil { return err } - if done != nil { - go func() { - <-done - s.Close() - }() + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } } if rcvbuf != 0 { - err = pkgHandle.SetSocketReceiveBufferSize(rcvbuf, false) + err = s.SetReceiveBufferSize(rcvbuf, rcvBufForce) if err != nil { return err } } + if done != nil { + go func() { + <-done + s.Close() + }() + } if listExisting { req := pkgHandle.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) @@ -360,7 +368,8 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c msgs, from, err := s.Receive() if err != nil { if cberr != nil { - cberr(err) + cberr(fmt.Errorf("Receive failed: %v", + err)) } return } @@ -375,7 +384,6 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c continue } if m.Header.Type == unix.NLMSG_ERROR { - native := nl.NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { continue @@ -394,7 +402,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c continue } - addr, _, ifindex, err := parseAddr(m.Data) + addr, _, err := parseAddr(m.Data) if err != nil { if cberr != nil { cberr(fmt.Errorf("could not parse address: %v", err)) @@ -403,7 +411,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c } ch <- AddrUpdate{LinkAddress: *addr.IPNet, - LinkIndex: ifindex, + LinkIndex: addr.LinkIndex, NewAddr: msgType == unix.RTM_NEWADDR, Flags: addr.Flags, Scope: addr.Scope, diff --git a/vendor/github.com/vishvananda/netlink/bpf_linux.go b/vendor/github.com/vishvananda/netlink/bpf_linux.go index 6631626bf..96befbfe0 100644 --- a/vendor/github.com/vishvananda/netlink/bpf_linux.go +++ b/vendor/github.com/vishvananda/netlink/bpf_linux.go @@ -16,6 +16,30 @@ const ( BPF_PROG_TYPE_SCHED_ACT BPF_PROG_TYPE_TRACEPOINT BPF_PROG_TYPE_XDP + BPF_PROG_TYPE_PERF_EVENT + BPF_PROG_TYPE_CGROUP_SKB + BPF_PROG_TYPE_CGROUP_SOCK + BPF_PROG_TYPE_LWT_IN + BPF_PROG_TYPE_LWT_OUT + BPF_PROG_TYPE_LWT_XMIT + BPF_PROG_TYPE_SOCK_OPS + BPF_PROG_TYPE_SK_SKB + BPF_PROG_TYPE_CGROUP_DEVICE + BPF_PROG_TYPE_SK_MSG + BPF_PROG_TYPE_RAW_TRACEPOINT + BPF_PROG_TYPE_CGROUP_SOCK_ADDR + 
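The new ReceiveTimeout and ReceiveBufferForceSize options can be exercised as in the sketch below; the buffer size, timeout and logging are illustrative, and note that a timed-out read ends the subscription, since the receive loop returns on error.

package main

import (
    "fmt"
    "log"

    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    updates := make(chan netlink.AddrUpdate)
    done := make(chan struct{})
    defer close(done)

    opts := netlink.AddrSubscribeOptions{
        ListExisting:           true,
        ReceiveBufferSize:      1 << 20, // 1 MiB socket buffer (illustrative)
        ReceiveBufferForceSize: true,    // force the size (SO_RCVBUFFORCE semantics, assumed)
        ReceiveTimeout:         &unix.Timeval{Sec: 30},
        ErrorCallback:          func(err error) { log.Println("addr subscribe:", err) },
    }
    if err := netlink.AddrSubscribeWithOptions(updates, done, opts); err != nil {
        log.Fatal(err)
    }
    for u := range updates {
        fmt.Printf("ifindex=%d addr=%s new=%v\n", u.LinkIndex, u.LinkAddress.String(), u.NewAddr)
    }
}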
BPF_PROG_TYPE_LWT_SEG6LOCAL + BPF_PROG_TYPE_LIRC_MODE2 + BPF_PROG_TYPE_SK_REUSEPORT + BPF_PROG_TYPE_FLOW_DISSECTOR + BPF_PROG_TYPE_CGROUP_SYSCTL + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE + BPF_PROG_TYPE_CGROUP_SOCKOPT + BPF_PROG_TYPE_TRACING + BPF_PROG_TYPE_STRUCT_OPS + BPF_PROG_TYPE_EXT + BPF_PROG_TYPE_LSM + BPF_PROG_TYPE_SK_LOOKUP ) type BPFAttr struct { diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go index 6e1224c47..6c340b0ce 100644 --- a/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -63,7 +63,19 @@ func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanAdd adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, 0, pvid, untagged, self, master) +} + +// BridgeVlanAddRange adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return pkgHandle.BridgeVlanAddRange(link, vid, vidEnd, pvid, untagged, self, master) +} + +// BridgeVlanAddRange adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func (h *Handle) BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, vidEnd, pvid, untagged, self, master) } // BridgeVlanDel adds a new vlan filter entry @@ -75,10 +87,22 @@ func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanDel adds a new vlan filter entry // Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, 0, pvid, untagged, self, master) } -func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error { +// BridgeVlanDelRange adds a new vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return pkgHandle.BridgeVlanDelRange(link, vid, vidEnd, pvid, untagged, self, master) +} + +// BridgeVlanDelRange adds a new vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func (h *Handle) BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, vidEnd, pvid, untagged, self, master) +} + +func (h *Handle) bridgeVlanModify(cmd int, link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { base := link.Attrs() h.ensureIndex(base) req := h.newNetlinkRequest(cmd, unix.NLM_F_ACK) @@ -105,7 +129,20 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged if 
untagged { vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED } - br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + + if vidEnd != 0 { + vlanEndInfo := &nl.BridgeVlanInfo{Vid: vidEnd} + vlanEndInfo.Flags = vlanInfo.Flags + + vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_BEGIN + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + + vlanEndInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_END + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanEndInfo.Serialize()) + } else { + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + } + req.AddData(br) _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err diff --git a/vendor/github.com/vishvananda/netlink/chain.go b/vendor/github.com/vishvananda/netlink/chain.go new file mode 100644 index 000000000..1d1c144e9 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/chain.go @@ -0,0 +1,22 @@ +package netlink + +import ( + "fmt" +) + +// Chain contains the attributes of a Chain +type Chain struct { + Parent uint32 + Chain uint32 +} + +func (c Chain) String() string { + return fmt.Sprintf("{Parent: %d, Chain: %d}", c.Parent, c.Chain) +} + +func NewChain(parent uint32, chain uint32) Chain { + return Chain{ + Parent: parent, + Chain: chain, + } +} diff --git a/vendor/github.com/vishvananda/netlink/chain_linux.go b/vendor/github.com/vishvananda/netlink/chain_linux.go new file mode 100644 index 000000000..d9f441613 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/chain_linux.go @@ -0,0 +1,112 @@ +package netlink + +import ( + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// ChainDel will delete a chain from the system. +func ChainDel(link Link, chain Chain) error { + // Equivalent to: `tc chain del $chain` + return pkgHandle.ChainDel(link, chain) +} + +// ChainDel will delete a chain from the system. +// Equivalent to: `tc chain del $chain` +func (h *Handle) ChainDel(link Link, chain Chain) error { + return h.chainModify(unix.RTM_DELCHAIN, 0, link, chain) +} + +// ChainAdd will add a chain to the system. +// Equivalent to: `tc chain add` +func ChainAdd(link Link, chain Chain) error { + return pkgHandle.ChainAdd(link, chain) +} + +// ChainAdd will add a chain to the system. +// Equivalent to: `tc chain add` +func (h *Handle) ChainAdd(link Link, chain Chain) error { + return h.chainModify( + unix.RTM_NEWCHAIN, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, + link, + chain) +} + +func (h *Handle) chainModify(cmd, flags int, link Link, chain Chain) error { + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + Parent: chain.Parent, + } + req.AddData(msg) + req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(chain.Chain))) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// ChainList gets a list of chains in the system. +// Equivalent to: `tc chain list`. +// The list can be filtered by link. +func ChainList(link Link, parent uint32) ([]Chain, error) { + return pkgHandle.ChainList(link, parent) +} + +// ChainList gets a list of chains in the system. +// Equivalent to: `tc chain list`. +// The list can be filtered by link. 
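A brief sketch of the new VLAN range helper; "br0" and the VLAN range are placeholders and error handling is abbreviated.

package main

import (
    "log"

    "github.com/vishvananda/netlink"
)

func main() {
    port, err := netlink.LinkByName("br0") // placeholder bridge port name
    if err != nil {
        log.Fatal(err)
    }
    // One request programs VLANs 100-110; the serialized message carries two
    // IFLA_BRIDGE_VLAN_INFO attributes flagged RANGE_BEGIN and RANGE_END.
    if err := netlink.BridgeVlanAddRange(port, 100, 110, false, false, true, false); err != nil {
        log.Fatal(err)
    }
}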
+func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { + req := h.newNetlinkRequest(unix.RTM_GETCHAIN, unix.NLM_F_DUMP) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + Parent: parent, + } + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN) + if err != nil { + return nil, err + } + + var res []Chain + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + // skip chains from other interfaces + if link != nil && msg.Ifindex != index { + continue + } + + var chain Chain + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_CHAIN: + chain.Chain = native.Uint32(attr.Value) + chain.Parent = parent + } + } + res = append(res, chain) + } + + return res, nil +} diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go index dcc22d9e9..e686f6745 100644 --- a/vendor/github.com/vishvananda/netlink/class.go +++ b/vendor/github.com/vishvananda/netlink/class.go @@ -47,6 +47,7 @@ type ClassStatistics struct { Basic *GnetStatsBasic Queue *GnetStatsQueue RateEst *GnetStatsRateEst + BasicHw *GnetStatsBasic // Hardward statistics added in kernel 4.20 } // NewClassStatistics Construct a ClassStatistics struct which fields are all initialized by 0. @@ -55,6 +56,7 @@ func NewClassStatistics() *ClassStatistics { Basic: &GnetStatsBasic{}, Queue: &GnetStatsQueue{}, RateEst: &GnetStatsRateEst{}, + BasicHw: &GnetStatsBasic{}, } } @@ -132,7 +134,10 @@ func (class *GenericClass) Type() string { return class.ClassType } -// ServiceCurve is the way the HFSC curve are represented +// ServiceCurve is a nondecreasing function of some time unit, returning the amount of service +// (an allowed or allocated amount of bandwidth) at some specific point in time. The purpose of it +// should be subconsciously obvious: if a class was allowed to transfer not less than the amount +// specified by its service curve, then the service curve is not violated. type ServiceCurve struct { m1 uint32 d uint32 @@ -144,6 +149,21 @@ func (c *ServiceCurve) Attrs() (uint32, uint32, uint32) { return c.m1, c.d, c.m2 } +// Burst returns the burst rate (m1) of the curve +func (c *ServiceCurve) Burst() uint32 { + return c.m1 +} + +// Delay return the delay (d) of the curve +func (c *ServiceCurve) Delay() uint32 { + return c.d +} + +// Rate returns the rate (m2) of the curve +func (c *ServiceCurve) Rate() uint32 { + return c.m2 +} + // HfscClass is a representation of the HFSC class type HfscClass struct { ClassAttrs @@ -152,35 +172,44 @@ type HfscClass struct { Usc ServiceCurve } -// SetUsc sets the Usc curve +// SetUsc sets the USC curve. The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds. func (hfsc *HfscClass) SetUsc(m1 uint32, d uint32, m2 uint32) { - hfsc.Usc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.Usc = ServiceCurve{m1: m1, d: d, m2: m2} } -// SetFsc sets the Fsc curve +// SetFsc sets the Fsc curve. The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds. func (hfsc *HfscClass) SetFsc(m1 uint32, d uint32, m2 uint32) { - hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.Fsc = ServiceCurve{m1: m1, d: d, m2: m2} } -// SetRsc sets the Rsc curve +// SetRsc sets the Rsc curve. 
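The new tc chain API can be driven as below; "eth0", the parent handle 0xfffffff2 (assumed to be the clsact ingress parent) and chain index 5 are illustrative assumptions.

package main

import (
    "fmt"
    "log"

    "github.com/vishvananda/netlink"
)

func main() {
    link, err := netlink.LinkByName("eth0") // placeholder device
    if err != nil {
        log.Fatal(err)
    }
    const clsactIngress = 0xfffffff2 // ffff:fff2, assumed clsact ingress parent
    chain := netlink.NewChain(clsactIngress, 5)
    if err := netlink.ChainAdd(link, chain); err != nil {
        log.Fatal(err)
    }
    chains, err := netlink.ChainList(link, clsactIngress)
    if err != nil {
        log.Fatal(err)
    }
    for _, c := range chains {
        fmt.Println(c.String())
    }
}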
The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds. func (hfsc *HfscClass) SetRsc(m1 uint32, d uint32, m2 uint32) { - hfsc.Rsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.Rsc = ServiceCurve{m1: m1, d: d, m2: m2} } -// SetSC implements the SC from the tc CLI +// SetSC implements the SC from the `tc` CLI. This function behaves the same as if one would set the +// USC through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. func (hfsc *HfscClass) SetSC(m1 uint32, d uint32, m2 uint32) { - hfsc.Rsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} - hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.SetRsc(m1, d, m2) + hfsc.SetFsc(m1, d, m2) } -// SetUL implements the UL from the tc CLI +// SetUL implements the UL from the `tc` CLI. This function behaves the same as if one would set the +// USC through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. func (hfsc *HfscClass) SetUL(m1 uint32, d uint32, m2 uint32) { - hfsc.Usc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.SetUsc(m1, d, m2) } -// SetLS implements the LS from the tc CLI +// SetLS implements the LS from the `tc` CLI. This function behaves the same as if one would set the +// USC through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. func (hfsc *HfscClass) SetLS(m1 uint32, d uint32, m2 uint32) { - hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8} + hfsc.SetFsc(m1, d, m2) } // NewHfscClass returns a new HFSC struct with the set parameters @@ -193,6 +222,7 @@ func NewHfscClass(attrs ClassAttrs) *HfscClass { } } +// String() returns a string that contains the information and attributes of the HFSC class func (hfsc *HfscClass) String() string { return fmt.Sprintf( "{%s -- {RSC: {m1=%d d=%d m2=%d}} {FSC: {m1=%d d=%d m2=%d}} {USC: {m1=%d d=%d m2=%d}}}", diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index 31091e501..a82eb09de 100644 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -43,12 +43,12 @@ func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass { if buffer == 0 { buffer = uint32(float64(rate)/Hz() + float64(mtu)) } - buffer = uint32(Xmittime(rate, buffer)) + buffer = Xmittime(rate, buffer) if cbuffer == 0 { cbuffer = uint32(float64(ceil)/Hz() + float64(mtu)) } - cbuffer = uint32(Xmittime(ceil, cbuffer)) + cbuffer = Xmittime(ceil, cbuffer) return &HtbClass{ ClassAttrs: attrs, @@ -56,9 +56,9 @@ func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass { Ceil: ceil, Buffer: buffer, Cbuffer: cbuffer, - Quantum: 10, Level: 0, - Prio: 0, + Prio: cattrs.Prio, + Quantum: cattrs.Quantum, } } @@ -176,12 +176,21 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { options.AddRtAttr(nl.TCA_HTB_PARMS, opt.Serialize()) options.AddRtAttr(nl.TCA_HTB_RTAB, SerializeRtab(rtab)) options.AddRtAttr(nl.TCA_HTB_CTAB, SerializeRtab(ctab)) + if htb.Rate >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_HTB_RATE64, nl.Uint64Attr(htb.Rate)) + } + if htb.Ceil >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_HTB_CEIL64, nl.Uint64Attr(htb.Ceil)) + } case "hfsc": hfsc := class.(*HfscClass) opt := nl.HfscCopt{} - opt.Rsc.Set(hfsc.Rsc.Attrs()) - opt.Fsc.Set(hfsc.Fsc.Attrs()) - opt.Usc.Set(hfsc.Usc.Attrs()) + rm1, rd, rm2 := hfsc.Rsc.Attrs() + opt.Rsc.Set(rm1/8, rd, rm2/8) + 
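Because the /8 conversion moved out of the setters, HFSC curves are now configured in bit/s directly. A sketch under assumed handles (qdisc 1:, class 1:10), a placeholder device and illustrative rates:

package main

import (
    "log"

    "github.com/vishvananda/netlink"
)

func main() {
    link, err := netlink.LinkByName("eth0") // placeholder device
    if err != nil {
        log.Fatal(err)
    }
    attrs := netlink.ClassAttrs{
        LinkIndex: link.Attrs().Index,
        Parent:    netlink.MakeHandle(1, 0),  // assumed HFSC qdisc 1:
        Handle:    netlink.MakeHandle(1, 10), // class 1:10
    }
    c := netlink.NewHfscClass(attrs)
    // Bandwidth is passed in bit/s and the delay in ms; the conversion to bytes
    // now happens when the class is serialized, not in the setters.
    c.SetSC(10_000_000, 20, 5_000_000) // 10 Mbit/s for 20 ms, then 5 Mbit/s
    c.SetUL(0, 0, 20_000_000)          // upper limit 20 Mbit/s
    if err := netlink.ClassAdd(c); err != nil {
        log.Fatal(err)
    }
}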
fm1, fd, fm2 := hfsc.Fsc.Attrs() + opt.Fsc.Set(fm1/8, fd, fm2/8) + um1, ud, um2 := hfsc.Usc.Attrs() + opt.Usc.Set(um1/8, ud, um2/8) options.AddRtAttr(nl.TCA_HFSC_RSC, nl.SerializeHfscCurve(&opt.Rsc)) options.AddRtAttr(nl.TCA_HFSC_FSC, nl.SerializeHfscCurve(&opt.Fsc)) options.AddRtAttr(nl.TCA_HFSC_USC, nl.SerializeHfscCurve(&opt.Usc)) @@ -303,6 +312,10 @@ func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, erro htb.Quantum = opt.Quantum htb.Level = opt.Level htb.Prio = opt.Prio + case nl.TCA_HTB_RATE64: + htb.Rate = native.Uint64(datum.Value[0:8]) + case nl.TCA_HTB_CEIL64: + htb.Ceil = native.Uint64(datum.Value[0:8]) } } return detailed, nil @@ -315,11 +328,11 @@ func parseHfscClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, err m1, d, m2 := nl.DeserializeHfscCurve(datum.Value).Attrs() switch datum.Attr.Type { case nl.TCA_HFSC_RSC: - hfsc.Rsc = ServiceCurve{m1: m1, d: d, m2: m2} + hfsc.Rsc = ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} case nl.TCA_HFSC_FSC: - hfsc.Fsc = ServiceCurve{m1: m1, d: d, m2: m2} + hfsc.Fsc = ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} case nl.TCA_HFSC_USC: - hfsc.Usc = ServiceCurve{m1: m1, d: d, m2: m2} + hfsc.Usc = ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} } } return detailed, nil @@ -328,7 +341,6 @@ func parseHfscClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, err func parseTcStats(data []byte) (*ClassStatistics, error) { buf := &bytes.Buffer{} buf.Write(data) - native := nl.NativeEndian() tcStats := &tcStats{} if err := binary.Read(buf, native, tcStats); err != nil { return nil, err @@ -350,7 +362,6 @@ func parseTcStats(data []byte) (*ClassStatistics, error) { func parseGnetStats(data []byte, gnetStats interface{}) error { buf := &bytes.Buffer{} buf.Write(data) - native := nl.NativeEndian() return binary.Read(buf, native, gnetStats) } @@ -377,6 +388,11 @@ func parseTcStats2(data []byte) (*ClassStatistics, error) { return nil, fmt.Errorf("Failed to parse ClassStatistics.RateEst with: %v\n%s", err, hex.Dump(datum.Value)) } + case nl.TCA_STATS_BASIC_HW: + if err := parseGnetStats(datum.Value, stats.BasicHw); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.BasicHw with: %v\n%s", + err, hex.Dump(datum.Value)) + } } } diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index 4bff0dcba..ba022453b 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "net" + "time" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" @@ -54,10 +55,30 @@ func ConntrackTableFlush(table ConntrackTableType) error { return pkgHandle.ConntrackTableFlush(table) } +// ConntrackCreate creates a new conntrack flow in the desired table +// conntrack -I [table] Create a conntrack or expectation +func ConntrackCreate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + return pkgHandle.ConntrackCreate(table, family, flow) +} + +// ConntrackUpdate updates an existing conntrack flow in the desired table using the handle +// conntrack -U [table] Update a conntrack +func ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + return pkgHandle.ConntrackUpdate(table, family, flow) +} + // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter // conntrack -D [table] parameters Delete conntrack or expectation +// +// Deprecated: use 
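On the HTB side, Prio and Quantum are now taken from the caller's attributes, and very large rates are carried via the new 64-bit attributes. A sketch with placeholder handles and rates (given in bit/s, which NewHtbClass converts internally):

package main

import (
    "log"

    "github.com/vishvananda/netlink"
)

func main() {
    link, err := netlink.LinkByName("eth0") // placeholder device
    if err != nil {
        log.Fatal(err)
    }
    attrs := netlink.ClassAttrs{
        LinkIndex: link.Attrs().Index,
        Parent:    netlink.MakeHandle(1, 0),  // assumed HTB qdisc 1:
        Handle:    netlink.MakeHandle(1, 20), // class 1:20
    }
    htb := netlink.NewHtbClass(attrs, netlink.HtbClassAttrs{
        Rate:    40_000_000_000, // 40 Gbit/s (illustrative)
        Ceil:    40_000_000_000,
        Prio:    1,    // honoured now instead of being reset to 0
        Quantum: 1514, // honoured now instead of being forced to 10
    })
    // When the resulting class rate no longer fits in 32 bits, classPayload also
    // emits TCA_HTB_RATE64 / TCA_HTB_CEIL64 alongside the legacy parameters.
    if err := netlink.ClassAdd(htb); err != nil {
        log.Fatal(err)
    }
}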
[ConntrackDeleteFilter] instead. func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { - return pkgHandle.ConntrackDeleteFilter(table, family, filter) + return pkgHandle.ConntrackDeleteFilters(table, family, filter) +} + +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters +// conntrack -D [table] parameters Delete conntrack or expectation +func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + return pkgHandle.ConntrackDeleteFilters(table, family, filters...) } // ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed @@ -86,9 +107,51 @@ func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { return err } +// ConntrackCreate creates a new conntrack flow in the desired table using the handle +// conntrack -I [table] Create a conntrack or expectation +func (h *Handle) ConntrackCreate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_NEW, unix.NLM_F_ACK|unix.NLM_F_CREATE) + attr, err := flow.toNlData() + if err != nil { + return err + } + + for _, a := range attr { + req.AddData(a) + } + + _, err = req.Execute(unix.NETLINK_NETFILTER, 0) + return err +} + +// ConntrackUpdate updates an existing conntrack flow in the desired table using the handle +// conntrack -U [table] Update a conntrack +func (h *Handle) ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_NEW, unix.NLM_F_ACK|unix.NLM_F_REPLACE) + attr, err := flow.toNlData() + if err != nil { + return err + } + + for _, a := range attr { + req.AddData(a) + } + + _, err = req.Execute(unix.NETLINK_NETFILTER, 0) + return err +} + // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed // conntrack -D [table] parameters Delete conntrack or expectation +// +// Deprecated: use [Handle.ConntrackDeleteFilters] instead. 
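ConntrackCreate and ConntrackUpdate accept a fully populated ConntrackFlow; the addresses, ports, mark, timeout and TCP state value below are illustrative only.

package main

import (
    "log"
    "net"

    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    flow := &netlink.ConntrackFlow{
        FamilyType: netlink.FAMILY_V4,
        Forward: netlink.IPTuple{
            SrcIP:    net.ParseIP("10.0.0.1").To4(),
            DstIP:    net.ParseIP("10.0.0.2").To4(),
            SrcPort:  48000,
            DstPort:  443,
            Protocol: unix.IPPROTO_TCP,
        },
        Reverse: netlink.IPTuple{
            SrcIP:    net.ParseIP("10.0.0.2").To4(),
            DstIP:    net.ParseIP("10.0.0.1").To4(),
            SrcPort:  443,
            DstPort:  48000,
            Protocol: unix.IPPROTO_TCP,
        },
        Mark:      42,
        TimeOut:   120,                             // seconds
        ProtoInfo: &netlink.ProtoInfoTCP{State: 3}, // 3 = TCP established (assumed value)
    }
    if err := netlink.ConntrackCreate(netlink.ConntrackTable, unix.AF_INET, flow); err != nil {
        log.Fatal(err)
    }
}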
func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { + return h.ConntrackDeleteFilters(table, family, filter) +} + +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed +// conntrack -D [table] parameters Delete conntrack or expectation +func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { res, err := h.dumpConntrackTable(table, family) if err != nil { return 0, err @@ -97,12 +160,16 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami var matched uint for _, dataRaw := range res { flow := parseRawData(dataRaw) - if match := filter.MatchConntrackFlow(flow); match { - req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) - // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already - req2.AddRawData(dataRaw[4:]) - req2.Execute(unix.NETLINK_NETFILTER, 0) - matched++ + for _, filter := range filters { + if match := filter.MatchConntrackFlow(flow); match { + req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) + // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already + req2.AddRawData(dataRaw[4:]) + req2.Execute(unix.NETLINK_NETFILTER, 0) + matched++ + // flow is already deleted, no need to match on other filters and continue to the next flow. + break + } } } @@ -127,10 +194,44 @@ func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) return req.Execute(unix.NETLINK_NETFILTER, 0) } +// ProtoInfo wraps an L4-protocol structure - roughly corresponds to the +// __nfct_protoinfo union found in libnetfilter_conntrack/include/internal/object.h. +// Currently, only protocol names, and TCP state is supported. +type ProtoInfo interface { + Protocol() string +} + +// ProtoInfoTCP corresponds to the `tcp` struct of the __nfct_protoinfo union. +// Only TCP state is currently supported. +type ProtoInfoTCP struct { + State uint8 +} +// Protocol returns "tcp". +func (*ProtoInfoTCP) Protocol() string {return "tcp"} +func (p *ProtoInfoTCP) toNlData() ([]*nl.RtAttr, error) { + ctProtoInfo := nl.NewRtAttr(unix.NLA_F_NESTED | nl.CTA_PROTOINFO, []byte{}) + ctProtoInfoTCP := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_PROTOINFO_TCP, []byte{}) + ctProtoInfoTCPState := nl.NewRtAttr(nl.CTA_PROTOINFO_TCP_STATE, nl.Uint8Attr(p.State)) + ctProtoInfoTCP.AddChild(ctProtoInfoTCPState) + ctProtoInfo.AddChild(ctProtoInfoTCP) + + return []*nl.RtAttr{ctProtoInfo}, nil +} + +// ProtoInfoSCTP only supports the protocol name. +type ProtoInfoSCTP struct {} +// Protocol returns "sctp". +func (*ProtoInfoSCTP) Protocol() string {return "sctp"} + +// ProtoInfoDCCP only supports the protocol name. +type ProtoInfoDCCP struct {} +// Protocol returns "dccp". 
+func (*ProtoInfoDCCP) Protocol() string {return "dccp"} + // The full conntrack flow structure is very complicated and can be found in the file: // http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h // For the time being, the structure below allows to parse and extract the base information of a flow -type ipTuple struct { +type IPTuple struct { Bytes uint64 DstIP net.IP DstPort uint16 @@ -140,21 +241,150 @@ type ipTuple struct { SrcPort uint16 } +// toNlData generates the inner fields of a nested tuple netlink datastructure +// does not generate the "nested"-flagged outer message. +func (t *IPTuple) toNlData(family uint8) ([]*nl.RtAttr, error) { + + var srcIPsFlag, dstIPsFlag int + if family == nl.FAMILY_V4 { + srcIPsFlag = nl.CTA_IP_V4_SRC + dstIPsFlag = nl.CTA_IP_V4_DST + } else if family == nl.FAMILY_V6 { + srcIPsFlag = nl.CTA_IP_V6_SRC + dstIPsFlag = nl.CTA_IP_V6_DST + } else { + return []*nl.RtAttr{}, fmt.Errorf("couldn't generate netlink message for tuple due to unrecognized FamilyType '%d'", family) + } + + ctTupleIP := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_IP, nil) + ctTupleIPSrc := nl.NewRtAttr(srcIPsFlag, t.SrcIP) + ctTupleIP.AddChild(ctTupleIPSrc) + ctTupleIPDst := nl.NewRtAttr(dstIPsFlag, t.DstIP) + ctTupleIP.AddChild(ctTupleIPDst) + + ctTupleProto := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_PROTO, nil) + ctTupleProtoNum := nl.NewRtAttr(nl.CTA_PROTO_NUM, []byte{t.Protocol}) + ctTupleProto.AddChild(ctTupleProtoNum) + ctTupleProtoSrcPort := nl.NewRtAttr(nl.CTA_PROTO_SRC_PORT, nl.BEUint16Attr(t.SrcPort)) + ctTupleProto.AddChild(ctTupleProtoSrcPort) + ctTupleProtoDstPort := nl.NewRtAttr(nl.CTA_PROTO_DST_PORT, nl.BEUint16Attr(t.DstPort)) + ctTupleProto.AddChild(ctTupleProtoDstPort, ) + + return []*nl.RtAttr{ctTupleIP, ctTupleProto}, nil +} + type ConntrackFlow struct { FamilyType uint8 - Forward ipTuple - Reverse ipTuple + Forward IPTuple + Reverse IPTuple Mark uint32 + Zone uint16 + TimeStart uint64 + TimeStop uint64 + TimeOut uint32 + Labels []byte + ProtoInfo ProtoInfo } func (s *ConntrackFlow) String() string { // conntrack cmd output: - // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 - return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=%d", + // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 labels=0x00000000050012ac4202010000000000 zone=100 + // start=2019-07-26 01:26:21.557800506 +0000 UTC stop=1970-01-01 00:00:00 +0000 UTC timeout=30(sec) + start := time.Unix(0, int64(s.TimeStart)) + stop := time.Unix(0, int64(s.TimeStop)) + timeout := int32(s.TimeOut) + res := fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x ", nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol, s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, s.Forward.Packets, s.Forward.Bytes, s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Reverse.Packets, s.Reverse.Bytes, s.Mark) + if len(s.Labels) > 0 { + res += fmt.Sprintf("labels=0x%x ", s.Labels) + } + if s.Zone != 0 { + res += fmt.Sprintf("zone=%d ", s.Zone) + } + res += fmt.Sprintf("start=%v stop=%v timeout=%d(sec)", start, stop, 
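The new Zone, TimeOut, Labels and ProtoInfo fields can be read back from a listing; a short sketch over the IPv4 table, pulling the TCP state via a type assertion:

package main

import (
    "fmt"
    "log"

    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, unix.AF_INET)
    if err != nil {
        log.Fatal(err)
    }
    for _, f := range flows {
        // ProtoInfo stays nil when the flow carries no supported protocol info.
        if tcp, ok := f.ProtoInfo.(*netlink.ProtoInfoTCP); ok {
            fmt.Printf("zone=%d timeout=%ds labels=%x tcp-state=%d\n",
                f.Zone, f.TimeOut, f.Labels, tcp.State)
        }
    }
}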
timeout) + return res +} + +// toNlData generates netlink messages representing the flow. +func (s *ConntrackFlow) toNlData() ([]*nl.RtAttr, error) { + var payload []*nl.RtAttr + // The message structure is built as follows: + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + + // CTA_TUPLE_ORIG + ctTupleOrig := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_ORIG, nil) + forwardFlowAttrs, err := s.Forward.toNlData(s.FamilyType) + if err != nil { + return nil, fmt.Errorf("couldn't generate netlink data for conntrack forward flow: %w", err) + } + for _, a := range forwardFlowAttrs { + ctTupleOrig.AddChild(a) + } + + // CTA_TUPLE_REPLY + ctTupleReply := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_REPLY, nil) + reverseFlowAttrs, err := s.Reverse.toNlData(s.FamilyType) + if err != nil { + return nil, fmt.Errorf("couldn't generate netlink data for conntrack reverse flow: %w", err) + } + for _, a := range reverseFlowAttrs { + ctTupleReply.AddChild(a) + } + + ctMark := nl.NewRtAttr(nl.CTA_MARK, nl.BEUint32Attr(s.Mark)) + ctTimeout := nl.NewRtAttr(nl.CTA_TIMEOUT, nl.BEUint32Attr(s.TimeOut)) + + payload = append(payload, ctTupleOrig, ctTupleReply, ctMark, ctTimeout) + + if s.ProtoInfo != nil { + switch p := s.ProtoInfo.(type) { + case *ProtoInfoTCP: + attrs, err := p.toNlData() + if err != nil { + return nil, fmt.Errorf("couldn't generate netlink data for conntrack flow's TCP protoinfo: %w", err) + } + payload = append(payload, attrs...) + default: + return nil, errors.New("couldn't generate netlink data for conntrack: field 'ProtoInfo' only supports TCP or nil") + } + } + + return payload, nil } // This method parse the ip tuple structure @@ -164,7 +394,7 @@ func (s *ConntrackFlow) String() string { // // // -func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 { +func parseIpTuple(reader *bytes.Reader, tpl *IPTuple) uint8 { for i := 0; i < 2; i++ { _, t, _, v := parseNfAttrTLV(reader) switch t { @@ -174,25 +404,43 @@ func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 { tpl.DstIP = v } } - // Skip the next 4 bytes nl.NLA_F_NESTED|nl.CTA_TUPLE_PROTO - reader.Seek(4, seekCurrent) - _, t, _, v := parseNfAttrTLV(reader) + // Get total length of nested protocol-specific info. + _, _, protoInfoTotalLen := parseNfAttrTL(reader) + _, t, l, v := parseNfAttrTLV(reader) + // Track the number of bytes read. + protoInfoBytesRead := uint16(nl.SizeofNfattr) + l if t == nl.CTA_PROTO_NUM { tpl.Protocol = uint8(v[0]) } - // Skip some padding 3 bytes + // We only parse TCP & UDP headers. Skip the others. 
+ if tpl.Protocol != unix.IPPROTO_TCP && tpl.Protocol != unix.IPPROTO_UDP { + // skip the rest + bytesRemaining := protoInfoTotalLen - protoInfoBytesRead + reader.Seek(int64(bytesRemaining), seekCurrent) + return tpl.Protocol + } + // Skip 3 bytes of padding reader.Seek(3, seekCurrent) + protoInfoBytesRead += 3 for i := 0; i < 2; i++ { _, t, _ := parseNfAttrTL(reader) + protoInfoBytesRead += uint16(nl.SizeofNfattr) switch t { case nl.CTA_PROTO_SRC_PORT: parseBERaw16(reader, &tpl.SrcPort) + protoInfoBytesRead += 2 case nl.CTA_PROTO_DST_PORT: parseBERaw16(reader, &tpl.DstPort) + protoInfoBytesRead += 2 } - // Skip some padding 2 byte + // Skip 2 bytes of padding reader.Seek(2, seekCurrent) + protoInfoBytesRead += 2 } + // Skip any remaining/unknown parts of the message + bytesRemaining := protoInfoTotalLen - protoInfoBytesRead + reader.Seek(int64(bytesRemaining), seekCurrent) + return tpl.Protocol } @@ -211,10 +459,18 @@ func parseNfAttrTL(r *bytes.Reader) (isNested bool, attrType, len uint16) { binary.Read(r, nl.NativeEndian(), &attrType) isNested = (attrType & nl.NLA_F_NESTED) == nl.NLA_F_NESTED attrType = attrType & (nl.NLA_F_NESTED - 1) - return isNested, attrType, len } +// skipNfAttrValue seeks `r` past attr of length `len`. +// Maintains buffer alignment. +// Returns length of the seek performed. +func skipNfAttrValue(r *bytes.Reader, len uint16) uint16 { + len = (len + nl.NLA_ALIGNTO - 1) & ^(nl.NLA_ALIGNTO - 1) + r.Seek(int64(len), seekCurrent) + return len +} + func parseBERaw16(r *bytes.Reader, v *uint16) { binary.Read(r, binary.BigEndian, v) } @@ -227,6 +483,10 @@ func parseBERaw64(r *bytes.Reader, v *uint64) { binary.Read(r, binary.BigEndian, v) } +func parseRaw32(r *bytes.Reader, v *uint32) { + binary.Read(r, nl.NativeEndian(), v) +} + func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) { for i := 0; i < 2; i++ { switch _, t, _ := parseNfAttrTL(r); t { @@ -241,11 +501,107 @@ func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) { return } +// when the flow is alive, only the timestamp_start is returned in structure +func parseTimeStamp(r *bytes.Reader, readSize uint16) (tstart, tstop uint64) { + var numTimeStamps int + oneItem := nl.SizeofNfattr + 8 // 4 bytes attr header + 8 bytes timestamp + if readSize == uint16(oneItem) { + numTimeStamps = 1 + } else if readSize == 2*uint16(oneItem) { + numTimeStamps = 2 + } else { + return + } + for i := 0; i < numTimeStamps; i++ { + switch _, t, _ := parseNfAttrTL(r); t { + case nl.CTA_TIMESTAMP_START: + parseBERaw64(r, &tstart) + case nl.CTA_TIMESTAMP_STOP: + parseBERaw64(r, &tstop) + default: + return + } + } + return + +} + +func parseProtoInfoTCPState(r *bytes.Reader) (s uint8) { + binary.Read(r, binary.BigEndian, &s) + r.Seek(nl.SizeofNfattr - 1, seekCurrent) + return s +} + +// parseProtoInfoTCP reads the entire nested protoinfo structure, but only parses the state attr. 
+func parseProtoInfoTCP(r *bytes.Reader, attrLen uint16) (*ProtoInfoTCP) { + p := new(ProtoInfoTCP) + bytesRead := 0 + for bytesRead < int(attrLen) { + _, t, l := parseNfAttrTL(r) + bytesRead += nl.SizeofNfattr + + switch t { + case nl.CTA_PROTOINFO_TCP_STATE: + p.State = parseProtoInfoTCPState(r) + bytesRead += nl.SizeofNfattr + default: + bytesRead += int(skipNfAttrValue(r, l)) + } + } + + return p +} + +func parseProtoInfo(r *bytes.Reader, attrLen uint16) (p ProtoInfo) { + bytesRead := 0 + for bytesRead < int(attrLen) { + _, t, l := parseNfAttrTL(r) + bytesRead += nl.SizeofNfattr + + switch t { + case nl.CTA_PROTOINFO_TCP: + p = parseProtoInfoTCP(r, l) + bytesRead += int(l) + // No inner fields of DCCP / SCTP currently supported. + case nl.CTA_PROTOINFO_DCCP: + p = new(ProtoInfoDCCP) + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + case nl.CTA_PROTOINFO_SCTP: + p = new(ProtoInfoSCTP) + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + default: + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + } + } + + return p +} + +func parseTimeOut(r *bytes.Reader) (ttimeout uint32) { + parseBERaw32(r, &ttimeout) + return +} + func parseConnectionMark(r *bytes.Reader) (mark uint32) { parseBERaw32(r, &mark) return } +func parseConnectionLabels(r *bytes.Reader) (label []byte) { + label = make([]byte, 16) // netfilter defines 128 bit labels value + binary.Read(r, nl.NativeEndian(), &label) + return +} + +func parseConnectionZone(r *bytes.Reader) (zone uint16) { + parseBERaw16(r, &zone) + r.Seek(2, seekCurrent) + return +} + func parseRawData(data []byte) *ConntrackFlow { s := &ConntrackFlow{} // First there is the Nfgenmsg header @@ -266,25 +622,41 @@ func parseRawData(data []byte) *ConntrackFlow { if nested, t, l := parseNfAttrTL(reader); nested { switch t { case nl.CTA_TUPLE_ORIG: - if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { + if nested, t, l = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { parseIpTuple(reader, &s.Forward) } case nl.CTA_TUPLE_REPLY: - if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { + if nested, t, l = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { parseIpTuple(reader, &s.Reverse) } else { // Header not recognized skip it - reader.Seek(int64(l), seekCurrent) + skipNfAttrValue(reader, l) } case nl.CTA_COUNTERS_ORIG: s.Forward.Bytes, s.Forward.Packets = parseByteAndPacketCounters(reader) case nl.CTA_COUNTERS_REPLY: s.Reverse.Bytes, s.Reverse.Packets = parseByteAndPacketCounters(reader) + case nl.CTA_TIMESTAMP: + s.TimeStart, s.TimeStop = parseTimeStamp(reader, l) + case nl.CTA_PROTOINFO: + s.ProtoInfo = parseProtoInfo(reader, l) + default: + skipNfAttrValue(reader, l) } } else { switch t { case nl.CTA_MARK: s.Mark = parseConnectionMark(reader) + case nl.CTA_LABELS: + s.Labels = parseConnectionLabels(reader) + case nl.CTA_TIMEOUT: + s.TimeOut = parseTimeOut(reader) + case nl.CTA_ID, nl.CTA_STATUS, nl.CTA_USE: + skipNfAttrValue(reader, l) + case nl.CTA_ZONE: + s.Zone = parseConnectionZone(reader) + default: + skipNfAttrValue(reader, l) } } } @@ -318,18 +690,27 @@ func parseRawData(data []byte) *ConntrackFlow { // --mask-src ip Source mask address // --mask-dst ip Destination mask address +// Layer 4 Protocol common parameters and options: +// TCP, UDP, SCTP, UDPLite and DCCP +// --sport, --orig-port-src port Source port in original direction +// --dport, --orig-port-dst port Destination port in original direction + // Filter types type ConntrackFilterType uint8 const ( - 
ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction - ConntrackOrigDstIP // -orig-dst ip Destination address from original direction - ConntrackReplySrcIP // --reply-src ip Reply Source IP - ConntrackReplyDstIP // --reply-dst ip Reply Destination IP - ConntrackReplyAnyIP // Match source or destination reply IP - ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP - ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP - ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instaed ConntrackReplyAnyIP + ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction + ConntrackOrigDstIP // -orig-dst ip Destination address from original direction + ConntrackReplySrcIP // --reply-src ip Reply Source IP + ConntrackReplyDstIP // --reply-dst ip Reply Destination IP + ConntrackReplyAnyIP // Match source or destination reply IP + ConntrackOrigSrcPort // --orig-port-src port Source port in original direction + ConntrackOrigDstPort // --orig-port-dst port Destination port in original direction + ConntrackMatchLabels // --label label1,label2 Labels used in entry + ConntrackUnmatchLabels // --label label1,label2 Labels not used in entry + ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP + ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP + ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP ) type CustomConntrackFilter interface { @@ -339,53 +720,180 @@ type CustomConntrackFilter interface { } type ConntrackFilter struct { - ipFilter map[ConntrackFilterType]net.IP + ipNetFilter map[ConntrackFilterType]*net.IPNet + portFilter map[ConntrackFilterType]uint16 + protoFilter uint8 + labelFilter map[ConntrackFilterType][][]byte + zoneFilter *uint16 +} + +// AddIPNet adds a IP subnet to the conntrack filter +func (f *ConntrackFilter) AddIPNet(tp ConntrackFilterType, ipNet *net.IPNet) error { + if ipNet == nil { + return fmt.Errorf("Filter attribute empty") + } + if f.ipNetFilter == nil { + f.ipNetFilter = make(map[ConntrackFilterType]*net.IPNet) + } + if _, ok := f.ipNetFilter[tp]; ok { + return errors.New("Filter attribute already present") + } + f.ipNetFilter[tp] = ipNet + return nil } // AddIP adds an IP to the conntrack filter func (f *ConntrackFilter) AddIP(tp ConntrackFilterType, ip net.IP) error { - if f.ipFilter == nil { - f.ipFilter = make(map[ConntrackFilterType]net.IP) + if ip == nil { + return fmt.Errorf("Filter attribute empty") + } + return f.AddIPNet(tp, NewIPNet(ip)) +} + +// AddPort adds a Port to the conntrack filter if the Layer 4 protocol allows it +func (f *ConntrackFilter) AddPort(tp ConntrackFilterType, port uint16) error { + switch f.protoFilter { + // TCP, UDP, DCCP, SCTP, UDPLite + case 6, 17, 33, 132, 136: + default: + return fmt.Errorf("Filter attribute not available without a valid Layer 4 protocol: %d", f.protoFilter) } - if _, ok := f.ipFilter[tp]; ok { + + if f.portFilter == nil { + f.portFilter = make(map[ConntrackFilterType]uint16) + } + if _, ok := f.portFilter[tp]; ok { return errors.New("Filter attribute already present") } - f.ipFilter[tp] = ip + f.portFilter[tp] = port + return nil +} + +// AddProtocol adds the Layer 4 protocol to the conntrack filter +func (f *ConntrackFilter) AddProtocol(proto uint8) error { + if f.protoFilter != 0 { + return errors.New("Filter attribute already present") + } + f.protoFilter = proto + return nil +} + +// AddLabels 
adds the provided list (zero or more) of labels to the conntrack filter +// ConntrackFilterType here can be either: +// 1. ConntrackMatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0) +// against the list of provided labels. If `flow.Labels` contains ALL the provided labels +// it is considered a match. This can be used when you want to match flows that contain +// one or more labels. +// 2. ConntrackUnmatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0) +// against the list of provided labels. If `flow.Labels` does NOT contain ALL the provided labels +// it is considered a match. This can be used when you want to match flows that don't contain +// one or more labels. +func (f *ConntrackFilter) AddLabels(tp ConntrackFilterType, labels [][]byte) error { + if len(labels) == 0 { + return errors.New("Invalid length for provided labels") + } + if f.labelFilter == nil { + f.labelFilter = make(map[ConntrackFilterType][][]byte) + } + if _, ok := f.labelFilter[tp]; ok { + return errors.New("Filter attribute already present") + } + f.labelFilter[tp] = labels + return nil +} + +// AddZone adds a zone to the conntrack filter +func (f *ConntrackFilter) AddZone(zone uint16) error { + if f.zoneFilter != nil { + return errors.New("Filter attribute already present") + } + f.zoneFilter = &zone return nil } // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter // false otherwise func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { - if len(f.ipFilter) == 0 { + if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 && len(f.labelFilter) == 0 && f.zoneFilter == nil { // empty filter always not match return false } - match := true - // -orig-src ip Source address from original direction - if elem, found := f.ipFilter[ConntrackOrigSrcIP]; found { - match = match && elem.Equal(flow.Forward.SrcIP) + // -p, --protonum proto Layer 4 Protocol, eg. 
'tcp' + if f.protoFilter != 0 && flow.Forward.Protocol != f.protoFilter { + // different Layer 4 protocol always not match + return false } - // -orig-dst ip Destination address from original direction - if elem, found := f.ipFilter[ConntrackOrigDstIP]; match && found { - match = match && elem.Equal(flow.Forward.DstIP) + // Conntrack zone filter + if f.zoneFilter != nil && *f.zoneFilter != flow.Zone { + return false } - // -src-nat ip Source NAT ip - if elem, found := f.ipFilter[ConntrackReplySrcIP]; match && found { - match = match && elem.Equal(flow.Reverse.SrcIP) + match := true + + // IP conntrack filter + if len(f.ipNetFilter) > 0 { + // -orig-src ip Source address from original direction + if elem, found := f.ipNetFilter[ConntrackOrigSrcIP]; found { + match = match && elem.Contains(flow.Forward.SrcIP) + } + + // -orig-dst ip Destination address from original direction + if elem, found := f.ipNetFilter[ConntrackOrigDstIP]; match && found { + match = match && elem.Contains(flow.Forward.DstIP) + } + + // -src-nat ip Source NAT ip + if elem, found := f.ipNetFilter[ConntrackReplySrcIP]; match && found { + match = match && elem.Contains(flow.Reverse.SrcIP) + } + + // -dst-nat ip Destination NAT ip + if elem, found := f.ipNetFilter[ConntrackReplyDstIP]; match && found { + match = match && elem.Contains(flow.Reverse.DstIP) + } + + // Match source or destination reply IP + if elem, found := f.ipNetFilter[ConntrackReplyAnyIP]; match && found { + match = match && (elem.Contains(flow.Reverse.SrcIP) || elem.Contains(flow.Reverse.DstIP)) + } } - // -dst-nat ip Destination NAT ip - if elem, found := f.ipFilter[ConntrackReplyDstIP]; match && found { - match = match && elem.Equal(flow.Reverse.DstIP) + // Layer 4 Port filter + if len(f.portFilter) > 0 { + // -orig-port-src port Source port from original direction + if elem, found := f.portFilter[ConntrackOrigSrcPort]; match && found { + match = match && elem == flow.Forward.SrcPort + } + + // -orig-port-dst port Destination port from original direction + if elem, found := f.portFilter[ConntrackOrigDstPort]; match && found { + match = match && elem == flow.Forward.DstPort + } } - // Match source or destination reply IP - if elem, found := f.ipFilter[ConntrackReplyAnyIP]; match && found { - match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP)) + // Label filter + if len(f.labelFilter) > 0 { + if len(flow.Labels) > 0 { + // --label label1,label2 in conn entry; + // every label passed should be contained in flow.Labels for a match to be true + if elem, found := f.labelFilter[ConntrackMatchLabels]; match && found { + for _, label := range elem { + match = match && (bytes.Contains(flow.Labels, label)) + } + } + // --label label1,label2 in conn entry; + // every label passed should be not contained in flow.Labels for a match to be true + if elem, found := f.labelFilter[ConntrackUnmatchLabels]; match && found { + for _, label := range elem { + match = match && !(bytes.Contains(flow.Labels, label)) + } + } + } else { + // flow doesn't contain labels, so it doesn't contain or notContain any provided matches + match = false + } } return match diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go index af7af799e..0bfdf422d 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go @@ -11,6 +11,9 @@ type InetFamily uint8 // ConntrackFlow placeholder type 
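Putting the extended filter together: a Layer 4 protocol must be set before ports are accepted, and subnets and zones narrow the match further; the subnet, port and zone values are placeholders. ConntrackDeleteFilters is variadic, so several filters can be passed and a flow is removed if any of them matches.

package main

import (
    "fmt"
    "log"
    "net"

    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    filter := &netlink.ConntrackFilter{}
    if err := filter.AddProtocol(unix.IPPROTO_TCP); err != nil { // required before AddPort
        log.Fatal(err)
    }
    if err := filter.AddPort(netlink.ConntrackOrigDstPort, 443); err != nil {
        log.Fatal(err)
    }
    _, subnet, err := net.ParseCIDR("10.0.0.0/24") // placeholder subnet
    if err != nil {
        log.Fatal(err)
    }
    if err := filter.AddIPNet(netlink.ConntrackOrigSrcIP, subnet); err != nil {
        log.Fatal(err)
    }
    if err := filter.AddZone(100); err != nil { // placeholder conntrack zone
        log.Fatal(err)
    }
    deleted, err := netlink.ConntrackDeleteFilters(netlink.ConntrackTable, unix.AF_INET, filter)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("deleted %d flows\n", deleted)
}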
ConntrackFlow struct{} +// CustomConntrackFilter placeholder +type CustomConntrackFilter struct{} + // ConntrackFilter placeholder type ConntrackFilter struct{} @@ -29,10 +32,18 @@ func ConntrackTableFlush(table ConntrackTableType) error { // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter // conntrack -D [table] parameters Delete conntrack or expectation +// +// Deprecated: use [ConntrackDeleteFilter] instead. func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) { return 0, ErrNotImplemented } +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters +// conntrack -D [table] parameters Delete conntrack or expectation +func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + return 0, ErrNotImplemented +} + // ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed // conntrack -L [table] [options] List conntrack or expectation table func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { @@ -48,6 +59,14 @@ func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed // conntrack -D [table] parameters Delete conntrack or expectation +// +// Deprecated: use [Handle.ConntrackDeleteFilters] instead. func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) { return 0, ErrNotImplemented } + +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed +// conntrack -D [table] parameters Delete conntrack or expectation +func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + return 0, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go index 29b3f8ec1..d98801dbb 100644 --- a/vendor/github.com/vishvananda/netlink/devlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go @@ -1,9 +1,11 @@ package netlink import ( + "fmt" + "net" + "strings" "syscall" - "fmt" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) @@ -27,6 +29,325 @@ type DevlinkDevice struct { Attrs DevlinkDevAttrs } +// DevlinkPortFn represents port function and its attributes +type DevlinkPortFn struct { + HwAddr net.HardwareAddr + State uint8 + OpState uint8 +} + +// DevlinkPortFnSetAttrs represents attributes to set +type DevlinkPortFnSetAttrs struct { + FnAttrs DevlinkPortFn + HwAddrValid bool + StateValid bool +} + +// DevlinkPort represents port and its attributes +type DevlinkPort struct { + BusName string + DeviceName string + PortIndex uint32 + PortType uint16 + NetdeviceName string + NetdevIfIndex uint32 + RdmaDeviceName string + PortFlavour uint16 + Fn *DevlinkPortFn +} + +type DevLinkPortAddAttrs struct { + Controller uint32 + SfNumber uint32 + PortIndex uint32 + PfNumber uint16 + SfNumberValid bool + PortIndexValid bool + ControllerValid bool +} + +// DevlinkDeviceInfo represents devlink info +type DevlinkDeviceInfo struct { + Driver string + SerialNumber string + BoardID string + FwApp string + FwAppBoundleID string + FwAppName string + 
FwBoundleID string + FwMgmt string + FwMgmtAPI string + FwMgmtBuild string + FwNetlist string + FwNetlistBuild string + FwPsidAPI string + FwUndi string +} + +// DevlinkResource represents a device resource +type DevlinkResource struct { + Name string + ID uint64 + Size uint64 + SizeNew uint64 + SizeMin uint64 + SizeMax uint64 + SizeGranularity uint64 + PendingChange bool + Unit uint8 + SizeValid bool + OCCValid bool + OCCSize uint64 + Parent *DevlinkResource + Children []DevlinkResource +} + +// parseAttributes parses provided Netlink Attributes and populates DevlinkResource, returns error if occured +func (dlr *DevlinkResource) parseAttributes(attrs map[uint16]syscall.NetlinkRouteAttr) error { + var attr syscall.NetlinkRouteAttr + var ok bool + + // mandatory attributes + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_ID] + if !ok { + return fmt.Errorf("missing resource id") + } + dlr.ID = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_NAME] + if !ok { + return fmt.Errorf("missing resource name") + } + dlr.Name = nl.BytesToString(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE] + if !ok { + return fmt.Errorf("missing resource size") + } + dlr.Size = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_GRAN] + if !ok { + return fmt.Errorf("missing resource size granularity") + } + dlr.SizeGranularity = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_UNIT] + if !ok { + return fmt.Errorf("missing resource unit") + } + dlr.Unit = uint8(attr.Value[0]) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_MIN] + if !ok { + return fmt.Errorf("missing resource size min") + } + dlr.SizeMin = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_MAX] + if !ok { + return fmt.Errorf("missing resource size max") + } + dlr.SizeMax = native.Uint64(attr.Value) + + // optional attributes + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_OCC] + if ok { + dlr.OCCSize = native.Uint64(attr.Value) + dlr.OCCValid = true + } + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_VALID] + if ok { + dlr.SizeValid = uint8(attr.Value[0]) != 0 + } + + dlr.SizeNew = dlr.Size + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_NEW] + if ok { + dlr.SizeNew = native.Uint64(attr.Value) + } + + dlr.PendingChange = dlr.Size != dlr.SizeNew + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_LIST] + if ok { + // handle nested resoruces recursively + subResources, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + + for _, subresource := range subResources { + resource := DevlinkResource{Parent: dlr} + attrs, err := nl.ParseRouteAttrAsMap(subresource.Value) + if err != nil { + return err + } + err = resource.parseAttributes(attrs) + if err != nil { + return fmt.Errorf("failed to parse child resource, parent:%s. 
%w", dlr.Name, err) + } + dlr.Children = append(dlr.Children, resource) + } + } + return nil +} + +// DevlinkResources represents all devlink resources of a devlink device +type DevlinkResources struct { + Bus string + Device string + Resources []DevlinkResource +} + +// parseAttributes parses provided Netlink Attributes and populates DevlinkResources, returns error if occured +func (dlrs *DevlinkResources) parseAttributes(attrs map[uint16]syscall.NetlinkRouteAttr) error { + var attr syscall.NetlinkRouteAttr + var ok bool + + // Bus + attr, ok = attrs[nl.DEVLINK_ATTR_BUS_NAME] + if !ok { + return fmt.Errorf("missing bus name") + } + dlrs.Bus = nl.BytesToString(attr.Value) + + // Device + attr, ok = attrs[nl.DEVLINK_ATTR_DEV_NAME] + if !ok { + return fmt.Errorf("missing device name") + } + dlrs.Device = nl.BytesToString(attr.Value) + + // Resource List + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_LIST] + if !ok { + return fmt.Errorf("missing resource list") + } + + resourceAttrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + + for _, resourceAttr := range resourceAttrs { + resource := DevlinkResource{} + attrs, err := nl.ParseRouteAttrAsMap(resourceAttr.Value) + if err != nil { + return err + } + err = resource.parseAttributes(attrs) + if err != nil { + return fmt.Errorf("failed to parse root resoruces, %w", err) + } + dlrs.Resources = append(dlrs.Resources, resource) + } + + return nil +} + +// DevlinkParam represents parameter of the device +type DevlinkParam struct { + Name string + IsGeneric bool + Type uint8 // possible values are in nl.DEVLINK_PARAM_TYPE_* constants + Values []DevlinkParamValue +} + +// DevlinkParamValue contains values of the parameter +// Data field contains specific type which can be casted by unsing info from the DevlinkParam.Type field +type DevlinkParamValue struct { + rawData []byte + Data interface{} + CMODE uint8 // possible values are in nl.DEVLINK_PARAM_CMODE_* constants +} + +// parseAttributes parses provided Netlink Attributes and populates DevlinkParam, returns error if occured +func (dlp *DevlinkParam) parseAttributes(attrs []syscall.NetlinkRouteAttr) error { + var valuesList [][]syscall.NetlinkRouteAttr + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM: + nattrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + for _, nattr := range nattrs { + switch nattr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM_NAME: + dlp.Name = nl.BytesToString(nattr.Value) + case nl.DEVLINK_ATTR_PARAM_GENERIC: + dlp.IsGeneric = true + case nl.DEVLINK_ATTR_PARAM_TYPE: + if len(nattr.Value) == 1 { + dlp.Type = nattr.Value[0] + } + case nl.DEVLINK_ATTR_PARAM_VALUES_LIST: + nnattrs, err := nl.ParseRouteAttr(nattr.Value) + if err != nil { + return err + } + valuesList = append(valuesList, nnattrs) + } + } + } + } + for _, valAttr := range valuesList { + v := DevlinkParamValue{} + if err := v.parseAttributes(valAttr, dlp.Type); err != nil { + return err + } + dlp.Values = append(dlp.Values, v) + } + return nil +} + +func (dlpv *DevlinkParamValue) parseAttributes(attrs []syscall.NetlinkRouteAttr, paramType uint8) error { + for _, attr := range attrs { + nattrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + var rawData []byte + for _, nattr := range nattrs { + switch nattr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM_VALUE_DATA: + rawData = nattr.Value + case nl.DEVLINK_ATTR_PARAM_VALUE_CMODE: + if len(nattr.Value) == 1 { + dlpv.CMODE = nattr.Value[0] + } + } + } + switch 
paramType { + case nl.DEVLINK_PARAM_TYPE_U8: + dlpv.Data = uint8(0) + if rawData != nil && len(rawData) == 1 { + dlpv.Data = uint8(rawData[0]) + } + case nl.DEVLINK_PARAM_TYPE_U16: + dlpv.Data = uint16(0) + if rawData != nil { + dlpv.Data = native.Uint16(rawData) + } + case nl.DEVLINK_PARAM_TYPE_U32: + dlpv.Data = uint32(0) + if rawData != nil { + dlpv.Data = native.Uint32(rawData) + } + case nl.DEVLINK_PARAM_TYPE_STRING: + dlpv.Data = "" + if rawData != nil { + dlpv.Data = nl.BytesToString(rawData) + } + case nl.DEVLINK_PARAM_TYPE_BOOL: + dlpv.Data = rawData != nil + } + } + return nil +} + func parseDevLinkDeviceList(msgs [][]byte) ([]*DevlinkDevice, error) { devices := make([]*DevlinkDevice, 0, len(msgs)) for _, m := range msgs { @@ -95,9 +416,9 @@ func (d *DevlinkDevice) parseAttributes(attrs []syscall.NetlinkRouteAttr) error for _, a := range attrs { switch a.Attr.Type { case nl.DEVLINK_ATTR_BUS_NAME: - d.BusName = string(a.Value) + d.BusName = string(a.Value[:len(a.Value)-1]) case nl.DEVLINK_ATTR_DEV_NAME: - d.DeviceName = string(a.Value) + d.DeviceName = string(a.Value[:len(a.Value)-1]) case nl.DEVLINK_ATTR_ESWITCH_MODE: d.Attrs.Eswitch.Mode = parseEswitchMode(native.Uint16(a.Value)) case nl.DEVLINK_ATTR_ESWITCH_INLINE_MODE: @@ -126,12 +447,12 @@ func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) { req := h.newNetlinkRequest(int(family.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK) req.AddData(msg) - b := make([]byte, len(dev.BusName)) + b := make([]byte, len(dev.BusName)+1) copy(b, dev.BusName) data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b) req.AddData(data) - b = make([]byte, len(dev.DeviceName)) + b = make([]byte, len(dev.DeviceName)+1) copy(b, dev.DeviceName) data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b) req.AddData(data) @@ -270,3 +591,569 @@ func (h *Handle) DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error func DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error { return pkgHandle.DevLinkSetEswitchMode(Dev, NewMode) } + +func (port *DevlinkPort) parseAttributes(attrs []syscall.NetlinkRouteAttr) error { + for _, a := range attrs { + switch a.Attr.Type { + case nl.DEVLINK_ATTR_BUS_NAME: + port.BusName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_DEV_NAME: + port.DeviceName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_PORT_INDEX: + port.PortIndex = native.Uint32(a.Value) + case nl.DEVLINK_ATTR_PORT_TYPE: + port.PortType = native.Uint16(a.Value) + case nl.DEVLINK_ATTR_PORT_NETDEV_NAME: + port.NetdeviceName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_PORT_NETDEV_IFINDEX: + port.NetdevIfIndex = native.Uint32(a.Value) + case nl.DEVLINK_ATTR_PORT_IBDEV_NAME: + port.RdmaDeviceName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_PORT_FLAVOUR: + port.PortFlavour = native.Uint16(a.Value) + case nl.DEVLINK_ATTR_PORT_FUNCTION: + port.Fn = &DevlinkPortFn{} + for nested := range nl.ParseAttributes(a.Value) { + switch nested.Type { + case nl.DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR: + port.Fn.HwAddr = nested.Value[:] + case nl.DEVLINK_PORT_FN_ATTR_STATE: + port.Fn.State = uint8(nested.Value[0]) + case nl.DEVLINK_PORT_FN_ATTR_OPSTATE: + port.Fn.OpState = uint8(nested.Value[0]) + } + } + } + } + return nil +} + +func parseDevLinkAllPortList(msgs [][]byte) ([]*DevlinkPort, error) { + ports := make([]*DevlinkPort, 0, len(msgs)) + for _, m := range msgs { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + port := &DevlinkPort{} + if err = 
port.parseAttributes(attrs); err != nil { + return nil, err + } + ports = append(ports, port) + } + return ports, nil +} + +// DevLinkGetPortList provides a pointer to devlink ports and nil error, +// otherwise returns an error code. +func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) { + f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) + if err != nil { + return nil, err + } + msg := &nl.Genlmsg{ + Command: nl.DEVLINK_CMD_PORT_GET, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(f.ID), + unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) + req.AddData(msg) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + ports, err := parseDevLinkAllPortList(msgs) + if err != nil { + return nil, err + } + return ports, nil +} + +// DevLinkGetPortList provides a pointer to devlink ports and nil error, +// otherwise returns an error code. +func DevLinkGetAllPortList() ([]*DevlinkPort, error) { + return pkgHandle.DevLinkGetAllPortList() +} + +func parseDevlinkPortMsg(msgs [][]byte) (*DevlinkPort, error) { + m := msgs[0] + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + port := &DevlinkPort{} + if err = port.parseAttributes(attrs); err != nil { + return nil, err + } + return port, nil +} + +// DevLinkGetPortByIndexprovides a pointer to devlink device and nil error, +// otherwise returns an error code. +func (h *Handle) DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) { + + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_GET, Bus, Device) + if err != nil { + return nil, err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex))) + + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + port, err := parseDevlinkPortMsg(respmsg) + return port, err +} + +// DevlinkGetDeviceResources returns devlink device resources +func DevlinkGetDeviceResources(bus string, device string) (*DevlinkResources, error) { + return pkgHandle.DevlinkGetDeviceResources(bus, device) +} + +// DevlinkGetDeviceResources returns devlink device resources +func (h *Handle) DevlinkGetDeviceResources(bus string, device string) (*DevlinkResources, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_RESOURCE_DUMP, bus, device) + if err != nil { + return nil, err + } + + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + + var resources DevlinkResources + for _, m := range respmsg { + attrs, err := nl.ParseRouteAttrAsMap(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + resources.parseAttributes(attrs) + } + + return &resources, nil +} + +// DevlinkGetDeviceParams returns parameters for devlink device +// Equivalent to: `devlink dev param show /` +func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device) + if err != nil { + return nil, err + } + req.Flags |= unix.NLM_F_DUMP + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + var params []*DevlinkParam + for _, m := range respmsg { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + p := &DevlinkParam{} + if err := p.parseAttributes(attrs); err != nil { + return nil, err + } + params = append(params, p) + } + + return params, nil +} + +// DevlinkGetDeviceParams returns parameters for devlink device +// 
Equivalent to: `devlink dev param show /` +func DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) { + return pkgHandle.DevlinkGetDeviceParams(bus, device) +} + +// DevlinkGetDeviceParamByName returns specific parameter for devlink device +// Equivalent to: `devlink dev param show / name ` +func (h *Handle) DevlinkGetDeviceParamByName(bus string, device string, param string) (*DevlinkParam, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device) + if err != nil { + return nil, err + } + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_NAME, nl.ZeroTerminated(param))) + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + if len(respmsg) == 0 { + return nil, fmt.Errorf("unexpected response") + } + attrs, err := nl.ParseRouteAttr(respmsg[0][nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + p := &DevlinkParam{} + if err := p.parseAttributes(attrs); err != nil { + return nil, err + } + return p, nil +} + +// DevlinkGetDeviceParamByName returns specific parameter for devlink device +// Equivalent to: `devlink dev param show / name ` +func DevlinkGetDeviceParamByName(bus string, device string, param string) (*DevlinkParam, error) { + return pkgHandle.DevlinkGetDeviceParamByName(bus, device, param) +} + +// DevlinkSetDeviceParam set specific parameter for devlink device +// Equivalent to: `devlink dev param set / name cmode value ` +// cmode argument should contain valid cmode value as uint8, modes are define in nl.DEVLINK_PARAM_CMODE_* constants +// value argument should have one of the following types: uint8, uint16, uint32, string, bool +func (h *Handle) DevlinkSetDeviceParam(bus string, device string, param string, cmode uint8, value interface{}) error { + // retrive the param type + p, err := h.DevlinkGetDeviceParamByName(bus, device, param) + if err != nil { + return fmt.Errorf("failed to get device param: %v", err) + } + paramType := p.Type + + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_SET, bus, device) + if err != nil { + return err + } + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_TYPE, nl.Uint8Attr(paramType))) + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_NAME, nl.ZeroTerminated(param))) + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_VALUE_CMODE, nl.Uint8Attr(cmode))) + + var valueAsBytes []byte + switch paramType { + case nl.DEVLINK_PARAM_TYPE_U8: + v, ok := value.(uint8) + if !ok { + return fmt.Errorf("unepected value type required: uint8, actual: %T", value) + } + valueAsBytes = nl.Uint8Attr(v) + case nl.DEVLINK_PARAM_TYPE_U16: + v, ok := value.(uint16) + if !ok { + return fmt.Errorf("unepected value type required: uint16, actual: %T", value) + } + valueAsBytes = nl.Uint16Attr(v) + case nl.DEVLINK_PARAM_TYPE_U32: + v, ok := value.(uint32) + if !ok { + return fmt.Errorf("unepected value type required: uint32, actual: %T", value) + } + valueAsBytes = nl.Uint32Attr(v) + case nl.DEVLINK_PARAM_TYPE_STRING: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unepected value type required: string, actual: %T", value) + } + valueAsBytes = nl.ZeroTerminated(v) + case nl.DEVLINK_PARAM_TYPE_BOOL: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unepected value type required: bool, actual: %T", value) + } + if v { + valueAsBytes = []byte{} + } + default: + return fmt.Errorf("unsupported parameter type: %d", paramType) + } + if valueAsBytes != nil { + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_VALUE_DATA, valueAsBytes)) + } + _, err = 
req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevlinkSetDeviceParam set specific parameter for devlink device +// Equivalent to: `devlink dev param set / name cmode value ` +// cmode argument should contain valid cmode value as uint8, modes are define in nl.DEVLINK_PARAM_CMODE_* constants +// value argument should have one of the following types: uint8, uint16, uint32, string, bool +func DevlinkSetDeviceParam(bus string, device string, param string, cmode uint8, value interface{}) error { + return pkgHandle.DevlinkSetDeviceParam(bus, device, param, cmode, value) +} + +// DevLinkGetPortByIndex provides a pointer to devlink portand nil error, +// otherwise returns an error code. +func DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) { + return pkgHandle.DevLinkGetPortByIndex(Bus, Device, PortIndex) +} + +// DevLinkPortAdd adds a devlink port and returns a port on success +// otherwise returns nil port and an error code. +func (h *Handle) DevLinkPortAdd(Bus string, Device string, Flavour uint16, Attrs DevLinkPortAddAttrs) (*DevlinkPort, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_NEW, Bus, Device) + if err != nil { + return nil, err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_FLAVOUR, nl.Uint16Attr(Flavour))) + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_PCI_PF_NUMBER, nl.Uint16Attr(Attrs.PfNumber))) + if Flavour == nl.DEVLINK_PORT_FLAVOUR_PCI_SF && Attrs.SfNumberValid { + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_PCI_SF_NUMBER, nl.Uint32Attr(Attrs.SfNumber))) + } + if Attrs.PortIndexValid { + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(Attrs.PortIndex))) + } + if Attrs.ControllerValid { + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_CONTROLLER_NUMBER, nl.Uint32Attr(Attrs.Controller))) + } + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + port, err := parseDevlinkPortMsg(respmsg) + return port, err +} + +// DevLinkPortAdd adds a devlink port and returns a port on success +// otherwise returns nil port and an error code. +func DevLinkPortAdd(Bus string, Device string, Flavour uint16, Attrs DevLinkPortAddAttrs) (*DevlinkPort, error) { + return pkgHandle.DevLinkPortAdd(Bus, Device, Flavour, Attrs) +} + +// DevLinkPortDel deletes a devlink port and returns success or error code. +func (h *Handle) DevLinkPortDel(Bus string, Device string, PortIndex uint32) error { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_DEL, Bus, Device) + if err != nil { + return err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex))) + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevLinkPortDel deletes a devlink port and returns success or error code. +func DevLinkPortDel(Bus string, Device string, PortIndex uint32) error { + return pkgHandle.DevLinkPortDel(Bus, Device, PortIndex) +} + +// DevlinkPortFnSet sets one or more port function attributes specified by the attribute mask. +// It returns 0 on success or error code. 
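// A minimal usage sketch for the devlink parameter helpers added above
// (DevlinkGetDeviceParamByName / DevlinkSetDeviceParam). The "pci"/"0000:03:00.0"
// device and the "enable_sriov" parameter are placeholders, and the RUNTIME cmode
// constant is assumed to live in the nl sub-package as the doc comments state;
// this is an illustration, not part of the vendored change itself.
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
)

func main() {
	// Read the parameter first; Values holds one entry per configuration mode (cmode).
	p, err := netlink.DevlinkGetDeviceParamByName("pci", "0000:03:00.0", "enable_sriov")
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range p.Values {
		fmt.Printf("cmode=%d value=%v\n", v.CMODE, v.Data)
	}

	// The Go type of value must match DevlinkParam.Type (bool for this parameter);
	// otherwise DevlinkSetDeviceParam returns a type mismatch error.
	if err := netlink.DevlinkSetDeviceParam("pci", "0000:03:00.0", "enable_sriov",
		nl.DEVLINK_PARAM_CMODE_RUNTIME, true); err != nil {
		log.Fatal(err)
	}
}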
+func (h *Handle) DevlinkPortFnSet(Bus string, Device string, PortIndex uint32, FnAttrs DevlinkPortFnSetAttrs) error { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_SET, Bus, Device) + if err != nil { + return err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex))) + + fnAttr := nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_FUNCTION|unix.NLA_F_NESTED, nil) + + if FnAttrs.HwAddrValid { + fnAttr.AddRtAttr(nl.DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, []byte(FnAttrs.FnAttrs.HwAddr)) + } + + if FnAttrs.StateValid { + fnAttr.AddRtAttr(nl.DEVLINK_PORT_FN_ATTR_STATE, nl.Uint8Attr(FnAttrs.FnAttrs.State)) + } + req.AddData(fnAttr) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevlinkPortFnSet sets one or more port function attributes specified by the attribute mask. +// It returns 0 on success or error code. +func DevlinkPortFnSet(Bus string, Device string, PortIndex uint32, FnAttrs DevlinkPortFnSetAttrs) error { + return pkgHandle.DevlinkPortFnSet(Bus, Device, PortIndex, FnAttrs) +} + +// devlinkInfoGetter is function that is responsible for getting devlink info message +// this is introduced for test purpose +type devlinkInfoGetter func(bus, device string) ([]byte, error) + +// DevlinkGetDeviceInfoByName returns devlink info for selected device, +// otherwise returns an error code. +// Equivalent to: `devlink dev info $dev` +func (h *Handle) DevlinkGetDeviceInfoByName(Bus string, Device string, getInfoMsg devlinkInfoGetter) (*DevlinkDeviceInfo, error) { + info, err := h.DevlinkGetDeviceInfoByNameAsMap(Bus, Device, getInfoMsg) + if err != nil { + return nil, err + } + + return parseInfoData(info), nil +} + +// DevlinkGetDeviceInfoByName returns devlink info for selected device, +// otherwise returns an error code. +// Equivalent to: `devlink dev info $dev` +func DevlinkGetDeviceInfoByName(Bus string, Device string) (*DevlinkDeviceInfo, error) { + return pkgHandle.DevlinkGetDeviceInfoByName(Bus, Device, pkgHandle.getDevlinkInfoMsg) +} + +// DevlinkGetDeviceInfoByNameAsMap returns devlink info for selected device as a map, +// otherwise returns an error code. +// Equivalent to: `devlink dev info $dev` +func (h *Handle) DevlinkGetDeviceInfoByNameAsMap(Bus string, Device string, getInfoMsg devlinkInfoGetter) (map[string]string, error) { + response, err := getInfoMsg(Bus, Device) + if err != nil { + return nil, err + } + + info, err := parseInfoMsg(response) + if err != nil { + return nil, err + } + + return info, nil +} + +// DevlinkGetDeviceInfoByNameAsMap returns devlink info for selected device as a map, +// otherwise returns an error code. +// Equivalent to: `devlink dev info $dev` +func DevlinkGetDeviceInfoByNameAsMap(Bus string, Device string) (map[string]string, error) { + return pkgHandle.DevlinkGetDeviceInfoByNameAsMap(Bus, Device, pkgHandle.getDevlinkInfoMsg) +} + +// GetDevlinkInfo returns devlink info for target device, +// otherwise returns an error code. +func (d *DevlinkDevice) GetDevlinkInfo() (*DevlinkDeviceInfo, error) { + return pkgHandle.DevlinkGetDeviceInfoByName(d.BusName, d.DeviceName, pkgHandle.getDevlinkInfoMsg) +} + +// GetDevlinkInfoAsMap returns devlink info for target device as a map, +// otherwise returns an error code. 
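// A short sketch of the device-info getters added above: DevlinkGetDeviceInfoByName
// returns the parsed DevlinkDeviceInfo, while the AsMap variant exposes every raw
// key/value pair the kernel reported. "pci"/"0000:03:00.0" is a placeholder device;
// the example is illustrative and not part of the vendored change.
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	info, err := netlink.DevlinkGetDeviceInfoByName("pci", "0000:03:00.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("driver:", info.Driver, "serial:", info.SerialNumber, "fw.mgmt:", info.FwMgmt)

	// The map form keeps keys the struct does not model explicitly.
	raw, err := netlink.DevlinkGetDeviceInfoByNameAsMap("pci", "0000:03:00.0")
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range raw {
		fmt.Printf("%s=%s\n", k, v)
	}
}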
+func (d *DevlinkDevice) GetDevlinkInfoAsMap() (map[string]string, error) { + return pkgHandle.DevlinkGetDeviceInfoByNameAsMap(d.BusName, d.DeviceName, pkgHandle.getDevlinkInfoMsg) +} + +func (h *Handle) getDevlinkInfoMsg(bus, device string) ([]byte, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_INFO_GET, bus, device) + if err != nil { + return nil, err + } + + response, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + + if len(response) < 1 { + return nil, fmt.Errorf("getDevlinkInfoMsg: message too short") + } + + return response[0], nil +} + +func parseInfoMsg(msg []byte) (map[string]string, error) { + if len(msg) < nl.SizeofGenlmsg { + return nil, fmt.Errorf("parseInfoMsg: message too short") + } + + info := make(map[string]string) + err := collectInfoData(msg[nl.SizeofGenlmsg:], info) + + if err != nil { + return nil, err + } + + return info, nil +} + +func collectInfoData(msg []byte, data map[string]string) error { + attrs, err := nl.ParseRouteAttr(msg) + if err != nil { + return err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.DEVLINK_ATTR_INFO_DRIVER_NAME: + data["driver"] = parseInfoValue(attr.Value) + case nl.DEVLINK_ATTR_INFO_SERIAL_NUMBER: + data["serialNumber"] = parseInfoValue(attr.Value) + case nl.DEVLINK_ATTR_INFO_VERSION_RUNNING, nl.DEVLINK_ATTR_INFO_VERSION_FIXED, + nl.DEVLINK_ATTR_INFO_VERSION_STORED: + key, value, err := getNestedInfoData(attr.Value) + if err != nil { + return err + } + data[key] = value + } + } + + if len(data) == 0 { + return fmt.Errorf("collectInfoData: could not read attributes") + } + + return nil +} + +func getNestedInfoData(msg []byte) (string, string, error) { + nestedAttrs, err := nl.ParseRouteAttr(msg) + + var key, value string + + if err != nil { + return "", "", err + } + + if len(nestedAttrs) != 2 { + return "", "", fmt.Errorf("getNestedInfoData: too few attributes in nested structure") + } + + for _, nestedAttr := range nestedAttrs { + switch nestedAttr.Attr.Type { + case nl.DEVLINK_ATTR_INFO_VERSION_NAME: + key = parseInfoValue(nestedAttr.Value) + case nl.DEVLINK_ATTR_INFO_VERSION_VALUE: + value = parseInfoValue(nestedAttr.Value) + } + } + + if key == "" { + return "", "", fmt.Errorf("getNestedInfoData: key not found") + } + + if value == "" { + return "", "", fmt.Errorf("getNestedInfoData: value not found") + } + + return key, value, nil +} + +func parseInfoData(data map[string]string) *DevlinkDeviceInfo { + info := new(DevlinkDeviceInfo) + for key, value := range data { + switch key { + case "driver": + info.Driver = value + case "serialNumber": + info.SerialNumber = value + case "board.id": + info.BoardID = value + case "fw.app": + info.FwApp = value + case "fw.app.bundle_id": + info.FwAppBoundleID = value + case "fw.app.name": + info.FwAppName = value + case "fw.bundle_id": + info.FwBoundleID = value + case "fw.mgmt": + info.FwMgmt = value + case "fw.mgmt.api": + info.FwMgmtAPI = value + case "fw.mgmt.build": + info.FwMgmtBuild = value + case "fw.netlist": + info.FwNetlist = value + case "fw.netlist.build": + info.FwNetlistBuild = value + case "fw.psid.api": + info.FwPsidAPI = value + case "fw.undi": + info.FwUndi = value + } + } + return info +} + +func parseInfoValue(value []byte) string { + v := strings.ReplaceAll(string(value), "\x00", "") + return strings.TrimSpace(v) +} diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index 88792eab0..84e1ca7a4 100644 --- 
a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -19,6 +19,7 @@ type FilterAttrs struct { Parent uint32 Priority uint16 // lower is higher priority Protocol uint16 // unix.ETH_P_* + Chain *uint32 } func (q FilterAttrs) String() string { @@ -27,6 +28,11 @@ func (q FilterAttrs) String() string { type TcAct int32 +const ( + TC_ACT_EXT_SHIFT = 28 + TC_ACT_EXT_VAL_MASK = (1 << TC_ACT_EXT_SHIFT) - 1 +) + const ( TC_ACT_UNSPEC TcAct = -1 TC_ACT_OK TcAct = 0 @@ -40,6 +46,22 @@ const ( TC_ACT_JUMP TcAct = 0x10000000 ) +func getTcActExt(local int32) int32 { + return local << TC_ACT_EXT_SHIFT +} + +func getTcActGotoChain() TcAct { + return TcAct(getTcActExt(2)) +} + +func getTcActExtOpcode(combined int32) int32 { + return combined & (^TC_ACT_EXT_VAL_MASK) +} + +func TcActExtCmp(combined int32, opcode int32) bool { + return getTcActExtOpcode(combined) == opcode +} + func (a TcAct) String() string { switch a { case TC_ACT_UNSPEC: @@ -63,6 +85,9 @@ func (a TcAct) String() string { case TC_ACT_JUMP: return "jump" } + if TcActExtCmp(int32(a), int32(getTcActGotoChain())) { + return "goto" + } return fmt.Sprintf("0x%x", int32(a)) } @@ -93,17 +118,32 @@ func (a TcPolAct) String() string { } type ActionAttrs struct { - Index int - Capab int - Action TcAct - Refcnt int - Bindcnt int + Index int + Capab int + Action TcAct + Refcnt int + Bindcnt int + Statistics *ActionStatistic + Timestamp *ActionTimestamp } func (q ActionAttrs) String() string { return fmt.Sprintf("{Index: %d, Capab: %x, Action: %s, Refcnt: %d, Bindcnt: %d}", q.Index, q.Capab, q.Action.String(), q.Refcnt, q.Bindcnt) } +type ActionTimestamp struct { + Installed uint64 + LastUsed uint64 + Expires uint64 + FirstUsed uint64 +} + +func (t ActionTimestamp) String() string { + return fmt.Sprintf("Installed %d LastUsed %d Expires %d FirstUsed %d", t.Installed, t.LastUsed, t.Expires, t.FirstUsed) +} + +type ActionStatistic ClassStatistics + // Action represents an action in any supported filter. 
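// A small worked example of the extended TC action opcodes introduced below:
// "goto chain N" encodes opcode 2 in the bits above TC_ACT_EXT_SHIFT and the
// target chain in the low TC_ACT_EXT_VAL_MASK bits, which is what TcActExtCmp
// and the gact parser further down rely on. Illustrative only, not part of the
// vendored change.
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	const chain = 7
	// Compose a "goto chain 7" value the way the kernel encodes it.
	combined := int32(2<<netlink.TC_ACT_EXT_SHIFT | chain)

	fmt.Println(netlink.TcAct(combined))                                           // goto
	fmt.Println(netlink.TcActExtCmp(combined, int32(2<<netlink.TC_ACT_EXT_SHIFT))) // true
	fmt.Println(combined & netlink.TC_ACT_EXT_VAL_MASK)                            // 7 (target chain)
}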
type Action interface { Attrs() *ActionAttrs @@ -112,6 +152,7 @@ type Action interface { type GenericAction struct { ActionAttrs + Chain int32 } func (action *GenericAction) Type() string { @@ -157,6 +198,39 @@ func NewConnmarkAction() *ConnmarkAction { } } +type CsumUpdateFlags uint32 + +const ( + TCA_CSUM_UPDATE_FLAG_IPV4HDR CsumUpdateFlags = 1 + TCA_CSUM_UPDATE_FLAG_ICMP CsumUpdateFlags = 2 + TCA_CSUM_UPDATE_FLAG_IGMP CsumUpdateFlags = 4 + TCA_CSUM_UPDATE_FLAG_TCP CsumUpdateFlags = 8 + TCA_CSUM_UPDATE_FLAG_UDP CsumUpdateFlags = 16 + TCA_CSUM_UPDATE_FLAG_UDPLITE CsumUpdateFlags = 32 + TCA_CSUM_UPDATE_FLAG_SCTP CsumUpdateFlags = 64 +) + +type CsumAction struct { + ActionAttrs + UpdateFlags CsumUpdateFlags +} + +func (action *CsumAction) Type() string { + return "csum" +} + +func (action *CsumAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewCsumAction() *CsumAction { + return &CsumAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + type MirredAct uint8 func (a MirredAct) String() string { @@ -213,10 +287,11 @@ const ( type TunnelKeyAction struct { ActionAttrs - Action TunnelKeyAct - SrcAddr net.IP - DstAddr net.IP - KeyID uint32 + Action TunnelKeyAct + SrcAddr net.IP + DstAddr net.IP + KeyID uint32 + DestPort uint16 } func (action *TunnelKeyAction) Type() string { @@ -241,6 +316,7 @@ type SkbEditAction struct { PType *uint16 Priority *uint32 Mark *uint32 + Mask *uint32 } func (action *SkbEditAction) Type() string { @@ -259,6 +335,40 @@ func NewSkbEditAction() *SkbEditAction { } } +type PoliceAction struct { + ActionAttrs + Rate uint32 // in byte per second + Burst uint32 // in byte + RCellLog int + Mtu uint32 + Mpu uint16 // in byte + PeakRate uint32 // in byte per second + PCellLog int + AvRate uint32 // in byte per second + Overhead uint16 + LinkLayer int + ExceedAction TcPolAct + NotExceedAction TcPolAct +} + +func (action *PoliceAction) Type() string { + return "police" +} + +func (action *PoliceAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewPoliceAction() *PoliceAction { + return &PoliceAction{ + RCellLog: -1, + PCellLog: -1, + LinkLayer: 1, // ETHERNET + ExceedAction: TC_POLICE_RECLASSIFY, + NotExceedAction: TC_POLICE_OK, + } +} + // MatchAll filters match all packets type MatchAll struct { FilterAttrs @@ -274,20 +384,21 @@ func (filter *MatchAll) Type() string { return "matchall" } -type FilterFwAttrs struct { - ClassId uint32 - InDev string - Mask uint32 - Index uint32 - Buffer uint32 - Mtu uint32 - Mpu uint16 - Rate uint32 - AvRate uint32 - PeakRate uint32 - Action TcPolAct - Overhead uint16 - LinkLayer int +type FwFilter struct { + FilterAttrs + ClassId uint32 + InDev string + Mask uint32 + Police *PoliceAction + Actions []Action +} + +func (filter *FwFilter) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *FwFilter) Type() string { + return "fw" } type BpfFilter struct { @@ -322,3 +433,30 @@ func (filter *GenericFilter) Attrs() *FilterAttrs { func (filter *GenericFilter) Type() string { return filter.FilterType } + +type PeditAction struct { + ActionAttrs + Proto uint8 + SrcMacAddr net.HardwareAddr + DstMacAddr net.HardwareAddr + SrcIP net.IP + DstIP net.IP + SrcPort uint16 + DstPort uint16 +} + +func (p *PeditAction) Attrs() *ActionAttrs { + return &p.ActionAttrs +} + +func (p *PeditAction) Type() string { + return "pedit" +} + +func NewPeditAction() *PeditAction { + return &PeditAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} diff --git 
a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index c56f314cd..87cd18f8e 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -37,9 +37,11 @@ type U32 struct { ClassId uint32 Divisor uint32 // Divisor MUST be power of 2. Hash uint32 + Link uint32 RedirIndex int Sel *TcU32Sel Actions []Action + Police *PoliceAction } func (filter *U32) Attrs() *FilterAttrs { @@ -50,74 +52,185 @@ func (filter *U32) Type() string { return "u32" } -// Fw filter filters on firewall marks -// NOTE: this is in filter_linux because it refers to nl.TcPolice which -// is defined in nl/tc_linux.go -type Fw struct { +type Flower struct { FilterAttrs - ClassId uint32 - // TODO remove nl type from interface - Police nl.TcPolice - InDev string - // TODO Action - Mask uint32 - AvRate uint32 - Rtab [256]uint32 - Ptab [256]uint32 -} - -func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { - var rtab [256]uint32 - var ptab [256]uint32 - rcellLog := -1 - pcellLog := -1 - avrate := fattrs.AvRate / 8 - police := nl.TcPolice{} - police.Rate.Rate = fattrs.Rate / 8 - police.PeakRate.Rate = fattrs.PeakRate / 8 - buffer := fattrs.Buffer - linklayer := nl.LINKLAYER_ETHERNET + DestIP net.IP + DestIPMask net.IPMask + SrcIP net.IP + SrcIPMask net.IPMask + EthType uint16 + EncDestIP net.IP + EncDestIPMask net.IPMask + EncSrcIP net.IP + EncSrcIPMask net.IPMask + EncDestPort uint16 + EncKeyId uint32 + SkipHw bool + SkipSw bool + IPProto *nl.IPProto + DestPort uint16 + SrcPort uint16 - if fattrs.LinkLayer != nl.LINKLAYER_UNSPEC { - linklayer = fattrs.LinkLayer - } + Actions []Action +} - police.Action = int32(fattrs.Action) - if police.Rate.Rate != 0 { - police.Rate.Mpu = fattrs.Mpu - police.Rate.Overhead = fattrs.Overhead - if CalcRtable(&police.Rate, rtab[:], rcellLog, fattrs.Mtu, linklayer) < 0 { - return nil, errors.New("TBF: failed to calculate rate table") - } - police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer))) +func (filter *Flower) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *Flower) Type() string { + return "flower" +} + +func (filter *Flower) encodeIP(parent *nl.RtAttr, ip net.IP, mask net.IPMask, v4Type, v6Type int, v4MaskType, v6MaskType int) { + ipType := v4Type + maskType := v4MaskType + + encodeMask := mask + if mask == nil { + encodeMask = net.CIDRMask(32, 32) } - police.Mtu = fattrs.Mtu - if police.PeakRate.Rate != 0 { - police.PeakRate.Mpu = fattrs.Mpu - police.PeakRate.Overhead = fattrs.Overhead - if CalcRtable(&police.PeakRate, ptab[:], pcellLog, fattrs.Mtu, linklayer) < 0 { - return nil, errors.New("POLICE: failed to calculate peak rate table") + v4IP := ip.To4() + if v4IP == nil { + ipType = v6Type + maskType = v6MaskType + if mask == nil { + encodeMask = net.CIDRMask(128, 128) } + } else { + ip = v4IP } - return &Fw{ - FilterAttrs: attrs, - ClassId: fattrs.ClassId, - InDev: fattrs.InDev, - Mask: fattrs.Mask, - Police: police, - AvRate: avrate, - Rtab: rtab, - Ptab: ptab, - }, nil + parent.AddRtAttr(ipType, ip) + parent.AddRtAttr(maskType, encodeMask) } -func (filter *Fw) Attrs() *FilterAttrs { - return &filter.FilterAttrs +func (filter *Flower) encode(parent *nl.RtAttr) error { + if filter.EthType != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_ETH_TYPE, htons(filter.EthType)) + } + if filter.SrcIP != nil { + filter.encodeIP(parent, filter.SrcIP, filter.SrcIPMask, + nl.TCA_FLOWER_KEY_IPV4_SRC, 
nl.TCA_FLOWER_KEY_IPV6_SRC, + nl.TCA_FLOWER_KEY_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_IPV6_SRC_MASK) + } + if filter.DestIP != nil { + filter.encodeIP(parent, filter.DestIP, filter.DestIPMask, + nl.TCA_FLOWER_KEY_IPV4_DST, nl.TCA_FLOWER_KEY_IPV6_DST, + nl.TCA_FLOWER_KEY_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_IPV6_DST_MASK) + } + if filter.EncSrcIP != nil { + filter.encodeIP(parent, filter.EncSrcIP, filter.EncSrcIPMask, + nl.TCA_FLOWER_KEY_ENC_IPV4_SRC, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC, + nl.TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK) + } + if filter.EncDestIP != nil { + filter.encodeIP(parent, filter.EncDestIP, filter.EncSrcIPMask, + nl.TCA_FLOWER_KEY_ENC_IPV4_DST, nl.TCA_FLOWER_KEY_ENC_IPV6_DST, + nl.TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_DST_MASK) + } + if filter.EncDestPort != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_UDP_DST_PORT, htons(filter.EncDestPort)) + } + if filter.EncKeyId != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_KEY_ID, htonl(filter.EncKeyId)) + } + if filter.IPProto != nil { + ipproto := *filter.IPProto + parent.AddRtAttr(nl.TCA_FLOWER_KEY_IP_PROTO, ipproto.Serialize()) + if filter.SrcPort != 0 { + switch ipproto { + case nl.IPPROTO_TCP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_SRC, htons(filter.SrcPort)) + case nl.IPPROTO_UDP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_SRC, htons(filter.SrcPort)) + case nl.IPPROTO_SCTP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_SRC, htons(filter.SrcPort)) + } + } + if filter.DestPort != 0 { + switch ipproto { + case nl.IPPROTO_TCP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_DST, htons(filter.DestPort)) + case nl.IPPROTO_UDP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_DST, htons(filter.DestPort)) + case nl.IPPROTO_SCTP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_DST, htons(filter.DestPort)) + } + } + } + + var flags uint32 = 0 + if filter.SkipHw { + flags |= nl.TCA_CLS_FLAGS_SKIP_HW + } + if filter.SkipSw { + flags |= nl.TCA_CLS_FLAGS_SKIP_SW + } + parent.AddRtAttr(nl.TCA_FLOWER_FLAGS, htonl(flags)) + + actionsAttr := parent.AddRtAttr(nl.TCA_FLOWER_ACT, nil) + if err := EncodeActions(actionsAttr, filter.Actions); err != nil { + return err + } + return nil } -func (filter *Fw) Type() string { - return "fw" +func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error { + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_FLOWER_KEY_ETH_TYPE: + filter.EthType = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_IPV4_SRC, nl.TCA_FLOWER_KEY_IPV6_SRC: + filter.SrcIP = datum.Value + case nl.TCA_FLOWER_KEY_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_IPV6_SRC_MASK: + filter.SrcIPMask = datum.Value + case nl.TCA_FLOWER_KEY_IPV4_DST, nl.TCA_FLOWER_KEY_IPV6_DST: + filter.DestIP = datum.Value + case nl.TCA_FLOWER_KEY_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_IPV6_DST_MASK: + filter.DestIPMask = datum.Value + case nl.TCA_FLOWER_KEY_ENC_IPV4_SRC, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC: + filter.EncSrcIP = datum.Value + case nl.TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK: + filter.EncSrcIPMask = datum.Value + case nl.TCA_FLOWER_KEY_ENC_IPV4_DST, nl.TCA_FLOWER_KEY_ENC_IPV6_DST: + filter.EncDestIP = datum.Value + case nl.TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_DST_MASK: + filter.EncDestIPMask = datum.Value + case nl.TCA_FLOWER_KEY_ENC_UDP_DST_PORT: + filter.EncDestPort = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_ENC_KEY_ID: + filter.EncKeyId = ntohl(datum.Value) + case nl.TCA_FLOWER_KEY_IP_PROTO: + val := new(nl.IPProto) + *val = nl.IPProto(datum.Value[0]) + filter.IPProto = val + 
case nl.TCA_FLOWER_KEY_TCP_SRC, nl.TCA_FLOWER_KEY_UDP_SRC, nl.TCA_FLOWER_KEY_SCTP_SRC: + filter.SrcPort = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_TCP_DST, nl.TCA_FLOWER_KEY_UDP_DST, nl.TCA_FLOWER_KEY_SCTP_DST: + filter.DestPort = ntohs(datum.Value) + case nl.TCA_FLOWER_ACT: + tables, err := nl.ParseRouteAttr(datum.Value) + if err != nil { + return err + } + filter.Actions, err = parseActions(tables) + if err != nil { + return err + } + case nl.TCA_FLOWER_FLAGS: + attr := nl.DeserializeUint32Bitfield(datum.Value) + skipSw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_HW + skipHw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_SW + if skipSw != 0 { + filter.SkipSw = true + } + if skipHw != 0 { + filter.SkipHw = true + } + } + } + return nil } // FilterDel will delete a filter from the system. @@ -129,19 +242,7 @@ func FilterDel(filter Filter) error { // FilterDel will delete a filter from the system. // Equivalent to: `tc filter del $filter` func (h *Handle) FilterDel(filter Filter) error { - req := h.newNetlinkRequest(unix.RTM_DELTFILTER, unix.NLM_F_ACK) - base := filter.Attrs() - msg := &nl.TcMsg{ - Family: nl.FAMILY_ALL, - Ifindex: int32(base.LinkIndex), - Handle: base.Handle, - Parent: base.Parent, - Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), - } - req.AddData(msg) - - _, err := req.Execute(unix.NETLINK_ROUTE, 0) - return err + return h.filterModify(filter, unix.RTM_DELTFILTER, 0) } // FilterAdd will add a filter to the system. @@ -153,7 +254,7 @@ func FilterAdd(filter Filter) error { // FilterAdd will add a filter to the system. // Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { - return h.filterModify(filter, unix.NLM_F_CREATE|unix.NLM_F_EXCL) + return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL) } // FilterReplace will replace a filter. @@ -165,12 +266,11 @@ func FilterReplace(filter Filter) error { // FilterReplace will replace a filter. 
// Equivalent to: `tc filter replace $filter` func (h *Handle) FilterReplace(filter Filter) error { - return h.filterModify(filter, unix.NLM_F_CREATE) + return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE) } -func (h *Handle) filterModify(filter Filter, flags int) error { - native = nl.NativeEndian() - req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, flags|unix.NLM_F_ACK) +func (h *Handle) filterModify(filter Filter, proto, flags int) error { + req := h.newNetlinkRequest(proto, flags|unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -180,6 +280,9 @@ func (h *Handle) filterModify(filter Filter, flags int) error { Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), } req.AddData(msg) + if filter.Attrs().Chain != nil { + req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(*filter.Attrs().Chain))) + } req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type()))) options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) @@ -226,6 +329,15 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if filter.Hash != 0 { options.AddRtAttr(nl.TCA_U32_HASH, nl.Uint32Attr(filter.Hash)) } + if filter.Link != 0 { + options.AddRtAttr(nl.TCA_U32_LINK, nl.Uint32Attr(filter.Link)) + } + if filter.Police != nil { + police := options.AddRtAttr(nl.TCA_U32_POLICE, nil) + if err := encodePolice(police, filter.Police); err != nil { + return err + } + } actionsAttr := options.AddRtAttr(nl.TCA_U32_ACT, nil) // backwards compatibility if filter.RedirIndex != 0 { @@ -234,7 +346,7 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if err := EncodeActions(actionsAttr, filter.Actions); err != nil { return err } - case *Fw: + case *FwFilter: if filter.Mask != 0 { b := make([]byte, 4) native.PutUint32(b, filter.Mask) @@ -243,17 +355,10 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if filter.InDev != "" { options.AddRtAttr(nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev)) } - if (filter.Police != nl.TcPolice{}) { - + if filter.Police != nil { police := options.AddRtAttr(nl.TCA_FW_POLICE, nil) - police.AddRtAttr(nl.TCA_POLICE_TBF, filter.Police.Serialize()) - if (filter.Police.Rate != nl.TcRateSpec{}) { - payload := SerializeRtab(filter.Rtab) - police.AddRtAttr(nl.TCA_POLICE_RATE, payload) - } - if (filter.Police.PeakRate != nl.TcRateSpec{}) { - payload := SerializeRtab(filter.Ptab) - police.AddRtAttr(nl.TCA_POLICE_PEAKRATE, payload) + if err := encodePolice(police, filter.Police); err != nil { + return err } } if filter.ClassId != 0 { @@ -261,6 +366,10 @@ func (h *Handle) filterModify(filter Filter, flags int) error { native.PutUint32(b, filter.ClassId) options.AddRtAttr(nl.TCA_FW_CLASSID, b) } + actionsAttr := options.AddRtAttr(nl.TCA_FW_ACT, nil) + if err := EncodeActions(actionsAttr, filter.Actions); err != nil { + return err + } case *BpfFilter: var bpfFlags uint32 if filter.ClassId != 0 { @@ -284,8 +393,11 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if filter.ClassId != 0 { options.AddRtAttr(nl.TCA_MATCHALL_CLASSID, nl.Uint32Attr(filter.ClassId)) } + case *Flower: + if err := filter.encode(options); err != nil { + return err + } } - req.AddData(options) _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err @@ -347,11 +459,13 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { case "u32": filter = &U32{} case "fw": - filter = &Fw{} + filter = &FwFilter{} case "bpf": filter = &BpfFilter{} case "matchall": filter = &MatchAll{} + case "flower": + filter = &Flower{} default: 
filter = &GenericFilter{FilterType: filterType} } @@ -381,9 +495,18 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { if err != nil { return nil, err } + case "flower": + detailed, err = parseFlowerData(filter, data) + if err != nil { + return nil, err + } default: detailed = true } + case nl.TCA_CHAIN: + val := new(uint32) + *val = native.Uint32(attr.Value) + base.Chain = val } } // only return the detailed version of the filter @@ -412,6 +535,61 @@ func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) { attrs.Bindcnt = int(tcgen.Bindcnt) } +func toTimeStamp(tcf *nl.Tcf) *ActionTimestamp { + return &ActionTimestamp{ + Installed: tcf.Install, + LastUsed: tcf.LastUse, + Expires: tcf.Expires, + FirstUsed: tcf.FirstUse} +} + +func encodePolice(attr *nl.RtAttr, action *PoliceAction) error { + var rtab [256]uint32 + var ptab [256]uint32 + police := nl.TcPolice{} + police.Index = uint32(action.Attrs().Index) + police.Bindcnt = int32(action.Attrs().Bindcnt) + police.Capab = uint32(action.Attrs().Capab) + police.Refcnt = int32(action.Attrs().Refcnt) + police.Rate.Rate = action.Rate + police.PeakRate.Rate = action.PeakRate + police.Action = int32(action.ExceedAction) + + if police.Rate.Rate != 0 { + police.Rate.Mpu = action.Mpu + police.Rate.Overhead = action.Overhead + if CalcRtable(&police.Rate, rtab[:], action.RCellLog, action.Mtu, action.LinkLayer) < 0 { + return errors.New("TBF: failed to calculate rate table") + } + police.Burst = Xmittime(uint64(police.Rate.Rate), action.Burst) + } + + police.Mtu = action.Mtu + if police.PeakRate.Rate != 0 { + police.PeakRate.Mpu = action.Mpu + police.PeakRate.Overhead = action.Overhead + if CalcRtable(&police.PeakRate, ptab[:], action.PCellLog, action.Mtu, action.LinkLayer) < 0 { + return errors.New("POLICE: failed to calculate peak rate table") + } + } + + attr.AddRtAttr(nl.TCA_POLICE_TBF, police.Serialize()) + if police.Rate.Rate != 0 { + attr.AddRtAttr(nl.TCA_POLICE_RATE, SerializeRtab(rtab)) + } + if police.PeakRate.Rate != 0 { + attr.AddRtAttr(nl.TCA_POLICE_PEAKRATE, SerializeRtab(ptab)) + } + if action.AvRate != 0 { + attr.AddRtAttr(nl.TCA_POLICE_AVRATE, nl.Uint32Attr(action.AvRate)) + } + if action.NotExceedAction != 0 { + attr.AddRtAttr(nl.TCA_POLICE_RESULT, nl.Uint32Attr(uint32(action.NotExceedAction))) + } + + return nil +} + func EncodeActions(attr *nl.RtAttr, actions []Action) error { tabIndex := int(nl.TCA_ACT_TAB) @@ -419,6 +597,14 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { switch action := action.(type) { default: return fmt.Errorf("unknown action type %s", action.Type()) + case *PoliceAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("police")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + if err := encodePolice(aopts, action); err != nil { + return err + } case *MirredAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -456,6 +642,9 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { } else { return fmt.Errorf("invalid dst addr %s for tunnel_key action", action.DstAddr) } + if action.DestPort != 0 { + aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_DST_PORT, htons(action.DestPort)) + } } case *SkbEditAction: table := attr.AddRtAttr(tabIndex, nil) @@ -477,6 +666,9 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { if action.Mark != nil { aopts.AddRtAttr(nl.TCA_SKBEDIT_MARK, nl.Uint32Attr(*action.Mark)) } + if action.Mask != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_MASK, nl.Uint32Attr(*action.Mask)) + } 
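// A hedged end-to-end sketch of the new classifier/action plumbing above: a
// flower filter on ingress matching TCP to 10.0.0.0/24 port 443, policed with
// the new PoliceAction (drop above ~1 MB/s). The interface name "eth0", the
// rates and the clsact ingress parent are placeholder choices; this is an
// illustration, not part of the vendored change.
package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
	"golang.org/x/sys/unix"
)

func main() {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatal(err)
	}

	ipProto := nl.IPPROTO_TCP
	police := netlink.NewPoliceAction()
	police.Rate = 1024 * 1024 // bytes per second
	police.Burst = 32 * 1024  // bytes
	police.Mtu = 1514
	police.ExceedAction = netlink.TC_POLICE_SHOT // drop once the rate is exceeded

	filter := &netlink.Flower{
		FilterAttrs: netlink.FilterAttrs{
			LinkIndex: link.Attrs().Index,
			Parent:    netlink.HANDLE_MIN_INGRESS,
			Priority:  1,
			Protocol:  unix.ETH_P_IP,
		},
		DestIP:     net.ParseIP("10.0.0.0"),
		DestIPMask: net.CIDRMask(24, 32),
		IPProto:    &ipProto,
		DestPort:   443,
		Actions:    []netlink.Action{police},
	}
	if err := netlink.FilterAdd(filter); err != nil {
		log.Fatal(err)
	}
}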
case *ConnmarkAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -487,6 +679,16 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { } toTcGen(action.Attrs(), &connmark.TcGen) aopts.AddRtAttr(nl.TCA_CONNMARK_PARMS, connmark.Serialize()) + case *CsumAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("csum")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + csum := nl.TcCsum{ + UpdateFlags: uint32(action.UpdateFlags), + } + toTcGen(action.Attrs(), &csum.TcGen) + aopts.AddRtAttr(nl.TCA_CSUM_PARMS, csum.Serialize()) case *BpfAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -505,16 +707,64 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { gen := nl.TcGen{} toTcGen(action.Attrs(), &gen) aopts.AddRtAttr(nl.TCA_GACT_PARMS, gen.Serialize()) + case *PeditAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + pedit := nl.TcPedit{} + if action.SrcMacAddr != nil { + pedit.SetEthSrc(action.SrcMacAddr) + } + if action.DstMacAddr != nil { + pedit.SetEthDst(action.DstMacAddr) + } + if action.SrcIP != nil { + pedit.SetSrcIP(action.SrcIP) + } + if action.DstIP != nil { + pedit.SetDstIP(action.DstIP) + } + if action.SrcPort != 0 { + pedit.SetSrcPort(action.SrcPort, action.Proto) + } + if action.DstPort != 0 { + pedit.SetDstPort(action.DstPort, action.Proto) + } + pedit.Encode(table) } } return nil } +func parsePolice(data syscall.NetlinkRouteAttr, police *PoliceAction) { + switch data.Attr.Type { + case nl.TCA_POLICE_RESULT: + police.NotExceedAction = TcPolAct(native.Uint32(data.Value[0:4])) + case nl.TCA_POLICE_AVRATE: + police.AvRate = native.Uint32(data.Value[0:4]) + case nl.TCA_POLICE_TBF: + p := *nl.DeserializeTcPolice(data.Value) + police.ActionAttrs = ActionAttrs{} + police.Attrs().Index = int(p.Index) + police.Attrs().Bindcnt = int(p.Bindcnt) + police.Attrs().Capab = int(p.Capab) + police.Attrs().Refcnt = int(p.Refcnt) + police.ExceedAction = TcPolAct(p.Action) + police.Rate = p.Rate.Rate + police.PeakRate = p.PeakRate.Rate + police.Burst = Xmitsize(uint64(p.Rate.Rate), p.Burst) + police.Mtu = p.Mtu + police.LinkLayer = int(p.Rate.Linklayer) & nl.TC_LINKLAYER_MASK + police.Overhead = p.Rate.Overhead + } +} + func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { var actions []Action for _, table := range tables { var action Action var actionType string + var actionnStatistic *ActionStatistic + var actionTimestamp *ActionTimestamp aattrs, err := nl.ParseRouteAttr(table.Value) if err != nil { return nil, err @@ -532,12 +782,18 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action = &BpfAction{} case "connmark": action = &ConnmarkAction{} + case "csum": + action = &CsumAction{} case "gact": action = &GenericAction{} case "tunnel_key": action = &TunnelKeyAction{} case "skbedit": action = &SkbEditAction{} + case "police": + action = &PoliceAction{} + case "pedit": + action = &PeditAction{} default: break nextattr } @@ -556,7 +812,11 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { toAttrs(&mirred.TcGen, action.Attrs()) action.(*MirredAction).Ifindex = int(mirred.Ifindex) action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction) + case nl.TCA_MIRRED_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } + case "tunnel_key": switch adatum.Attr.Type { case nl.TCA_TUNNEL_KEY_PARMS: @@ -566,12 +826,15 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { 
action.(*TunnelKeyAction).Action = TunnelKeyAct(tun.Action) case nl.TCA_TUNNEL_KEY_ENC_KEY_ID: action.(*TunnelKeyAction).KeyID = networkOrder.Uint32(adatum.Value[0:4]) - case nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC: - case nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC: - action.(*TunnelKeyAction).SrcAddr = net.IP(adatum.Value[:]) - case nl.TCA_TUNNEL_KEY_ENC_IPV6_DST: - case nl.TCA_TUNNEL_KEY_ENC_IPV4_DST: - action.(*TunnelKeyAction).DstAddr = net.IP(adatum.Value[:]) + case nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC, nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC: + action.(*TunnelKeyAction).SrcAddr = adatum.Value[:] + case nl.TCA_TUNNEL_KEY_ENC_IPV6_DST, nl.TCA_TUNNEL_KEY_ENC_IPV4_DST: + action.(*TunnelKeyAction).DstAddr = adatum.Value[:] + case nl.TCA_TUNNEL_KEY_ENC_DST_PORT: + action.(*TunnelKeyAction).DestPort = ntohs(adatum.Value) + case nl.TCA_TUNNEL_KEY_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "skbedit": switch adatum.Attr.Type { @@ -582,6 +845,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { case nl.TCA_SKBEDIT_MARK: mark := native.Uint32(adatum.Value[0:4]) action.(*SkbEditAction).Mark = &mark + case nl.TCA_SKBEDIT_MASK: + mask := native.Uint32(adatum.Value[0:4]) + action.(*SkbEditAction).Mask = &mask case nl.TCA_SKBEDIT_PRIORITY: priority := native.Uint32(adatum.Value[0:4]) action.(*SkbEditAction).Priority = &priority @@ -591,6 +857,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { case nl.TCA_SKBEDIT_QUEUE_MAPPING: mapping := native.Uint16(adatum.Value[0:2]) action.(*SkbEditAction).QueueMapping = &mapping + case nl.TCA_SKBEDIT_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "bpf": switch adatum.Attr.Type { @@ -601,6 +870,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*BpfAction).Fd = int(native.Uint32(adatum.Value[0:4])) case nl.TCA_ACT_BPF_NAME: action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1]) + case nl.TCA_ACT_BPF_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "connmark": switch adatum.Attr.Type { @@ -609,24 +881,53 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*ConnmarkAction).ActionAttrs = ActionAttrs{} toAttrs(&connmark.TcGen, action.Attrs()) action.(*ConnmarkAction).Zone = connmark.Zone + case nl.TCA_CONNMARK_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) + } + case "csum": + switch adatum.Attr.Type { + case nl.TCA_CSUM_PARMS: + csum := *nl.DeserializeTcCsum(adatum.Value) + action.(*CsumAction).ActionAttrs = ActionAttrs{} + toAttrs(&csum.TcGen, action.Attrs()) + action.(*CsumAction).UpdateFlags = CsumUpdateFlags(csum.UpdateFlags) + case nl.TCA_CSUM_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "gact": switch adatum.Attr.Type { case nl.TCA_GACT_PARMS: gen := *nl.DeserializeTcGen(adatum.Value) toAttrs(&gen, action.Attrs()) + if action.Attrs().Action.String() == "goto" { + action.(*GenericAction).Chain = TC_ACT_EXT_VAL_MASK & gen.Action + } + case nl.TCA_GACT_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } + case "police": + parsePolice(adatum, action.(*PoliceAction)) } } + case nl.TCA_ACT_STATS: + s, err := parseTcStats2(aattr.Value) + if err != nil { + return nil, err + } + actionnStatistic = (*ActionStatistic)(s) } } + action.Attrs().Statistics = actionnStatistic + action.Attrs().Timestamp = actionTimestamp actions = 
append(actions, action) } return actions, nil } func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { - native = nl.NativeEndian() u32 := filter.(*U32) detailed := false for _, datum := range data { @@ -658,20 +959,28 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) u32.RedirIndex = int(action.Ifindex) } } + case nl.TCA_U32_POLICE: + var police PoliceAction + adata, _ := nl.ParseRouteAttr(datum.Value) + for _, aattr := range adata { + parsePolice(aattr, &police) + } + u32.Police = &police case nl.TCA_U32_CLASSID: u32.ClassId = native.Uint32(datum.Value) case nl.TCA_U32_DIVISOR: u32.Divisor = native.Uint32(datum.Value) case nl.TCA_U32_HASH: u32.Hash = native.Uint32(datum.Value) + case nl.TCA_U32_LINK: + u32.Link = native.Uint32(datum.Value) } } return detailed, nil } func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { - native = nl.NativeEndian() - fw := filter.(*Fw) + fw := filter.(*FwFilter) detailed := true for _, datum := range data { switch datum.Attr.Type { @@ -682,16 +991,20 @@ func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { case nl.TCA_FW_INDEV: fw.InDev = string(datum.Value[:len(datum.Value)-1]) case nl.TCA_FW_POLICE: + var police PoliceAction adata, _ := nl.ParseRouteAttr(datum.Value) for _, aattr := range adata { - switch aattr.Attr.Type { - case nl.TCA_POLICE_TBF: - fw.Police = *nl.DeserializeTcPolice(aattr.Value) - case nl.TCA_POLICE_RATE: - fw.Rtab = DeserializeRtab(aattr.Value) - case nl.TCA_POLICE_PEAKRATE: - fw.Ptab = DeserializeRtab(aattr.Value) - } + parsePolice(aattr, &police) + } + fw.Police = &police + case nl.TCA_FW_ACT: + tables, err := nl.ParseRouteAttr(datum.Value) + if err != nil { + return detailed, err + } + fw.Actions, err = parseActions(tables) + if err != nil { + return detailed, err } } } @@ -699,7 +1012,6 @@ func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { } func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { - native = nl.NativeEndian() bpf := filter.(*BpfFilter) detailed := true for _, datum := range data { @@ -718,14 +1030,13 @@ func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) case nl.TCA_BPF_ID: bpf.Id = int(native.Uint32(datum.Value[0:4])) case nl.TCA_BPF_TAG: - bpf.Tag = hex.EncodeToString(datum.Value[:len(datum.Value)-1]) + bpf.Tag = hex.EncodeToString(datum.Value) } } return detailed, nil } func parseMatchAllData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { - native = nl.NativeEndian() matchall := filter.(*MatchAll) detailed := true for _, datum := range data { @@ -746,6 +1057,10 @@ func parseMatchAllData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, er return detailed, nil } +func parseFlowerData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { + return true, filter.(*Flower).decode(data) +} + func AlignToAtm(size uint) uint { var linksize, cells int cells = int(size / nl.ATM_CELL_PAYLOAD) @@ -783,7 +1098,7 @@ func CalcRtable(rate *nl.TcRateSpec, rtab []uint32, cellLog int, mtu uint32, lin } for i := 0; i < 256; i++ { sz = AdjustSize(uint((i+1)<= nl.IPSET_ERR_PRIVATE { + err = nl.IPSetError(uintptr(errno)) + } + } + return +} + +func ipsetUnserialize(msgs [][]byte) (result IPSetResult) { + for _, msg := range msgs { + result.unserialize(msg) + } + return result +} + +func (result *IPSetResult) unserialize(msg []byte) { + result.Nfgenmsg = nl.DeserializeNfgenmsg(msg) + + for attr := range 
nl.ParseAttributes(msg[4:]) { + switch attr.Type { + case nl.IPSET_ATTR_PROTOCOL: + result.Protocol = attr.Value[0] + case nl.IPSET_ATTR_SETNAME: + result.SetName = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_COMMENT: + result.Comment = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_TYPENAME: + result.TypeName = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_REVISION: + result.Revision = attr.Value[0] + case nl.IPSET_ATTR_FAMILY: + result.Family = attr.Value[0] + case nl.IPSET_ATTR_FLAGS: + result.Flags = attr.Value[0] + case nl.IPSET_ATTR_DATA | nl.NLA_F_NESTED: + result.parseAttrData(attr.Value) + case nl.IPSET_ATTR_ADT | nl.NLA_F_NESTED: + result.parseAttrADT(attr.Value) + case nl.IPSET_ATTR_PROTOCOL_MIN: + result.ProtocolMinVersion = attr.Value[0] + case nl.IPSET_ATTR_MARKMASK: + result.MarkMask = attr.Uint32() + default: + log.Printf("unknown ipset attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) + } + } +} + +func (result *IPSetResult) parseAttrData(data []byte) { + for attr := range nl.ParseAttributes(data) { + switch attr.Type { + case nl.IPSET_ATTR_HASHSIZE | nl.NLA_F_NET_BYTEORDER: + result.HashSize = attr.Uint32() + case nl.IPSET_ATTR_MAXELEM | nl.NLA_F_NET_BYTEORDER: + result.MaxElements = attr.Uint32() + case nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint32() + result.Timeout = &val + case nl.IPSET_ATTR_ELEMENTS | nl.NLA_F_NET_BYTEORDER: + result.NumEntries = attr.Uint32() + case nl.IPSET_ATTR_REFERENCES | nl.NLA_F_NET_BYTEORDER: + result.References = attr.Uint32() + case nl.IPSET_ATTR_MEMSIZE | nl.NLA_F_NET_BYTEORDER: + result.SizeInMemory = attr.Uint32() + case nl.IPSET_ATTR_CADT_FLAGS | nl.NLA_F_NET_BYTEORDER: + result.CadtFlags = attr.Uint32() + case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED: + for nested := range nl.ParseAttributes(attr.Value) { + switch nested.Type { + case nl.IPSET_ATTR_IP | nl.NLA_F_NET_BYTEORDER: + result.Entries = append(result.Entries, IPSetEntry{IP: nested.Value}) + case nl.IPSET_ATTR_IP: + result.IPFrom = nested.Value + default: + log.Printf("unknown nested ipset data attribute from kernel: %+v %v", nested, nested.Type&nl.NLA_TYPE_MASK) + } + } + case nl.IPSET_ATTR_IP_TO | nl.NLA_F_NESTED: + for nested := range nl.ParseAttributes(attr.Value) { + switch nested.Type { + case nl.IPSET_ATTR_IP: + result.IPTo = nested.Value + default: + log.Printf("unknown nested ipset data attribute from kernel: %+v %v", nested, nested.Type&nl.NLA_TYPE_MASK) + } + } + case nl.IPSET_ATTR_PORT_FROM | nl.NLA_F_NET_BYTEORDER: + result.PortFrom = networkOrder.Uint16(attr.Value) + case nl.IPSET_ATTR_PORT_TO | nl.NLA_F_NET_BYTEORDER: + result.PortTo = networkOrder.Uint16(attr.Value) + case nl.IPSET_ATTR_CADT_LINENO | nl.NLA_F_NET_BYTEORDER: + result.LineNo = attr.Uint32() + case nl.IPSET_ATTR_COMMENT: + result.Comment = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_MARKMASK: + result.MarkMask = attr.Uint32() + default: + log.Printf("unknown ipset data attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) + } + } +} + +func (result *IPSetResult) parseAttrADT(data []byte) { + for attr := range nl.ParseAttributes(data) { + switch attr.Type { + case nl.IPSET_ATTR_DATA | nl.NLA_F_NESTED: + result.Entries = append(result.Entries, parseIPSetEntry(attr.Value)) + default: + log.Printf("unknown ADT attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) + } + } +} + +func parseIPSetEntry(data []byte) (entry IPSetEntry) { + for attr := range nl.ParseAttributes(data) { + switch attr.Type { + case nl.IPSET_ATTR_TIMEOUT | 
nl.NLA_F_NET_BYTEORDER: + val := attr.Uint32() + entry.Timeout = &val + case nl.IPSET_ATTR_BYTES | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint64() + entry.Bytes = &val + case nl.IPSET_ATTR_PACKETS | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint64() + entry.Packets = &val + case nl.IPSET_ATTR_ETHER: + entry.MAC = net.HardwareAddr(attr.Value) + case nl.IPSET_ATTR_IP: + entry.IP = net.IP(attr.Value) + case nl.IPSET_ATTR_COMMENT: + entry.Comment = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED: + for attr := range nl.ParseAttributes(attr.Value) { + switch attr.Type { + case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6: + entry.IP = net.IP(attr.Value) + default: + log.Printf("unknown nested ADT attribute from kernel: %+v", attr) + } + } + case nl.IPSET_ATTR_IP2 | nl.NLA_F_NESTED: + for attr := range nl.ParseAttributes(attr.Value) { + switch attr.Type { + case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6: + entry.IP2 = net.IP(attr.Value) + default: + log.Printf("unknown nested ADT attribute from kernel: %+v", attr) + } + } + case nl.IPSET_ATTR_CIDR: + entry.CIDR = attr.Value[0] + case nl.IPSET_ATTR_CIDR2: + entry.CIDR2 = attr.Value[0] + case nl.IPSET_ATTR_PORT | nl.NLA_F_NET_BYTEORDER: + val := networkOrder.Uint16(attr.Value) + entry.Port = &val + case nl.IPSET_ATTR_PROTO: + val := attr.Value[0] + entry.Protocol = &val + case nl.IPSET_ATTR_IFACE: + entry.IFace = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_MARK | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint32() + entry.Mark = &val + default: + log.Printf("unknown ADT attribute from kernel: %+v", attr) + } + } + return +} diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index 886d88d1b..f820cdb67 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -22,31 +22,41 @@ type ( // LinkAttrs represents data shared by most link types type LinkAttrs struct { - Index int - MTU int - TxQLen int // Transmit Queue Length - Name string - HardwareAddr net.HardwareAddr - Flags net.Flags - RawFlags uint32 - ParentIndex int // index of the parent link device - MasterIndex int // must be the index of a bridge - Namespace interface{} // nil | NsPid | NsFd - Alias string - Statistics *LinkStatistics - Promisc int - Xdp *LinkXdp - EncapType string - Protinfo *Protinfo - OperState LinkOperState - NetNsID int - NumTxQueues int - NumRxQueues int - GSOMaxSize uint32 - GSOMaxSegs uint32 - Vfs []VfInfo // virtual functions available on link - Group uint32 - Slave LinkSlave + Index int + MTU int + TxQLen int // Transmit Queue Length + Name string + HardwareAddr net.HardwareAddr + Flags net.Flags + RawFlags uint32 + ParentIndex int // index of the parent link device + MasterIndex int // must be the index of a bridge + Namespace interface{} // nil | NsPid | NsFd + Alias string + AltNames []string + Statistics *LinkStatistics + Promisc int + Allmulti int + Multi int + Xdp *LinkXdp + EncapType string + Protinfo *Protinfo + OperState LinkOperState + PhysSwitchID int + NetNsID int + NumTxQueues int + NumRxQueues int + TSOMaxSegs uint32 + TSOMaxSize uint32 + GSOMaxSegs uint32 + GSOMaxSize uint32 + GROMaxSize uint32 + GSOIPv4MaxSize uint32 + GROIPv4MaxSize uint32 + Vfs []VfInfo // virtual functions available on link + Group uint32 + PermHWAddr net.HardwareAddr + Slave LinkSlave } // LinkSlave represents a slave device. 
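// A read-side sketch of the LinkAttrs fields added above (AltNames, Allmulti,
// PermHWAddr and the GSO/GRO size limits), as LinkByName reports them on kernels
// that expose the corresponding attributes; "eth0" is a placeholder interface.
// Illustrative only, not part of the vendored change.
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatal(err)
	}
	attrs := link.Attrs()
	fmt.Println("altnames:    ", attrs.AltNames)
	fmt.Println("perm hwaddr: ", attrs.PermHWAddr)
	fmt.Println("allmulti:    ", attrs.Allmulti)
	fmt.Println("gso max size:", attrs.GSOMaxSize, "gro max size:", attrs.GROMaxSize)
}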
@@ -60,11 +70,23 @@ type VfInfo struct { Mac net.HardwareAddr Vlan int Qos int + VlanProto int TxRate int // IFLA_VF_TX_RATE Max TxRate Spoofchk bool LinkState uint32 MaxTxRate uint32 // IFLA_VF_RATE Max TxRate MinTxRate uint32 // IFLA_VF_RATE Min TxRate + RxPackets uint64 + TxPackets uint64 + RxBytes uint64 + TxBytes uint64 + Multicast uint64 + Broadcast uint64 + RxDropped uint64 + TxDropped uint64 + + RssQuery uint32 + Trust uint32 } // LinkOperState represents the values of the IFLA_OPERSTATE link @@ -103,7 +125,8 @@ func (s LinkOperState) String() string { // NewLinkAttrs returns LinkAttrs structure filled with default values func NewLinkAttrs() LinkAttrs { return LinkAttrs{ - TxQLen: -1, + NetNsID: -1, + TxQLen: -1, } } @@ -196,10 +219,11 @@ type LinkStatistics64 struct { } type LinkXdp struct { - Fd int - Attached bool - Flags uint32 - ProgId uint32 + Fd int + Attached bool + AttachMode uint32 + Flags uint32 + ProgId uint32 } // Device links cannot be created via netlink. These links @@ -246,8 +270,11 @@ func (ifb *Ifb) Type() string { type Bridge struct { LinkAttrs MulticastSnooping *bool + AgeingTime *uint32 HelloTime *uint32 VlanFiltering *bool + VlanDefaultPVID *uint16 + GroupFwdMask *uint16 } func (bridge *Bridge) Attrs() *LinkAttrs { @@ -291,6 +318,9 @@ type Macvlan struct { // MACAddrs is only populated for Macvlan SOURCE links MACAddrs []net.HardwareAddr + + BCQueueLen uint32 + UsedBCQueueLen uint32 } func (macvlan *Macvlan) Attrs() *LinkAttrs { @@ -333,11 +363,52 @@ func (tuntap *Tuntap) Type() string { return "tuntap" } +type NetkitMode uint32 + +const ( + NETKIT_MODE_L2 NetkitMode = iota + NETKIT_MODE_L3 +) + +type NetkitPolicy int + +const ( + NETKIT_POLICY_FORWARD NetkitPolicy = 0 + NETKIT_POLICY_BLACKHOLE NetkitPolicy = 2 +) + +func (n *Netkit) IsPrimary() bool { + return n.isPrimary +} + +// SetPeerAttrs will not take effect if trying to modify an existing netkit device +func (n *Netkit) SetPeerAttrs(Attrs *LinkAttrs) { + n.peerLinkAttrs = *Attrs +} + +type Netkit struct { + LinkAttrs + Mode NetkitMode + Policy NetkitPolicy + PeerPolicy NetkitPolicy + isPrimary bool + peerLinkAttrs LinkAttrs +} + +func (n *Netkit) Attrs() *LinkAttrs { + return &n.LinkAttrs +} + +func (n *Netkit) Type() string { + return "netkit" +} + // Veth devices must specify PeerName on create type Veth struct { LinkAttrs PeerName string // veth on create only PeerHardwareAddr net.HardwareAddr + PeerNamespace interface{} } func (veth *Veth) Attrs() *LinkAttrs { @@ -348,6 +419,19 @@ func (veth *Veth) Type() string { return "veth" } +// Wireguard represent links of type "wireguard", see https://www.wireguard.com/ +type Wireguard struct { + LinkAttrs +} + +func (wg *Wireguard) Attrs() *LinkAttrs { + return &wg.LinkAttrs +} + +func (wg *Wireguard) Type() string { + return "wireguard" +} + // GenericLink links represent types that are not currently understood // by this netlink library. 
type GenericLink struct { @@ -428,6 +512,19 @@ func (ipvlan *IPVlan) Type() string { return "ipvlan" } +// IPVtap - IPVtap is a virtual interfaces based on ipvlan +type IPVtap struct { + IPVlan +} + +func (ipvtap *IPVtap) Attrs() *LinkAttrs { + return &ipvtap.LinkAttrs +} + +func (ipvtap IPVtap) Type() string { + return "ipvtap" +} + // VlanProtocol type type VlanProtocol int @@ -527,6 +624,27 @@ const ( BOND_ARP_VALIDATE_ALL ) +var bondArpValidateToString = map[BondArpValidate]string{ + BOND_ARP_VALIDATE_NONE: "none", + BOND_ARP_VALIDATE_ACTIVE: "active", + BOND_ARP_VALIDATE_BACKUP: "backup", + BOND_ARP_VALIDATE_ALL: "none", +} +var StringToBondArpValidateMap = map[string]BondArpValidate{ + "none": BOND_ARP_VALIDATE_NONE, + "active": BOND_ARP_VALIDATE_ACTIVE, + "backup": BOND_ARP_VALIDATE_BACKUP, + "all": BOND_ARP_VALIDATE_ALL, +} + +func (b BondArpValidate) String() string { + s, ok := bondArpValidateToString[b] + if !ok { + return fmt.Sprintf("BondArpValidate(%d)", b) + } + return s +} + // BondPrimaryReselect type type BondPrimaryReselect int @@ -537,6 +655,25 @@ const ( BOND_PRIMARY_RESELECT_FAILURE ) +var bondPrimaryReselectToString = map[BondPrimaryReselect]string{ + BOND_PRIMARY_RESELECT_ALWAYS: "always", + BOND_PRIMARY_RESELECT_BETTER: "better", + BOND_PRIMARY_RESELECT_FAILURE: "failure", +} +var StringToBondPrimaryReselectMap = map[string]BondPrimaryReselect{ + "always": BOND_PRIMARY_RESELECT_ALWAYS, + "better": BOND_PRIMARY_RESELECT_BETTER, + "failure": BOND_PRIMARY_RESELECT_FAILURE, +} + +func (b BondPrimaryReselect) String() string { + s, ok := bondPrimaryReselectToString[b] + if !ok { + return fmt.Sprintf("BondPrimaryReselect(%d)", b) + } + return s +} + // BondArpAllTargets type type BondArpAllTargets int @@ -546,6 +683,23 @@ const ( BOND_ARP_ALL_TARGETS_ALL ) +var bondArpAllTargetsToString = map[BondArpAllTargets]string{ + BOND_ARP_ALL_TARGETS_ANY: "any", + BOND_ARP_ALL_TARGETS_ALL: "all", +} +var StringToBondArpAllTargetsMap = map[string]BondArpAllTargets{ + "any": BOND_ARP_ALL_TARGETS_ANY, + "all": BOND_ARP_ALL_TARGETS_ALL, +} + +func (b BondArpAllTargets) String() string { + s, ok := bondArpAllTargetsToString[b] + if !ok { + return fmt.Sprintf("BondArpAllTargets(%d)", b) + } + return s +} + // BondFailOverMac type type BondFailOverMac int @@ -556,6 +710,25 @@ const ( BOND_FAIL_OVER_MAC_FOLLOW ) +var bondFailOverMacToString = map[BondFailOverMac]string{ + BOND_FAIL_OVER_MAC_NONE: "none", + BOND_FAIL_OVER_MAC_ACTIVE: "active", + BOND_FAIL_OVER_MAC_FOLLOW: "follow", +} +var StringToBondFailOverMacMap = map[string]BondFailOverMac{ + "none": BOND_FAIL_OVER_MAC_NONE, + "active": BOND_FAIL_OVER_MAC_ACTIVE, + "follow": BOND_FAIL_OVER_MAC_FOLLOW, +} + +func (b BondFailOverMac) String() string { + s, ok := bondFailOverMacToString[b] + if !ok { + return fmt.Sprintf("BondFailOverMac(%d)", b) + } + return s +} + // BondXmitHashPolicy type type BondXmitHashPolicy int @@ -583,6 +756,7 @@ const ( BOND_XMIT_HASH_POLICY_LAYER2_3 BOND_XMIT_HASH_POLICY_ENCAP2_3 BOND_XMIT_HASH_POLICY_ENCAP3_4 + BOND_XMIT_HASH_POLICY_VLAN_SRCMAC BOND_XMIT_HASH_POLICY_UNKNOWN ) @@ -592,6 +766,7 @@ var bondXmitHashPolicyToString = map[BondXmitHashPolicy]string{ BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3", BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3", BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4", + BOND_XMIT_HASH_POLICY_VLAN_SRCMAC: "vlan+srcmac", } var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{ "layer2": BOND_XMIT_HASH_POLICY_LAYER2, @@ -599,6 +774,7 @@ var StringToBondXmitHashPolicyMap = 
map[string]BondXmitHashPolicy{ "layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3, "encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3, "encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4, + "vlan+srcmac": BOND_XMIT_HASH_POLICY_VLAN_SRCMAC, } // BondLacpRate type @@ -647,6 +823,25 @@ const ( BOND_AD_SELECT_COUNT ) +var bondAdSelectToString = map[BondAdSelect]string{ + BOND_AD_SELECT_STABLE: "stable", + BOND_AD_SELECT_BANDWIDTH: "bandwidth", + BOND_AD_SELECT_COUNT: "count", +} +var StringToBondAdSelectMap = map[string]BondAdSelect{ + "stable": BOND_AD_SELECT_STABLE, + "bandwidth": BOND_AD_SELECT_BANDWIDTH, + "count": BOND_AD_SELECT_COUNT, +} + +func (b BondAdSelect) String() string { + s, ok := bondAdSelectToString[b] + if !ok { + return fmt.Sprintf("BondAdSelect(%d)", b) + } + return s +} + // BondAdInfo represents ad info for bond type BondAdInfo struct { AggregatorId int @@ -678,7 +873,7 @@ type Bond struct { AllSlavesActive int MinLinks int LpInterval int - PackersPerSlave int + PacketsPerSlave int LacpRate BondLacpRate AdSelect BondAdSelect // looking at iproute tool AdInfo can only be retrived. It can't be set. @@ -711,7 +906,7 @@ func NewLinkBond(atr LinkAttrs) *Bond { AllSlavesActive: -1, MinLinks: -1, LpInterval: -1, - PackersPerSlave: -1, + PacketsPerSlave: -1, LacpRate: -1, AdSelect: -1, AdActorSysPrio: -1, @@ -761,8 +956,10 @@ func (bond *Bond) Type() string { type BondSlaveState uint8 const ( - BondStateActive = iota // Link is active. - BondStateBackup // Link is backup. + //BondStateActive Link is active. + BondStateActive BondSlaveState = iota + //BondStateBackup Link is backup. + BondStateBackup ) func (s BondSlaveState) String() string { @@ -776,15 +973,19 @@ func (s BondSlaveState) String() string { } } -// BondSlaveState represents the values of the IFLA_BOND_SLAVE_MII_STATUS bond slave +// BondSlaveMiiStatus represents the values of the IFLA_BOND_SLAVE_MII_STATUS bond slave // attribute, which contains the status of MII link monitoring type BondSlaveMiiStatus uint8 const ( - BondLinkUp = iota // link is up and running. - BondLinkFail // link has just gone down. - BondLinkDown // link has been down for too long time. - BondLinkBack // link is going back. + //BondLinkUp link is up and running. + BondLinkUp BondSlaveMiiStatus = iota + //BondLinkFail link has just gone down. + BondLinkFail + //BondLinkDown link has been down for too long time. + BondLinkDown + //BondLinkBack link is going back. 
+ BondLinkBack ) func (s BondSlaveMiiStatus) String() string { @@ -817,6 +1018,49 @@ func (b *BondSlave) SlaveType() string { return "bond" } +type VrfSlave struct { + Table uint32 +} + +func (v *VrfSlave) SlaveType() string { + return "vrf" +} + +// Geneve devices must specify RemoteIP and ID (VNI) on create +// https://github.com/torvalds/linux/blob/47ec5303d73ea344e84f46660fff693c57641386/drivers/net/geneve.c#L1209-L1223 +type Geneve struct { + LinkAttrs + ID uint32 // vni + Remote net.IP + Ttl uint8 + Tos uint8 + Dport uint16 + UdpCsum uint8 + UdpZeroCsum6Tx uint8 + UdpZeroCsum6Rx uint8 + Link uint32 + FlowBased bool + InnerProtoInherit bool + Df GeneveDf +} + +func (geneve *Geneve) Attrs() *LinkAttrs { + return &geneve.LinkAttrs +} + +func (geneve *Geneve) Type() string { + return "geneve" +} + +type GeneveDf uint8 + +const ( + GENEVE_DF_UNSET GeneveDf = iota + GENEVE_DF_SET + GENEVE_DF_INHERIT + GENEVE_DF_MAX +) + // Gretap devices must specify LocalIP and RemoteIP on create type Gretap struct { LinkAttrs @@ -861,6 +1105,7 @@ type Iptun struct { EncapType uint16 EncapFlags uint16 FlowBased bool + Proto uint8 } func (iptun *Iptun) Attrs() *LinkAttrs { @@ -878,10 +1123,15 @@ type Ip6tnl struct { Remote net.IP Ttl uint8 Tos uint8 - EncapLimit uint8 Flags uint32 Proto uint8 FlowInfo uint32 + EncapLimit uint8 + EncapType uint16 + EncapFlags uint16 + EncapSport uint16 + EncapDport uint16 + FlowBased bool } func (ip6tnl *Ip6tnl) Attrs() *LinkAttrs { @@ -892,14 +1142,47 @@ func (ip6tnl *Ip6tnl) Type() string { return "ip6tnl" } +// from https://elixir.bootlin.com/linux/v5.15.4/source/include/uapi/linux/if_tunnel.h#L84 +type TunnelEncapType uint16 + +const ( + None TunnelEncapType = iota + FOU + GUE +) + +// from https://elixir.bootlin.com/linux/v5.15.4/source/include/uapi/linux/if_tunnel.h#L91 +type TunnelEncapFlag uint16 + +const ( + CSum TunnelEncapFlag = 1 << 0 + CSum6 = 1 << 1 + RemCSum = 1 << 2 +) + +// from https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ip6_tunnel.h#L12 +type IP6TunnelFlag uint16 + +const ( + IP6_TNL_F_IGN_ENCAP_LIMIT IP6TunnelFlag = 1 // don't add encapsulation limit if one isn't present in inner packet + IP6_TNL_F_USE_ORIG_TCLASS = 2 // copy the traffic class field from the inner packet + IP6_TNL_F_USE_ORIG_FLOWLABEL = 4 // copy the flowlabel from the inner packet + IP6_TNL_F_MIP6_DEV = 8 // being used for Mobile IPv6 + IP6_TNL_F_RCV_DSCP_COPY = 10 // copy DSCP from the outer packet + IP6_TNL_F_USE_ORIG_FWMARK = 20 // copy fwmark from inner packet + IP6_TNL_F_ALLOW_LOCAL_REMOTE = 40 // allow remote endpoint on the local node +) + type Sittun struct { LinkAttrs Link uint32 - Local net.IP - Remote net.IP Ttl uint8 Tos uint8 PMtuDisc uint8 + Proto uint8 + Local net.IP + Remote net.IP + EncapLimit uint8 EncapType uint16 EncapFlags uint16 EncapSport uint16 @@ -950,6 +1233,7 @@ type Gretun struct { EncapFlags uint16 EncapSport uint16 EncapDport uint16 + FlowBased bool } func (gretun *Gretun) Attrs() *LinkAttrs { @@ -993,6 +1277,7 @@ func (gtp *GTP) Type() string { } // Virtual XFRM Interfaces +// // Named "xfrmi" to prevent confusion with XFRM objects type Xfrmi struct { LinkAttrs @@ -1034,6 +1319,58 @@ var StringToIPoIBMode = map[string]IPoIBMode{ "connected": IPOIB_MODE_CONNECTED, } +const ( + CAN_STATE_ERROR_ACTIVE = iota + CAN_STATE_ERROR_WARNING + CAN_STATE_ERROR_PASSIVE + CAN_STATE_BUS_OFF + CAN_STATE_STOPPED + CAN_STATE_SLEEPING +) + +type Can struct { + LinkAttrs + + BitRate uint32 + SamplePoint uint32 + TimeQuanta uint32 + PropagationSegment 
uint32 + PhaseSegment1 uint32 + PhaseSegment2 uint32 + SyncJumpWidth uint32 + BitRatePreScaler uint32 + + Name string + TimeSegment1Min uint32 + TimeSegment1Max uint32 + TimeSegment2Min uint32 + TimeSegment2Max uint32 + SyncJumpWidthMax uint32 + BitRatePreScalerMin uint32 + BitRatePreScalerMax uint32 + BitRatePreScalerInc uint32 + + ClockFrequency uint32 + + State uint32 + + Mask uint32 + Flags uint32 + + TxError uint16 + RxError uint16 + + RestartMs uint32 +} + +func (can *Can) Attrs() *LinkAttrs { + return &can.LinkAttrs +} + +func (can *Can) Type() string { + return "can" +} + type IPoIB struct { LinkAttrs Pkey uint16 @@ -1049,11 +1386,27 @@ func (ipoib *IPoIB) Type() string { return "ipoib" } +type BareUDP struct { + LinkAttrs + Port uint16 + EtherType uint16 + SrcPortMin uint16 + MultiProto bool +} + +func (bareudp *BareUDP) Attrs() *LinkAttrs { + return &bareudp.LinkAttrs +} + +func (bareudp *BareUDP) Type() string { + return "bareudp" +} + // iproute2 supported devices; // vlan | veth | vcan | dummy | ifb | macvlan | macvtap | // bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan | // gre | gretap | ip6gre | ip6gretap | vti | vti6 | nlmon | -// bond_slave | ipvlan | xfrm +// bond_slave | ipvlan | xfrm | bareudp // LinkNotFoundError wraps the various not found errors when // getting/reading links. This is intended for better error diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index ec915a0b9..d713612a9 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -34,14 +34,27 @@ const ( TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI ) +var StringToTuntapModeMap = map[string]TuntapMode{ + "tun": TUNTAP_MODE_TUN, + "tap": TUNTAP_MODE_TAP, +} + +func (ttm TuntapMode) String() string { + switch ttm { + case TUNTAP_MODE_TUN: + return "tun" + case TUNTAP_MODE_TAP: + return "tap" + } + return "unknown" +} + const ( VF_LINK_STATE_AUTO uint32 = 0 VF_LINK_STATE_ENABLE uint32 = 1 VF_LINK_STATE_DISABLE uint32 = 2 ) -var lookupByDump = false - var macvlanModes = [...]uint32{ 0, nl.MACVLAN_MODE_PRIVATE, @@ -138,7 +151,6 @@ func (h *Handle) LinkSetAllmulticastOn(link Link) error { msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Change = unix.IFF_ALLMULTI msg.Flags = unix.IFF_ALLMULTI - msg.Index = int32(base.Index) req.AddData(msg) @@ -168,6 +180,51 @@ func (h *Handle) LinkSetAllmulticastOff(link Link) error { return err } +// LinkSetMulticastOn enables the reception of multicast packets for the link device. +// Equivalent to: `ip link set $link multicast on` +func LinkSetMulticastOn(link Link) error { + return pkgHandle.LinkSetMulticastOn(link) +} + +// LinkSetMulticastOn enables the reception of multicast packets for the link device. +// Equivalent to: `ip link set $link multicast on` +func (h *Handle) LinkSetMulticastOn(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_MULTICAST + msg.Flags = unix.IFF_MULTICAST + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetAllmulticastOff disables the reception of multicast packets for the link device. 
+// Equivalent to: `ip link set $link multicast off` +func LinkSetMulticastOff(link Link) error { + return pkgHandle.LinkSetMulticastOff(link) +} + +// LinkSetAllmulticastOff disables the reception of multicast packets for the link device. +// Equivalent to: `ip link set $link multicast off` +func (h *Handle) LinkSetMulticastOff(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_MULTICAST + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { return pkgHandle.MacvlanMACAddrAdd(link, addr) } @@ -237,6 +294,37 @@ func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode return err } +// LinkSetMacvlanMode sets the mode of a macvlan or macvtap link device. +// Note that passthrough mode cannot be set to and from and will fail. +// Equivalent to: `ip link set $link type (macvlan|macvtap) mode $mode +func LinkSetMacvlanMode(link Link, mode MacvlanMode) error { + return pkgHandle.LinkSetMacvlanMode(link, mode) +} + +// LinkSetMacvlanMode sets the mode of the macvlan or macvtap link device. +// Note that passthrough mode cannot be set to and from and will fail. +// Equivalent to: `ip link set $link type (macvlan|macvtap) mode $mode +func (h *Handle) LinkSetMacvlanMode(link Link, mode MacvlanMode) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[mode])) + + req.AddData(linkInfo) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func BridgeSetMcastSnoop(link Link, on bool) error { return pkgHandle.BridgeSetMcastSnoop(link, on) } @@ -247,6 +335,26 @@ func (h *Handle) BridgeSetMcastSnoop(link Link, on bool) error { return h.linkModify(bridge, unix.NLM_F_ACK) } +func BridgeSetVlanFiltering(link Link, on bool) error { + return pkgHandle.BridgeSetVlanFiltering(link, on) +} + +func (h *Handle) BridgeSetVlanFiltering(link Link, on bool) error { + bridge := link.(*Bridge) + bridge.VlanFiltering = &on + return h.linkModify(bridge, unix.NLM_F_ACK) +} + +func BridgeSetVlanDefaultPVID(link Link, pvid uint16) error { + return pkgHandle.BridgeSetVlanDefaultPVID(link, pvid) +} + +func (h *Handle) BridgeSetVlanDefaultPVID(link Link, pvid uint16) error { + bridge := link.(*Bridge) + bridge.VlanDefaultPVID = &pvid + return h.linkModify(bridge, unix.NLM_F_ACK) +} + func SetPromiscOn(link Link) error { return pkgHandle.SetPromiscOn(link) } @@ -389,6 +497,58 @@ func (h *Handle) LinkSetAlias(link Link, name string) error { return err } +// LinkAddAltName adds a new alternative name for the link device. +// Equivalent to: `ip link property add $link altname $name` +func LinkAddAltName(link Link, name string) error { + return pkgHandle.LinkAddAltName(link, name) +} + +// LinkAddAltName adds a new alternative name for the link device. 
+// Equivalent to: `ip link property add $link altname $name` +func (h *Handle) LinkAddAltName(link Link, name string) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINKPROP, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_PROP_LIST|unix.NLA_F_NESTED, nil) + data.AddRtAttr(unix.IFLA_ALT_IFNAME, []byte(name)) + + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkDelAltName delete an alternative name for the link device. +// Equivalent to: `ip link property del $link altname $name` +func LinkDelAltName(link Link, name string) error { + return pkgHandle.LinkDelAltName(link, name) +} + +// LinkDelAltName delete an alternative name for the link device. +// Equivalent to: `ip link property del $link altname $name` +func (h *Handle) LinkDelAltName(link Link, name string) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_DELLINKPROP, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_PROP_LIST|unix.NLA_F_NESTED, nil) + data.AddRtAttr(unix.IFLA_ALT_IFNAME, []byte(name)) + + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + // LinkSetHardwareAddr sets the hardware address of the link device. // Equivalent to: `ip link set $link address $hwaddr` func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { @@ -491,13 +651,50 @@ func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { req.AddData(msg) data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) - info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) vfmsg := nl.VfVlan{ Vf: uint32(vf), Vlan: uint32(vlan), Qos: uint32(qos), } - nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize()) + info.AddRtAttr(nl.IFLA_VF_VLAN, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfVlanQosProto sets the vlan, qos and protocol of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos proto $proto` +func LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + return pkgHandle.LinkSetVfVlanQosProto(link, vf, vlan, qos, proto) +} + +// LinkSetVfVlanQosProto sets the vlan, qos and protocol of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos proto $proto` +func (h *Handle) LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + vfInfo := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfVlanList := vfInfo.AddRtAttr(nl.IFLA_VF_VLAN_LIST, nil) + + vfmsg := nl.VfVlanInfo{ + VfVlan: nl.VfVlan{ + Vf: uint32(vf), + Vlan: uint32(vlan), + Qos: uint32(qos), + }, + VlanProto: (uint16(proto)>>8)&0xFF | (uint16(proto)&0xFF)<<8, + } + + vfVlanList.AddRtAttr(nl.IFLA_VF_VLAN_INFO, vfmsg.Serialize()) req.AddData(data) _, err := req.Execute(unix.NETLINK_ROUTE, 0) @@ -848,6 +1045,141 @@ func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { return err } +// LinkSetGSOMaxSegs sets the GSO maximum segment count of the link device. 
+// Equivalent to: `ip link set $link gso_max_segs $maxSegs` +func LinkSetGSOMaxSegs(link Link, maxSegs int) error { + return pkgHandle.LinkSetGSOMaxSegs(link, maxSegs) +} + +// LinkSetGSOMaxSegs sets the GSO maximum segment count of the link device. +// Equivalent to: `ip link set $link gso_max_segs $maxSegs` +func (h *Handle) LinkSetGSOMaxSegs(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GSO_MAX_SEGS, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGSOMaxSize sets the IPv6 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_max_size $maxSize` +func LinkSetGSOMaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGSOMaxSize(link, maxSize) +} + +// LinkSetGSOMaxSize sets the IPv6 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_max_size $maxSize` +func (h *Handle) LinkSetGSOMaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GSO_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGROMaxSize sets the IPv6 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_max_size $maxSize` +func LinkSetGROMaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGROMaxSize(link, maxSize) +} + +// LinkSetGROMaxSize sets the IPv6 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_max_size $maxSize` +func (h *Handle) LinkSetGROMaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGSOIPv4MaxSize sets the IPv4 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_ipv4_max_size $maxSize` +func LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGSOIPv4MaxSize(link, maxSize) +} + +// LinkSetGSOIPv4MaxSize sets the IPv4 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_ipv4_max_size $maxSize` +func (h *Handle) LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGROIPv4MaxSize sets the IPv4 GRO maximum size of the link device. 
+// Equivalent to: `ip link set $link gro_ipv4_max_size $maxSize` +func LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGROIPv4MaxSize(link, maxSize) +} + +// LinkSetGROIPv4MaxSize sets the IPv4 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_ipv4_max_size $maxSize` +func (h *Handle) LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func boolAttr(val bool) []byte { var v uint8 if val { @@ -1005,8 +1337,8 @@ func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { if bond.LpInterval >= 0 { data.AddRtAttr(nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval))) } - if bond.PackersPerSlave >= 0 { - data.AddRtAttr(nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave))) + if bond.PacketsPerSlave >= 0 { + data.AddRtAttr(nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PacketsPerSlave))) } if bond.LacpRate >= 0 { data.AddRtAttr(nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate))) @@ -1048,6 +1380,14 @@ func (h *Handle) LinkAdd(link Link) error { return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) } +func LinkModify(link Link) error { + return pkgHandle.LinkModify(link) +} + +func (h *Handle) LinkModify(link Link) error { + return h.linkModify(link, unix.NLM_F_REQUEST|unix.NLM_F_ACK) +} + func (h *Handle) linkModify(link Link, flags int) error { // TODO: support extra data for macvlan base := link.Attrs() @@ -1060,8 +1400,6 @@ func (h *Handle) linkModify(link Link, flags int) error { } if isTuntap { - // TODO: support user - // TODO: support group if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { return fmt.Errorf("Tuntap.Mode %v unknown", tuntap.Mode) } @@ -1089,21 +1427,64 @@ func (h *Handle) linkModify(link Link, flags int) error { } req.Flags |= uint16(tuntap.Mode) - + const TUN = "/dev/net/tun" for i := 0; i < queues; i++ { localReq := req - file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) + fd, err := unix.Open(TUN, os.O_RDWR|syscall.O_CLOEXEC, 0) if err != nil { cleanupFds(fds) return err } - fds = append(fds, file) - _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) + _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) if errno != 0 { + // close the new fd + unix.Close(fd) + // and the already opened ones cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno) } + + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TUNSETOWNER, uintptr(tuntap.Owner)) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETOWNER failed [%d], errno %v", i, errno) + } + + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TUNSETGROUP, uintptr(tuntap.Group)) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETGROUP failed [%d], errno %v", i, errno) + } + + // Set the tun device to non-blocking before use. 
The below comment + // taken from: + // + // https://github.com/mistsys/tuntap/commit/161418c25003bbee77d085a34af64d189df62bea + // + // Note there is a complication because in go, if a device node is + // opened, go sets it to use nonblocking I/O. However a /dev/net/tun + // doesn't work with epoll until after the TUNSETIFF ioctl has been + // done. So we open the unix fd directly, do the ioctl, then put the + // fd in nonblocking mode, an then finally wrap it in a os.File, + // which will see the nonblocking mode and add the fd to the + // pollable set, so later on when we Read() from it blocked the + // calling thread in the kernel. + // + // See + // https://github.com/golang/go/issues/30426 + // which got exposed in go 1.13 by the fix to + // https://github.com/golang/go/issues/30624 + err = unix.SetNonblock(fd, true) + if err != nil { + cleanupFds(fds) + return fmt.Errorf("Tuntap set to non-blocking failed [%d], err %v", i, err) + } + + // create the file from the file descriptor and store it + file := os.NewFile(uintptr(fd), TUN) + fds = append(fds, file) + // 1) we only care for the name of the first tap in the multi queue set // 2) if the original name was empty, the localReq has now the actual name // @@ -1114,11 +1495,29 @@ func (h *Handle) linkModify(link Link, flags int) error { if i == 0 { link.Attrs().Name = strings.Trim(string(localReq.Name[:]), "\x00") } + + } + + control := func(file *os.File, f func(fd uintptr)) error { + name := file.Name() + conn, err := file.SyscallConn() + if err != nil { + return fmt.Errorf("SyscallConn() failed on %s: %v", name, err) + } + if err := conn.Control(f); err != nil { + return fmt.Errorf("Failed to get file descriptor for %s: %v", name, err) + } + return nil } // only persist interface if NonPersist is NOT set if !tuntap.NonPersist { - _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1) + var errno syscall.Errno + if err := control(fds[0], func(fd uintptr) { + _, _, errno = unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TUNSETPERSIST), 1) + }); err != nil { + return err + } if errno != 0 { cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) @@ -1135,7 +1534,10 @@ func (h *Handle) linkModify(link Link, flags int) error { // un-persist (e.g. 
allow the interface to be removed) the tuntap // should not hurt if not set prior, condition might be not needed if !tuntap.NonPersist { - _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0) + // ignore error + _ = control(fds[0], func(fd uintptr) { + _, _, _ = unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TUNSETPERSIST), 0) + }) } cleanupFds(fds) return err @@ -1193,6 +1595,11 @@ func (h *Handle) linkModify(link Link, flags int) error { nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) req.AddData(nameData) + if base.Alias != "" { + alias := nl.NewRtAttr(unix.IFLA_IFALIAS, []byte(base.Alias)) + req.AddData(alias) + } + if base.MTU > 0 { mtu := nl.NewRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) req.AddData(mtu) @@ -1228,6 +1635,21 @@ func (h *Handle) linkModify(link Link, flags int) error { req.AddData(gsoAttr) } + if base.GROMaxSize > 0 { + groAttr := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, nl.Uint32Attr(base.GROMaxSize)) + req.AddData(groAttr) + } + + if base.GSOIPv4MaxSize > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(base.GSOIPv4MaxSize)) + req.AddData(gsoAttr) + } + + if base.GROIPv4MaxSize > 0 { + groAttr := nl.NewRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(base.GROIPv4MaxSize)) + req.AddData(groAttr) + } + if base.Group > 0 { groupAttr := nl.NewRtAttr(unix.IFLA_GROUP, nl.Uint32Attr(base.Group)) req.AddData(groupAttr) @@ -1264,6 +1686,10 @@ func (h *Handle) linkModify(link Link, flags int) error { if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN { data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol))) } + case *Netkit: + if err := addNetkitAttrs(link, linkInfo, flags); err != nil { + return err + } case *Veth: data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil) @@ -1272,12 +1698,28 @@ func (h *Handle) linkModify(link Link, flags int) error { if base.TxQLen >= 0 { peer.AddRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) } + if base.NumTxQueues > 0 { + peer.AddRtAttr(unix.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) + } + if base.NumRxQueues > 0 { + peer.AddRtAttr(unix.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) + } if base.MTU > 0 { peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) } if link.PeerHardwareAddr != nil { peer.AddRtAttr(unix.IFLA_ADDRESS, []byte(link.PeerHardwareAddr)) } + if link.PeerNamespace != nil { + switch ns := link.PeerNamespace.(type) { + case NsPid: + val := nl.Uint32Attr(uint32(ns)) + peer.AddRtAttr(unix.IFLA_NET_NS_PID, val) + case NsFd: + val := nl.Uint32Attr(uint32(ns)) + peer.AddRtAttr(unix.IFLA_NET_NS_FD, val) + } + } case *Vxlan: addVxlanAttrs(link, linkInfo) case *Bond: @@ -1286,16 +1728,16 @@ func (h *Handle) linkModify(link Link, flags int) error { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) + case *IPVtap: + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) case *Macvlan: - if link.Mode != MACVLAN_MODE_DEFAULT { - data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) - data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) - } + addMacvlanAttrs(link, linkInfo) case *Macvtap: - if link.Mode != MACVLAN_MODE_DEFAULT { - data := 
linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) - data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) - } + addMacvtapAttrs(link, linkInfo) + case *Geneve: + addGeneveAttrs(link, linkInfo) case *Gretap: addGretapAttrs(link, linkInfo) case *Iptun: @@ -1318,6 +1760,8 @@ func (h *Handle) linkModify(link Link, flags int) error { addXfrmiAttrs(link, linkInfo) case *IPoIB: addIPoIBAttrs(link, linkInfo) + case *BareUDP: + addBareUDPAttrs(link, linkInfo) } req.AddData(linkInfo) @@ -1372,6 +1816,13 @@ func (h *Handle) linkByNameDump(name string) (Link, error) { if link.Attrs().Name == name { return link, nil } + + // support finding interfaces also via altnames + for _, altName := range link.Attrs().AltNames { + if altName == name { + return link, nil + } + } } return nil, LinkNotFoundError{fmt.Errorf("Link %s not found", name)} } @@ -1410,6 +1861,9 @@ func (h *Handle) LinkByName(name string) (Link, error) { req.AddData(attr) nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name)) + if len(name) > 15 { + nameData = nl.NewRtAttr(unix.IFLA_ALT_IFNAME, nl.ZeroTerminated(name)) + } req.AddData(nameData) link, err := execGetLink(req) @@ -1499,7 +1953,7 @@ func execGetLink(req *nl.NetlinkRequest) (Link, error) { } } -// linkDeserialize deserializes a raw message received from netlink into +// LinkDeserialize deserializes a raw message received from netlink into // a link object. func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { msg := nl.DeserializeIfInfomsg(m) @@ -1509,10 +1963,19 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } - base := LinkAttrs{Index: int(msg.Index), RawFlags: msg.Flags, Flags: linkFlags(msg.Flags), EncapType: msg.EncapType()} - if msg.Flags&unix.IFF_PROMISC != 0 { - base.Promisc = 1 + base := NewLinkAttrs() + base.Index = int(msg.Index) + base.RawFlags = msg.Flags + base.Flags = linkFlags(msg.Flags) + base.EncapType = msg.EncapType() + base.NetNsID = -1 + if msg.Flags&unix.IFF_ALLMULTI != 0 { + base.Allmulti = 1 } + if msg.Flags&unix.IFF_MULTICAST != 0 { + base.Multi = 1 + } + var ( link Link stats32 *LinkStatistics32 @@ -1541,18 +2004,26 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Bridge{} case "vlan": link = &Vlan{} + case "netkit": + link = &Netkit{} case "veth": link = &Veth{} + case "wireguard": + link = &Wireguard{} case "vxlan": link = &Vxlan{} case "bond": link = &Bond{} case "ipvlan": link = &IPVlan{} + case "ipvtap": + link = &IPVtap{} case "macvlan": link = &Macvlan{} case "macvtap": link = &Macvtap{} + case "geneve": + link = &Geneve{} case "gretap": link = &Gretap{} case "ip6gretap": @@ -1579,6 +2050,10 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Tuntap{} case "ipoib": link = &IPoIB{} + case "can": + link = &Can{} + case "bareudp": + link = &BareUDP{} default: link = &GenericLink{LinkType: linkType} } @@ -1588,6 +2063,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } switch linkType { + case "netkit": + parseNetkitData(link, data) case "vlan": parseVlanData(link, data) case "vxlan": @@ -1596,10 +2073,14 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseBondData(link, data) case "ipvlan": parseIPVlanData(link, data) + case "ipvtap": + parseIPVtapData(link, data) case "macvlan": parseMacvlanData(link, data) case "macvtap": parseMacvtapData(link, data) + case "geneve": + parseGeneveData(link, data) case "gretap": parseGretapData(link, data) case 
"ip6gretap": @@ -1628,13 +2109,21 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseTuntapData(link, data) case "ipoib": parseIPoIBData(link, data) + case "can": + parseCanData(link, data) + case "bareudp": + parseBareUDPData(link, data) } + case nl.IFLA_INFO_SLAVE_KIND: slaveType = string(info.Value[:len(info.Value)-1]) switch slaveType { case "bond": linkSlave = &BondSlave{} + case "vrf": + linkSlave = &VrfSlave{} } + case nl.IFLA_INFO_SLAVE_DATA: switch slaveType { case "bond": @@ -1643,6 +2132,12 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } parseBondSlaveData(linkSlave, data) + case "vrf": + data, err := nl.ParseRouteAttr(info.Value) + if err != nil { + return nil, err + } + parseVrfSlaveData(linkSlave, data) } } } @@ -1660,6 +2155,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.Name = string(attr.Value[:len(attr.Value)-1]) case unix.IFLA_MTU: base.MTU = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_PROMISCUITY: + base.Promisc = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_LINK: base.ParentIndex = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_MASTER: @@ -1694,14 +2191,38 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { protinfo := parseProtinfo(attrs) base.Protinfo = &protinfo } + case unix.IFLA_PROP_LIST | unix.NLA_F_NESTED: + attrs, err := nl.ParseRouteAttr(attr.Value[:]) + if err != nil { + return nil, err + } + + base.AltNames = []string{} + for _, attr := range attrs { + if attr.Attr.Type == unix.IFLA_ALT_IFNAME { + base.AltNames = append(base.AltNames, nl.BytesToString(attr.Value)) + } + } case unix.IFLA_OPERSTATE: base.OperState = LinkOperState(uint8(attr.Value[0])) + case unix.IFLA_PHYS_SWITCH_ID: + base.PhysSwitchID = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_LINK_NETNSID: base.NetNsID = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_GSO_MAX_SIZE: - base.GSOMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_TSO_MAX_SEGS: + base.TSOMaxSegs = native.Uint32(attr.Value[0:4]) + case unix.IFLA_TSO_MAX_SIZE: + base.TSOMaxSize = native.Uint32(attr.Value[0:4]) case unix.IFLA_GSO_MAX_SEGS: base.GSOMaxSegs = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_MAX_SIZE: + base.GSOMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GRO_MAX_SIZE: + base.GROMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_IPV4_MAX_SIZE: + base.GSOIPv4MaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GRO_IPV4_MAX_SIZE: + base.GROIPv4MaxSize = native.Uint32(attr.Value[0:4]) case unix.IFLA_VFINFO_LIST: data, err := nl.ParseRouteAttr(attr.Value) if err != nil { @@ -1718,6 +2239,13 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.NumRxQueues = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_GROUP: base.Group = native.Uint32(attr.Value[0:4]) + case unix.IFLA_PERM_ADDRESS: + for _, b := range attr.Value { + if b != 0 { + base.PermHWAddr = attr.Value[:] + break + } + } } } @@ -1830,21 +2358,24 @@ type LinkUpdate struct { // LinkSubscribe takes a chan down which notifications will be sent // when links change. Close the 'done' chan to stop subscription. 
func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error { - return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) + return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) } // LinkSubscribeAt works like LinkSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error { - return linkSubscribeAt(ns, netns.None(), ch, done, nil, false) + return linkSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) } // LinkSubscribeOptions contains a set of options to use with // LinkSubscribeWithOptions. type LinkSubscribeOptions struct { - Namespace *netns.NsHandle - ErrorCallback func(error) - ListExisting bool + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval } // LinkSubscribeWithOptions work like LinkSubscribe but enable to @@ -1855,14 +2386,27 @@ func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) + return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) } -func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { +func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_LINK) if err != nil { return err } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce) + if err != nil { + return err + } + } if done != nil { go func() { <-done @@ -1884,7 +2428,8 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c msgs, from, err := s.Receive() if err != nil { if cberr != nil { - cberr(err) + cberr(fmt.Errorf("Receive failed: %v", + err)) } return } @@ -1899,15 +2444,15 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c continue } if m.Header.Type == unix.NLMSG_ERROR { - native := nl.NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { continue } if cberr != nil { - cberr(syscall.Errno(-error)) + cberr(fmt.Errorf("error message: %v", + syscall.Errno(-error))) } - return + continue } ifmsg := nl.DeserializeIfInfomsg(m.Data) header := unix.NlMsghdr(m.Header) @@ -1916,7 +2461,7 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c if cberr != nil { cberr(err) } - return + continue } ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: header, Link: link} } @@ -1942,6 +2487,16 @@ func (h *Handle) LinkSetGuard(link Link, mode bool) error { return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD) } +// LinkSetBRSlaveGroupFwdMask set the group_fwd_mask of a bridge slave interface +func LinkSetBRSlaveGroupFwdMask(link Link, mask uint16) error { + return pkgHandle.LinkSetBRSlaveGroupFwdMask(link, mask) +} + +// LinkSetBRSlaveGroupFwdMask set the group_fwd_mask of a 
bridge slave interface +func (h *Handle) LinkSetBRSlaveGroupFwdMask(link Link, mask uint16) error { + return h.setProtinfoAttrRawVal(link, nl.Uint16Attr(mask), nl.IFLA_BRPORT_GROUP_FWD_MASK) +} + func LinkSetFastLeave(link Link, mode bool) error { return pkgHandle.LinkSetFastLeave(link, mode) } @@ -1974,6 +2529,14 @@ func (h *Handle) LinkSetFlood(link Link, mode bool) error { return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD) } +func LinkSetIsolated(link Link, mode bool) error { + return pkgHandle.LinkSetIsolated(link, mode) +} + +func (h *Handle) LinkSetIsolated(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_ISOLATED) +} + func LinkSetBrProxyArp(link Link, mode bool) error { return pkgHandle.LinkSetBrProxyArp(link, mode) } @@ -1990,7 +2553,15 @@ func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error { return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP_WIFI) } -func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { +func LinkSetBrNeighSuppress(link Link, mode bool) error { + return pkgHandle.LinkSetBrNeighSuppress(link, mode) +} + +func (h *Handle) LinkSetBrNeighSuppress(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_NEIGH_SUPPRESS) +} + +func (h *Handle) setProtinfoAttrRawVal(link Link, val []byte, attr int) error { base := link.Attrs() h.ensureIndex(base) req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) @@ -2000,7 +2571,7 @@ func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { req.AddData(msg) br := nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil) - br.AddRtAttr(attr, boolToByte(mode)) + br.AddRtAttr(attr, val) req.AddData(br) _, err := req.Execute(unix.NETLINK_ROUTE, 0) if err != nil { @@ -2008,6 +2579,9 @@ func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { } return nil } +func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { + return h.setProtinfoAttrRawVal(link, boolToByte(mode), attr) +} // LinkSetTxQLen sets the transaction queue length for the link. 
// Equivalent to: `ip link set $link txqlen $qlen` @@ -2065,6 +2639,80 @@ func (h *Handle) LinkSetGroup(link Link, group int) error { return err } +func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { + if nk.peerLinkAttrs.HardwareAddr != nil || nk.HardwareAddr != nil { + return fmt.Errorf("netkit doesn't support setting Ethernet") + } + + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + // Kernel will return error if trying to change the mode of an existing netkit device + data.AddRtAttr(nl.IFLA_NETKIT_MODE, nl.Uint32Attr(uint32(nk.Mode))) + data.AddRtAttr(nl.IFLA_NETKIT_POLICY, nl.Uint32Attr(uint32(nk.Policy))) + data.AddRtAttr(nl.IFLA_NETKIT_PEER_POLICY, nl.Uint32Attr(uint32(nk.PeerPolicy))) + + if (flag & unix.NLM_F_EXCL) == 0 { + // Modifying peer link attributes will not take effect + return nil + } + + peer := data.AddRtAttr(nl.IFLA_NETKIT_PEER_INFO, nil) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + if nk.peerLinkAttrs.Flags&net.FlagUp != 0 { + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP + } + if nk.peerLinkAttrs.Index != 0 { + msg.Index = int32(nk.peerLinkAttrs.Index) + } + peer.AddChild(msg) + if nk.peerLinkAttrs.Name != "" { + peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(nk.peerLinkAttrs.Name)) + } + if nk.peerLinkAttrs.MTU > 0 { + peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(nk.peerLinkAttrs.MTU))) + } + if nk.peerLinkAttrs.GSOMaxSegs > 0 { + peer.AddRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSegs)) + } + if nk.peerLinkAttrs.GSOMaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GSO_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSize)) + } + if nk.peerLinkAttrs.GSOIPv4MaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOIPv4MaxSize)) + } + if nk.peerLinkAttrs.GROIPv4MaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GROIPv4MaxSize)) + } + if nk.peerLinkAttrs.Namespace != nil { + switch ns := nk.peerLinkAttrs.Namespace.(type) { + case NsPid: + peer.AddRtAttr(unix.IFLA_NET_NS_PID, nl.Uint32Attr(uint32(ns))) + case NsFd: + peer.AddRtAttr(unix.IFLA_NET_NS_FD, nl.Uint32Attr(uint32(ns))) + } + } + return nil +} + +func parseNetkitData(link Link, data []syscall.NetlinkRouteAttr) { + netkit := link.(*Netkit) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_NETKIT_PRIMARY: + isPrimary := datum.Value[0:1][0] + if isPrimary != 0 { + netkit.isPrimary = true + } + case nl.IFLA_NETKIT_MODE: + netkit.Mode = NetkitMode(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_POLICY: + netkit.Policy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_PEER_POLICY: + netkit.PeerPolicy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + } + } +} + func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { vlan := link.(*Vlan) for _, datum := range data { @@ -2080,6 +2728,13 @@ func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { vxlan := link.(*Vxlan) for _, datum := range data { + // NOTE(vish): Apparently some messages can be sent with no value. + // We special case GBP here to not change existing + // functionality. It appears that GBP sends a datum.Value + // of null. 
+ if len(datum.Value) == 0 && datum.Attr.Type != nl.IFLA_VXLAN_GBP { + continue + } switch datum.Attr.Type { case nl.IFLA_VXLAN_ID: vxlan.VxlanId = int(native.Uint32(datum.Value[0:4])) @@ -2178,7 +2833,7 @@ func parseBondData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_BOND_LP_INTERVAL: bond.LpInterval = int(native.Uint32(data[i].Value[0:4])) case nl.IFLA_BOND_PACKETS_PER_SLAVE: - bond.PackersPerSlave = int(native.Uint32(data[i].Value[0:4])) + bond.PacketsPerSlave = int(native.Uint32(data[i].Value[0:4])) case nl.IFLA_BOND_AD_LACP_RATE: bond.LacpRate = BondLacpRate(data[i].Value[0]) case nl.IFLA_BOND_AD_SELECT: @@ -2258,6 +2913,16 @@ func parseBondSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) { } } +func parseVrfSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) { + vrfSlave := slave.(*VrfSlave) + for i := range data { + switch data[i].Attr.Type { + case nl.IFLA_BOND_SLAVE_STATE: + vrfSlave.Table = native.Uint32(data[i].Value[0:4]) + } + } +} + func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) { ipv := link.(*IPVlan) for _, datum := range data { @@ -2270,11 +2935,42 @@ func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) { } } +func parseIPVtapData(link Link, data []syscall.NetlinkRouteAttr) { + ipv := link.(*IPVtap) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPVLAN_MODE: + ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4])) + case nl.IFLA_IPVLAN_FLAG: + ipv.Flag = IPVlanFlag(native.Uint32(datum.Value[0:4])) + } + } +} + +func addMacvtapAttrs(macvtap *Macvtap, linkInfo *nl.RtAttr) { + addMacvlanAttrs(&macvtap.Macvlan, linkInfo) +} + func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvtap) parseMacvlanData(&macv.Macvlan, data) } +func addMacvlanAttrs(macvlan *Macvlan, linkInfo *nl.RtAttr) { + var data *nl.RtAttr + + if macvlan.Mode != MACVLAN_MODE_DEFAULT || macvlan.BCQueueLen > 0 { + data = linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + } + + if macvlan.Mode != MACVLAN_MODE_DEFAULT { + data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macvlan.Mode])) + } + if macvlan.BCQueueLen > 0 { + data.AddRtAttr(nl.IFLA_MACVLAN_BC_QUEUE_LEN, nl.Uint32Attr(macvlan.BCQueueLen)) + } +} + func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvlan) for _, datum := range data { @@ -2302,6 +2998,10 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { for _, macDatum := range macs { macv.MACAddrs = append(macv.MACAddrs, net.HardwareAddr(macDatum.Value[0:6])) } + case nl.IFLA_MACVLAN_BC_QUEUE_LEN: + macv.BCQueueLen = native.Uint32(datum.Value[0:4]) + case nl.IFLA_MACVLAN_BC_QUEUE_LEN_USED: + macv.UsedBCQueueLen = native.Uint32(datum.Value[0:4]) } } } @@ -2327,12 +3027,73 @@ func linkFlags(rawFlags uint32) net.Flags { return f } +func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if geneve.InnerProtoInherit { + data.AddRtAttr(nl.IFLA_GENEVE_INNER_PROTO_INHERIT, []byte{}) + } + + if geneve.FlowBased { + geneve.ID = 0 + data.AddRtAttr(nl.IFLA_GENEVE_COLLECT_METADATA, []byte{}) + } + + if ip := geneve.Remote; ip != nil { + if ip4 := ip.To4(); ip4 != nil { + data.AddRtAttr(nl.IFLA_GENEVE_REMOTE, ip.To4()) + } else { + data.AddRtAttr(nl.IFLA_GENEVE_REMOTE6, []byte(ip)) + } + } + + if geneve.ID != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_ID, nl.Uint32Attr(geneve.ID)) + } + + if geneve.Dport != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_PORT, htons(geneve.Dport)) + } + + if 
geneve.Ttl != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_TTL, nl.Uint8Attr(geneve.Ttl)) + } + + if geneve.Tos != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_TOS, nl.Uint8Attr(geneve.Tos)) + } + + data.AddRtAttr(nl.IFLA_GENEVE_DF, nl.Uint8Attr(uint8(geneve.Df))) +} + +func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) { + geneve := link.(*Geneve) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GENEVE_ID: + geneve.ID = native.Uint32(datum.Value[0:4]) + case nl.IFLA_GENEVE_REMOTE, nl.IFLA_GENEVE_REMOTE6: + geneve.Remote = datum.Value + case nl.IFLA_GENEVE_PORT: + geneve.Dport = ntohs(datum.Value[0:2]) + case nl.IFLA_GENEVE_TTL: + geneve.Ttl = uint8(datum.Value[0]) + case nl.IFLA_GENEVE_TOS: + geneve.Tos = uint8(datum.Value[0]) + case nl.IFLA_GENEVE_COLLECT_METADATA: + geneve.FlowBased = true + case nl.IFLA_GENEVE_INNER_PROTO_INHERIT: + geneve.InnerProtoInherit = true + } + } +} + func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) if gretap.FlowBased { // In flow based mode, no other attributes need to be configured - data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased)) + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{}) return } @@ -2415,6 +3176,12 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if gre.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{}) + return + } + if ip := gre.Local; ip != nil { if ip.To4() != nil { ip = ip.To4() @@ -2485,6 +3252,8 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { gre.EncapSport = ntohs(datum.Value[0:2]) case nl.IFLA_GRE_ENCAP_DPORT: gre.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_COLLECT_METADATA: + gre.FlowBased = true } } } @@ -2513,7 +3282,8 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { case nl.IFLA_XDP_FD: xdp.Fd = int(native.Uint32(attr.Value[0:4])) case nl.IFLA_XDP_ATTACHED: - xdp.Attached = attr.Value[0] != 0 + xdp.AttachMode = uint32(attr.Value[0]) + xdp.Attached = xdp.AttachMode != 0 case nl.IFLA_XDP_FLAGS: xdp.Flags = native.Uint32(attr.Value[0:4]) case nl.IFLA_XDP_PROG_ID: @@ -2524,14 +3294,14 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { } func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if iptun.FlowBased { // In flow based mode, no other attributes need to be configured - linkInfo.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased)) + data.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, []byte{}) return } - data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) - ip := iptun.Local.To4() if ip != nil { data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) @@ -2552,6 +3322,7 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(iptun.Proto)) } func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2577,7 +3348,9 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_IPTUN_ENCAP_FLAGS: iptun.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_IPTUN_COLLECT_METADATA: - iptun.FlowBased = int8(datum.Value[0]) != 0 
+ iptun.FlowBased = true + case nl.IFLA_IPTUN_PROTO: + iptun.Proto = datum.Value[0] } } } @@ -2585,6 +3358,12 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { func addIp6tnlAttrs(ip6tnl *Ip6tnl, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if ip6tnl.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, []byte{}) + return + } + if ip6tnl.Link != 0 { data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(ip6tnl.Link)) } @@ -2601,10 +3380,14 @@ func addIp6tnlAttrs(ip6tnl *Ip6tnl, linkInfo *nl.RtAttr) { data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(ip6tnl.Ttl)) data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(ip6tnl.Tos)) - data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(ip6tnl.EncapLimit)) data.AddRtAttr(nl.IFLA_IPTUN_FLAGS, nl.Uint32Attr(ip6tnl.Flags)) data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(ip6tnl.Proto)) data.AddRtAttr(nl.IFLA_IPTUN_FLOWINFO, nl.Uint32Attr(ip6tnl.FlowInfo)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(ip6tnl.EncapLimit)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(ip6tnl.EncapType)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(ip6tnl.EncapFlags)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(ip6tnl.EncapSport)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(ip6tnl.EncapDport)) } func parseIp6tnlData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2616,17 +3399,27 @@ func parseIp6tnlData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_IPTUN_REMOTE: ip6tnl.Remote = net.IP(datum.Value[:16]) case nl.IFLA_IPTUN_TTL: - ip6tnl.Ttl = uint8(datum.Value[0]) + ip6tnl.Ttl = datum.Value[0] case nl.IFLA_IPTUN_TOS: - ip6tnl.Tos = uint8(datum.Value[0]) - case nl.IFLA_IPTUN_ENCAP_LIMIT: - ip6tnl.EncapLimit = uint8(datum.Value[0]) + ip6tnl.Tos = datum.Value[0] case nl.IFLA_IPTUN_FLAGS: ip6tnl.Flags = native.Uint32(datum.Value[:4]) case nl.IFLA_IPTUN_PROTO: - ip6tnl.Proto = uint8(datum.Value[0]) + ip6tnl.Proto = datum.Value[0] case nl.IFLA_IPTUN_FLOWINFO: ip6tnl.FlowInfo = native.Uint32(datum.Value[:4]) + case nl.IFLA_IPTUN_ENCAP_LIMIT: + ip6tnl.EncapLimit = datum.Value[0] + case nl.IFLA_IPTUN_ENCAP_TYPE: + ip6tnl.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + ip6tnl.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + ip6tnl.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + ip6tnl.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_COLLECT_METADATA: + ip6tnl.FlowBased = true } } } @@ -2653,8 +3446,10 @@ func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) { data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) } + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(sittun.Proto)) data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) data.AddRtAttr(nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(sittun.EncapLimit)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) @@ -2670,11 +3465,13 @@ func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_IPTUN_REMOTE: sittun.Remote = net.IP(datum.Value[0:4]) case nl.IFLA_IPTUN_TTL: - sittun.Ttl = uint8(datum.Value[0]) + sittun.Ttl = datum.Value[0] case nl.IFLA_IPTUN_TOS: - sittun.Tos = uint8(datum.Value[0]) + 
sittun.Tos = datum.Value[0] case nl.IFLA_IPTUN_PMTUDISC: - sittun.PMtuDisc = uint8(datum.Value[0]) + sittun.PMtuDisc = datum.Value[0] + case nl.IFLA_IPTUN_PROTO: + sittun.Proto = datum.Value[0] case nl.IFLA_IPTUN_ENCAP_TYPE: sittun.EncapType = native.Uint16(datum.Value[0:2]) case nl.IFLA_IPTUN_ENCAP_FLAGS: @@ -2761,18 +3558,30 @@ func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) { if bridge.MulticastSnooping != nil { data.AddRtAttr(nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping)) } + if bridge.AgeingTime != nil { + data.AddRtAttr(nl.IFLA_BR_AGEING_TIME, nl.Uint32Attr(*bridge.AgeingTime)) + } if bridge.HelloTime != nil { data.AddRtAttr(nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime)) } if bridge.VlanFiltering != nil { data.AddRtAttr(nl.IFLA_BR_VLAN_FILTERING, boolToByte(*bridge.VlanFiltering)) } + if bridge.VlanDefaultPVID != nil { + data.AddRtAttr(nl.IFLA_BR_VLAN_DEFAULT_PVID, nl.Uint16Attr(*bridge.VlanDefaultPVID)) + } + if bridge.GroupFwdMask != nil { + data.AddRtAttr(nl.IFLA_BR_GROUP_FWD_MASK, nl.Uint16Attr(*bridge.GroupFwdMask)) + } } func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { br := bridge.(*Bridge) for _, datum := range data { switch datum.Attr.Type { + case nl.IFLA_BR_AGEING_TIME: + ageingTime := native.Uint32(datum.Value[0:4]) + br.AgeingTime = &ageingTime case nl.IFLA_BR_HELLO_TIME: helloTime := native.Uint32(datum.Value[0:4]) br.HelloTime = &helloTime @@ -2782,6 +3591,12 @@ func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_BR_VLAN_FILTERING: vlanFiltering := datum.Value[0] == 1 br.VlanFiltering = &vlanFiltering + case nl.IFLA_BR_VLAN_DEFAULT_PVID: + vlanDefaultPVID := native.Uint16(datum.Value[0:2]) + br.VlanDefaultPVID = &vlanDefaultPVID + case nl.IFLA_BR_GROUP_FWD_MASK: + mask := native.Uint16(datum.Value[0:2]) + br.GroupFwdMask = &mask } } } @@ -2823,12 +3638,17 @@ func parseVfInfoList(data []syscall.NetlinkRouteAttr) ([]VfInfo, error) { if err != nil { return nil, err } - vfs = append(vfs, parseVfInfo(vfAttrs, i)) + + vf, err := parseVfInfo(vfAttrs, i) + if err != nil { + return nil, err + } + vfs = append(vfs, vf) } return vfs, nil } -func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { +func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) (VfInfo, error) { vf := VfInfo{ID: id} for _, element := range data { switch element.Attr.Type { @@ -2839,6 +3659,12 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { vl := nl.DeserializeVfVlan(element.Value[:]) vf.Vlan = int(vl.Vlan) vf.Qos = int(vl.Qos) + case nl.IFLA_VF_VLAN_LIST: + vfVlanInfoList, err := nl.DeserializeVfVlanList(element.Value[:]) + if err != nil { + return vf, err + } + vf.VlanProto = int(vfVlanInfoList[0].VlanProto) case nl.IFLA_VF_TX_RATE: txr := nl.DeserializeVfTxRate(element.Value[:]) vf.TxRate = int(txr.Rate) @@ -2852,16 +3678,35 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { vfr := nl.DeserializeVfRate(element.Value[:]) vf.MaxTxRate = vfr.MaxTxRate vf.MinTxRate = vfr.MinTxRate + case nl.IFLA_VF_STATS: + vfstats := nl.DeserializeVfStats(element.Value[:]) + vf.RxPackets = vfstats.RxPackets + vf.TxPackets = vfstats.TxPackets + vf.RxBytes = vfstats.RxBytes + vf.TxBytes = vfstats.TxBytes + vf.Multicast = vfstats.Multicast + vf.Broadcast = vfstats.Broadcast + vf.RxDropped = vfstats.RxDropped + vf.TxDropped = vfstats.TxDropped + + case nl.IFLA_VF_RSS_QUERY_EN: + result := nl.DeserializeVfRssQueryEn(element.Value) + vf.RssQuery = result.Setting + + case nl.IFLA_VF_TRUST: + 
result := nl.DeserializeVfTrust(element.Value) + vf.Trust = result.Setting } } - return vf + return vf, nil } func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex))) - data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid)) - + if xfrmi.Ifid != 0 { + data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid)) + } } func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2876,8 +3721,7 @@ func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) { } } -// LinkSetBondSlave add slave to bond link via ioctl interface. -func LinkSetBondSlave(link Link, master *Bond) error { +func ioctlBondSlave(cmd uintptr, link Link, master *Bond) error { fd, err := getSocketUDP() if err != nil { return err @@ -2885,10 +3729,38 @@ func LinkSetBondSlave(link Link, master *Bond) error { defer syscall.Close(fd) ifreq := newIocltSlaveReq(link.Attrs().Name, master.Attrs().Name) - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), unix.SIOCBONDENSLAVE, uintptr(unsafe.Pointer(ifreq))) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), cmd, uintptr(unsafe.Pointer(ifreq))) if errno != 0 { - return fmt.Errorf("Failed to enslave %q to %q, errno=%v", link.Attrs().Name, master.Attrs().Name, errno) + return fmt.Errorf("errno=%v", errno) + } + return nil +} + +// LinkSetBondSlaveActive sets specified slave to ACTIVE in an `active-backup` bond link via ioctl interface. +// +// Multiple calls keeps the status unchanged(shown in the unit test). +func LinkSetBondSlaveActive(link Link, master *Bond) error { + err := ioctlBondSlave(unix.SIOCBONDCHANGEACTIVE, link, master) + if err != nil { + return fmt.Errorf("Failed to set slave %q active in %q, %v", link.Attrs().Name, master.Attrs().Name, err) + } + return nil +} + +// LinkSetBondSlave add slave to bond link via ioctl interface. +func LinkSetBondSlave(link Link, master *Bond) error { + err := ioctlBondSlave(unix.SIOCBONDENSLAVE, link, master) + if err != nil { + return fmt.Errorf("Failed to enslave %q to %q, %v", link.Attrs().Name, master.Attrs().Name, err) + } + return nil +} + +// LinkSetBondSlave removes specified slave from bond link via ioctl interface. 
+func LinkDelBondSlave(link Link, master *Bond) error { + err := ioctlBondSlave(unix.SIOCBONDRELEASE, link, master) + if err != nil { + return fmt.Errorf("Failed to del slave %q from %q, %v", link.Attrs().Name, master.Attrs().Name, err) } return nil } @@ -3010,9 +3882,86 @@ func parseIPoIBData(link Link, data []syscall.NetlinkRouteAttr) { } } +func parseCanData(link Link, data []syscall.NetlinkRouteAttr) { + can := link.(*Can) + for _, datum := range data { + + switch datum.Attr.Type { + case nl.IFLA_CAN_BITTIMING: + can.BitRate = native.Uint32(datum.Value) + can.SamplePoint = native.Uint32(datum.Value[4:]) + can.TimeQuanta = native.Uint32(datum.Value[8:]) + can.PropagationSegment = native.Uint32(datum.Value[12:]) + can.PhaseSegment1 = native.Uint32(datum.Value[16:]) + can.PhaseSegment2 = native.Uint32(datum.Value[20:]) + can.SyncJumpWidth = native.Uint32(datum.Value[24:]) + can.BitRatePreScaler = native.Uint32(datum.Value[28:]) + case nl.IFLA_CAN_BITTIMING_CONST: + can.Name = string(datum.Value[:16]) + can.TimeSegment1Min = native.Uint32(datum.Value[16:]) + can.TimeSegment1Max = native.Uint32(datum.Value[20:]) + can.TimeSegment2Min = native.Uint32(datum.Value[24:]) + can.TimeSegment2Max = native.Uint32(datum.Value[28:]) + can.SyncJumpWidthMax = native.Uint32(datum.Value[32:]) + can.BitRatePreScalerMin = native.Uint32(datum.Value[36:]) + can.BitRatePreScalerMax = native.Uint32(datum.Value[40:]) + can.BitRatePreScalerInc = native.Uint32(datum.Value[44:]) + case nl.IFLA_CAN_CLOCK: + can.ClockFrequency = native.Uint32(datum.Value) + case nl.IFLA_CAN_STATE: + can.State = native.Uint32(datum.Value) + case nl.IFLA_CAN_CTRLMODE: + can.Mask = native.Uint32(datum.Value) + can.Flags = native.Uint32(datum.Value[4:]) + case nl.IFLA_CAN_BERR_COUNTER: + can.TxError = native.Uint16(datum.Value) + can.RxError = native.Uint16(datum.Value[2:]) + case nl.IFLA_CAN_RESTART_MS: + can.RestartMs = native.Uint32(datum.Value) + case nl.IFLA_CAN_DATA_BITTIMING_CONST: + case nl.IFLA_CAN_RESTART: + case nl.IFLA_CAN_DATA_BITTIMING: + case nl.IFLA_CAN_TERMINATION: + case nl.IFLA_CAN_TERMINATION_CONST: + case nl.IFLA_CAN_BITRATE_CONST: + case nl.IFLA_CAN_DATA_BITRATE_CONST: + case nl.IFLA_CAN_BITRATE_MAX: + } + } +} + func addIPoIBAttrs(ipoib *IPoIB, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_IPOIB_PKEY, nl.Uint16Attr(uint16(ipoib.Pkey))) data.AddRtAttr(nl.IFLA_IPOIB_MODE, nl.Uint16Attr(uint16(ipoib.Mode))) data.AddRtAttr(nl.IFLA_IPOIB_UMCAST, nl.Uint16Attr(uint16(ipoib.Umcast))) } + +func addBareUDPAttrs(bareudp *BareUDP, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + data.AddRtAttr(nl.IFLA_BAREUDP_PORT, nl.Uint16Attr(nl.Swap16(bareudp.Port))) + data.AddRtAttr(nl.IFLA_BAREUDP_ETHERTYPE, nl.Uint16Attr(nl.Swap16(bareudp.EtherType))) + if bareudp.SrcPortMin != 0 { + data.AddRtAttr(nl.IFLA_BAREUDP_SRCPORT_MIN, nl.Uint16Attr(bareudp.SrcPortMin)) + } + if bareudp.MultiProto { + data.AddRtAttr(nl.IFLA_BAREUDP_MULTIPROTO_MODE, []byte{}) + } +} + +func parseBareUDPData(link Link, data []syscall.NetlinkRouteAttr) { + bareudp := link.(*BareUDP) + for _, attr := range data { + switch attr.Attr.Type { + case nl.IFLA_BAREUDP_PORT: + bareudp.Port = binary.BigEndian.Uint16(attr.Value) + case nl.IFLA_BAREUDP_ETHERTYPE: + bareudp.EtherType = binary.BigEndian.Uint16(attr.Value) + case nl.IFLA_BAREUDP_SRCPORT_MIN: + bareudp.SrcPortMin = native.Uint16(attr.Value) + case nl.IFLA_BAREUDP_MULTIPROTO_MODE: + bareudp.MultiProto = true + } + } +} diff --git 
a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go index 379e5655f..32d722e88 100644 --- a/vendor/github.com/vishvananda/netlink/neigh.go +++ b/vendor/github.com/vishvananda/netlink/neigh.go @@ -12,6 +12,7 @@ type Neigh struct { State int Type int Flags int + FlagsExt int IP net.IP HardwareAddr net.HardwareAddr LLIPAddr net.IP //Used in the case of NHRP diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go index cb3b55d35..2d93044a6 100644 --- a/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -24,7 +24,11 @@ const ( NDA_MASTER NDA_LINK_NETNSID NDA_SRC_VNI - NDA_MAX = NDA_SRC_VNI + NDA_PROTOCOL + NDA_NH_ID + NDA_FDB_EXT_ATTRS + NDA_FLAGS_EXT + NDA_MAX = NDA_FLAGS_EXT ) // Neighbor Cache Entry States. @@ -42,11 +46,19 @@ const ( // Neighbor Flags const ( - NTF_USE = 0x01 - NTF_SELF = 0x02 - NTF_MASTER = 0x04 - NTF_PROXY = 0x08 - NTF_ROUTER = 0x80 + NTF_USE = 0x01 + NTF_SELF = 0x02 + NTF_MASTER = 0x04 + NTF_PROXY = 0x08 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_STICKY = 0x40 + NTF_ROUTER = 0x80 +) + +// Extended Neighbor Flags +const ( + NTF_EXT_MANAGED = 0x00000001 ) // Ndmsg is for adding, removing or receiving information about a neighbor table entry @@ -162,11 +174,16 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { if neigh.LLIPAddr != nil { llIPData := nl.NewRtAttr(NDA_LLADDR, neigh.LLIPAddr.To4()) req.AddData(llIPData) - } else if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil { + } else if neigh.HardwareAddr != nil { hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr)) req.AddData(hwData) } + if neigh.FlagsExt != 0 { + flagsExtData := nl.NewRtAttr(NDA_FLAGS_EXT, nl.Uint32Attr(uint32(neigh.FlagsExt))) + req.AddData(flagsExtData) + } + if neigh.Vlan != 0 { vlanData := nl.NewRtAttr(NDA_VLAN, nl.Uint16Attr(uint16(neigh.Vlan))) req.AddData(vlanData) @@ -243,6 +260,18 @@ func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) { // Ignore messages from other interfaces continue } + if msg.Family != 0 && ndm.Family != msg.Family { + continue + } + if msg.State != 0 && ndm.State != msg.State { + continue + } + if msg.Type != 0 && ndm.Type != msg.Type { + continue + } + if msg.Flags != 0 && ndm.Flags != msg.Flags { + continue + } neigh, err := NeighDeserialize(m) if err != nil { @@ -293,6 +322,8 @@ func NeighDeserialize(m []byte) (*Neigh, error) { } else { neigh.HardwareAddr = net.HardwareAddr(attr.Value) } + case NDA_FLAGS_EXT: + neigh.FlagsExt = int(native.Uint32(attr.Value[0:4])) case NDA_VLAN: neigh.Vlan = int(native.Uint16(attr.Value[0:2])) case NDA_VNI: @@ -308,13 +339,13 @@ func NeighDeserialize(m []byte) (*Neigh, error) { // NeighSubscribe takes a chan down which notifications will be sent // when neighbors are added or deleted. Close the 'done' chan to stop subscription. func NeighSubscribe(ch chan<- NeighUpdate, done <-chan struct{}) error { - return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) + return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) } // NeighSubscribeAt works like NeighSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). 
func NeighSubscribeAt(ns netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}) error { - return neighSubscribeAt(ns, netns.None(), ch, done, nil, false) + return neighSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) } // NeighSubscribeOptions contains a set of options to use with @@ -323,6 +354,11 @@ type NeighSubscribeOptions struct { Namespace *netns.NsHandle ErrorCallback func(error) ListExisting bool + + // max size is based on value of /proc/sys/net/core/rmem_max + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval } // NeighSubscribeWithOptions work like NeighSubscribe but enable to @@ -333,16 +369,17 @@ func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, opti none := netns.None() options.Namespace = &none } - return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) + return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) } -func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { +func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_NEIGH) makeRequest := func(family int) error { - req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, - unix.NLM_F_DUMP) - infmsg := nl.NewIfInfomsg(family) - req.AddData(infmsg) + req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) + ndmsg := &Ndmsg{Family: uint8(family)} + req.AddData(ndmsg) if err := s.Send(req); err != nil { return err } @@ -351,6 +388,17 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < if err != nil { return err } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce) + if err != nil { + return err + } + } if done != nil { go func() { <-done @@ -396,13 +444,12 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < continue } if m.Header.Type == unix.NLMSG_ERROR { - native := nl.NativeEndian() - error := int32(native.Uint32(m.Data[0:4])) - if error == 0 { + nError := int32(native.Uint32(m.Data[0:4])) + if nError == 0 { continue } if cberr != nil { - cberr(syscall.Errno(-error)) + cberr(syscall.Errno(-nError)) } return } diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index 42d3acf91..da12c42a5 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -16,7 +16,7 @@ func LinkSetMTU(link Link, mtu int) error { return ErrNotImplemented } -func LinkSetMaster(link Link, master *Bridge) error { +func LinkSetMaster(link Link, master Link) error { return ErrNotImplemented } @@ -52,6 +52,10 @@ func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { return ErrNotImplemented } +func LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + return ErrNotImplemented +} + func LinkSetVfTxRate(link Link, vf, rate int) error { return ErrNotImplemented } @@ -72,6 +76,10 @@ func 
LinkSetXdpFd(link Link, fd int) error { return ErrNotImplemented } +func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { + return ErrNotImplemented +} + func LinkSetARPOff(link Link) error { return ErrNotImplemented } @@ -120,6 +128,22 @@ func LinkSetTxQLen(link Link, qlen int) error { return ErrNotImplemented } +func LinkSetGSOMaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkSetGROMaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + func LinkAdd(link Link) error { return ErrNotImplemented } @@ -176,14 +200,34 @@ func RouteAdd(route *Route) error { return ErrNotImplemented } +func RouteAppend(route *Route) error { + return ErrNotImplemented +} + +func RouteChange(route *Route) error { + return ErrNotImplemented +} + func RouteDel(route *Route) error { return ErrNotImplemented } +func RouteGet(destination net.IP) ([]Route, error) { + return nil, ErrNotImplemented +} + func RouteList(link Link, family int) ([]Route, error) { return nil, ErrNotImplemented } +func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + return nil, ErrNotImplemented +} + +func RouteReplace(route *Route) error { + return ErrNotImplemented +} + func XfrmPolicyAdd(policy *XfrmPolicy) error { return ErrNotImplemented } @@ -196,6 +240,10 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { return nil, ErrNotImplemented } +func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) { + return nil, ErrNotImplemented +} + func XfrmStateAdd(policy *XfrmState) error { return ErrNotImplemented } @@ -235,3 +283,7 @@ func NeighDeserialize(m []byte) (*Neigh, error) { func SocketGet(local, remote net.Addr) (*Socket, error) { return nil, ErrNotImplemented } + +func SocketDestroy(local, remote net.Addr) (*Socket, error) { + return nil, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/netns_linux.go b/vendor/github.com/vishvananda/netlink/netns_linux.go index 77cf6f469..2eb29c7ce 100644 --- a/vendor/github.com/vishvananda/netlink/netns_linux.go +++ b/vendor/github.com/vishvananda/netlink/netns_linux.go @@ -87,7 +87,7 @@ func (h *Handle) getNetNsId(attrType int, val uint32) (int, error) { rtgen := nl.NewRtGenMsg() req.AddData(rtgen) - b := make([]byte, 4, 4) + b := make([]byte, 4) native.PutUint32(b, val) attr := nl.NewRtAttr(attrType, b) req.AddData(attr) @@ -126,12 +126,12 @@ func (h *Handle) setNetNsId(attrType int, val uint32, newnsid uint32) error { rtgen := nl.NewRtGenMsg() req.AddData(rtgen) - b := make([]byte, 4, 4) + b := make([]byte, 4) native.PutUint32(b, val) attr := nl.NewRtAttr(attrType, b) req.AddData(attr) - b1 := make([]byte, 4, 4) + b1 := make([]byte, 4) native.PutUint32(b1, newnsid) attr1 := nl.NewRtAttr(NETNSA_NSID, b1) req.AddData(attr1) diff --git a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go index 50db3b4cd..6bea4ed02 100644 --- a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go @@ -54,24 +54,18 @@ func (msg *IfAddrmsg) Len() int { // __u32 tstamp; /* updated timestamp, hundredths of seconds */ // }; -const IFA_CACHEINFO = 6 -const SizeofIfaCacheInfo = 0x10 - type IfaCacheInfo struct { - IfaPrefered uint32 - IfaValid uint32 - Cstamp uint32 - Tstamp uint32 + unix.IfaCacheinfo } 
func (msg *IfaCacheInfo) Len() int { - return SizeofIfaCacheInfo + return unix.SizeofIfaCacheinfo } func DeserializeIfaCacheInfo(b []byte) *IfaCacheInfo { - return (*IfaCacheInfo)(unsafe.Pointer(&b[0:SizeofIfaCacheInfo][0])) + return (*IfaCacheInfo)(unsafe.Pointer(&b[0:unix.SizeofIfaCacheinfo][0])) } func (msg *IfaCacheInfo) Serialize() []byte { - return (*(*[SizeofIfaCacheInfo]byte)(unsafe.Pointer(msg)))[:] + return (*(*[unix.SizeofIfaCacheinfo]byte)(unsafe.Pointer(msg)))[:] } diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go index 79d2b6b89..6989d1edc 100644 --- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go @@ -15,6 +15,38 @@ var L4ProtoMap = map[uint8]string{ 17: "udp", } +// From https://git.netfilter.org/libnetfilter_conntrack/tree/include/libnetfilter_conntrack/libnetfilter_conntrack_tcp.h +// enum tcp_state { +// TCP_CONNTRACK_NONE, +// TCP_CONNTRACK_SYN_SENT, +// TCP_CONNTRACK_SYN_RECV, +// TCP_CONNTRACK_ESTABLISHED, +// TCP_CONNTRACK_FIN_WAIT, +// TCP_CONNTRACK_CLOSE_WAIT, +// TCP_CONNTRACK_LAST_ACK, +// TCP_CONNTRACK_TIME_WAIT, +// TCP_CONNTRACK_CLOSE, +// TCP_CONNTRACK_LISTEN, /* obsolete */ +// #define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN +// TCP_CONNTRACK_MAX, +// TCP_CONNTRACK_IGNORE +// }; +const ( + TCP_CONNTRACK_NONE = 0 + TCP_CONNTRACK_SYN_SENT = 1 + TCP_CONNTRACK_SYN_RECV = 2 + TCP_CONNTRACK_ESTABLISHED = 3 + TCP_CONNTRACK_FIN_WAIT = 4 + TCP_CONNTRACK_CLOSE_WAIT = 5 + TCP_CONNTRACK_LAST_ACK = 6 + TCP_CONNTRACK_TIME_WAIT = 7 + TCP_CONNTRACK_CLOSE = 8 + TCP_CONNTRACK_LISTEN = 9 + TCP_CONNTRACK_SYN_SENT2 = 9 + TCP_CONNTRACK_MAX = 10 + TCP_CONNTRACK_IGNORE = 11 +) + // All the following constants are coming from: // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink_conntrack.h @@ -31,6 +63,7 @@ var L4ProtoMap = map[uint8]string{ // IPCTNL_MSG_MAX // }; const ( + IPCTNL_MSG_CT_NEW = 0 IPCTNL_MSG_CT_GET = 1 IPCTNL_MSG_CT_DELETE = 2 ) @@ -40,9 +73,11 @@ const ( NFNETLINK_V0 = 0 ) -// #define NLA_F_NESTED (1 << 15) const ( - NLA_F_NESTED = (1 << 15) + NLA_F_NESTED uint16 = (1 << 15) // #define NLA_F_NESTED (1 << 15) + NLA_F_NET_BYTEORDER uint16 = (1 << 14) // #define NLA_F_NESTED (1 << 14) + NLA_TYPE_MASK = ^(NLA_F_NESTED | NLA_F_NET_BYTEORDER) + NLA_ALIGNTO uint16 = 4 // #define NLA_ALIGNTO 4 ) // enum ctattr_type { @@ -86,7 +121,10 @@ const ( CTA_COUNTERS_REPLY = 10 CTA_USE = 11 CTA_ID = 12 + CTA_ZONE = 18 CTA_TIMESTAMP = 20 + CTA_LABELS = 22 + CTA_LABELS_MASK = 23 ) // enum ctattr_tuple { @@ -147,7 +185,10 @@ const ( // }; // #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) const ( + CTA_PROTOINFO_UNSPEC = 0 CTA_PROTOINFO_TCP = 1 + CTA_PROTOINFO_DCCP = 2 + CTA_PROTOINFO_SCTP = 3 ) // enum ctattr_protoinfo_tcp { diff --git a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go index db66faaad..956367b29 100644 --- a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go @@ -9,17 +9,56 @@ const ( ) const ( - DEVLINK_CMD_GET = 1 - DEVLINK_CMD_ESWITCH_GET = 29 - DEVLINK_CMD_ESWITCH_SET = 30 + DEVLINK_CMD_GET = 1 + DEVLINK_CMD_PORT_GET = 5 + DEVLINK_CMD_PORT_SET = 6 + DEVLINK_CMD_PORT_NEW = 7 + DEVLINK_CMD_PORT_DEL = 8 + DEVLINK_CMD_ESWITCH_GET = 29 + DEVLINK_CMD_ESWITCH_SET = 30 + DEVLINK_CMD_RESOURCE_DUMP = 36 + DEVLINK_CMD_PARAM_GET = 38 + 
DEVLINK_CMD_PARAM_SET = 39 + DEVLINK_CMD_INFO_GET = 51 ) const ( - DEVLINK_ATTR_BUS_NAME = 1 - DEVLINK_ATTR_DEV_NAME = 2 - DEVLINK_ATTR_ESWITCH_MODE = 25 - DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 - DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 + DEVLINK_ATTR_BUS_NAME = 1 + DEVLINK_ATTR_DEV_NAME = 2 + DEVLINK_ATTR_PORT_INDEX = 3 + DEVLINK_ATTR_PORT_TYPE = 4 + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6 + DEVLINK_ATTR_PORT_NETDEV_NAME = 7 + DEVLINK_ATTR_PORT_IBDEV_NAME = 8 + DEVLINK_ATTR_ESWITCH_MODE = 25 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 + DEVLINK_ATTR_RESOURCE_LIST = 63 /* nested */ + DEVLINK_ATTR_RESOURCE = 64 /* nested */ + DEVLINK_ATTR_RESOURCE_NAME = 65 /* string */ + DEVLINK_ATTR_RESOURCE_ID = 66 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE = 67 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69 /* u8 */ + DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72 /* u64 */ + DEVLINK_ATTR_RESOURCE_UNIT = 73 /* u8 */ + DEVLINK_ATTR_RESOURCE_OCC = 74 /* u64 */ + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75 /* u64 */ + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76 /* u64 */ + DEVLINK_ATTR_PORT_FLAVOUR = 77 + DEVLINK_ATTR_INFO_DRIVER_NAME = 98 + DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99 + DEVLINK_ATTR_INFO_VERSION_FIXED = 100 + DEVLINK_ATTR_INFO_VERSION_RUNNING = 101 + DEVLINK_ATTR_INFO_VERSION_STORED = 102 + DEVLINK_ATTR_INFO_VERSION_NAME = 103 + DEVLINK_ATTR_INFO_VERSION_VALUE = 104 + DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127 + DEVLINK_ATTR_PORT_FUNCTION = 145 + DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150 + DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164 ) const ( @@ -38,3 +77,66 @@ const ( DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0 DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1 ) + +const ( + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0 + DEVLINK_PORT_FLAVOUR_CPU = 1 + DEVLINK_PORT_FLAVOUR_DSA = 2 + DEVLINK_PORT_FLAVOUR_PCI_PF = 3 + DEVLINK_PORT_FLAVOUR_PCI_VF = 4 + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5 + DEVLINK_PORT_FLAVOUR_UNUSED = 6 + DEVLINK_PORT_FLAVOUR_PCI_SF = 7 +) + +const ( + DEVLINK_PORT_TYPE_NOTSET = 0 + DEVLINK_PORT_TYPE_AUTO = 1 + DEVLINK_PORT_TYPE_ETH = 2 + DEVLINK_PORT_TYPE_IB = 3 +) + +const ( + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 1 + DEVLINK_PORT_FN_ATTR_STATE = 2 + DEVLINK_PORT_FN_ATTR_OPSTATE = 3 +) + +const ( + DEVLINK_PORT_FN_STATE_INACTIVE = 0 + DEVLINK_PORT_FN_STATE_ACTIVE = 1 +) + +const ( + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0 + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1 +) + +const ( + DEVLINK_RESOURCE_UNIT_ENTRY uint8 = 0 +) + +const ( + DEVLINK_ATTR_PARAM = iota + 80 /* nested */ + DEVLINK_ATTR_PARAM_NAME /* string */ + DEVLINK_ATTR_PARAM_GENERIC /* flag */ + DEVLINK_ATTR_PARAM_TYPE /* u8 */ + DEVLINK_ATTR_PARAM_VALUES_LIST /* nested */ + DEVLINK_ATTR_PARAM_VALUE /* nested */ + DEVLINK_ATTR_PARAM_VALUE_DATA /* dynamic */ + DEVLINK_ATTR_PARAM_VALUE_CMODE /* u8 */ +) + +const ( + DEVLINK_PARAM_TYPE_U8 = 1 + DEVLINK_PARAM_TYPE_U16 = 2 + DEVLINK_PARAM_TYPE_U32 = 3 + DEVLINK_PARAM_TYPE_STRING = 5 + DEVLINK_PARAM_TYPE_BOOL = 6 +) + +const ( + DEVLINK_PARAM_CMODE_RUNTIME = iota + DEVLINK_PARAM_CMODE_DRIVERINIT + DEVLINK_PARAM_CMODE_PERMANENT +) diff --git a/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go b/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go new file mode 100644 index 000000000..d5dd69e0c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go @@ -0,0 +1,21 @@ +package nl + +// id's of route attribute from 
https://elixir.bootlin.com/linux/v5.17.3/source/include/uapi/linux/lwtunnel.h#L38 +// the value's size are specified in https://elixir.bootlin.com/linux/v5.17.3/source/net/ipv4/ip_tunnel_core.c#L928 + +const ( + LWTUNNEL_IP6_UNSPEC = iota + LWTUNNEL_IP6_ID + LWTUNNEL_IP6_DST + LWTUNNEL_IP6_SRC + LWTUNNEL_IP6_HOPLIMIT + LWTUNNEL_IP6_TC + LWTUNNEL_IP6_FLAGS + LWTUNNEL_IP6_PAD // not implemented + LWTUNNEL_IP6_OPTS // not implemented + __LWTUNNEL_IP6_MAX +) + + + + diff --git a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go new file mode 100644 index 000000000..89dd009df --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go @@ -0,0 +1,227 @@ +package nl + +import ( + "strconv" + + "golang.org/x/sys/unix" +) + +const ( + /* The protocol version */ + IPSET_PROTOCOL = 6 + + /* The max length of strings including NUL: set and type identifiers */ + IPSET_MAXNAMELEN = 32 + + /* The maximum permissible comment length we will accept over netlink */ + IPSET_MAX_COMMENT_SIZE = 255 +) + +const ( + _ = iota + IPSET_CMD_PROTOCOL /* 1: Return protocol version */ + IPSET_CMD_CREATE /* 2: Create a new (empty) set */ + IPSET_CMD_DESTROY /* 3: Destroy a (empty) set */ + IPSET_CMD_FLUSH /* 4: Remove all elements from a set */ + IPSET_CMD_RENAME /* 5: Rename a set */ + IPSET_CMD_SWAP /* 6: Swap two sets */ + IPSET_CMD_LIST /* 7: List sets */ + IPSET_CMD_SAVE /* 8: Save sets */ + IPSET_CMD_ADD /* 9: Add an element to a set */ + IPSET_CMD_DEL /* 10: Delete an element from a set */ + IPSET_CMD_TEST /* 11: Test an element in a set */ + IPSET_CMD_HEADER /* 12: Get set header data only */ + IPSET_CMD_TYPE /* 13: Get set type */ +) + +/* Attributes at command level */ +const ( + _ = iota + IPSET_ATTR_PROTOCOL /* 1: Protocol version */ + IPSET_ATTR_SETNAME /* 2: Name of the set */ + IPSET_ATTR_TYPENAME /* 3: Typename */ + IPSET_ATTR_REVISION /* 4: Settype revision */ + IPSET_ATTR_FAMILY /* 5: Settype family */ + IPSET_ATTR_FLAGS /* 6: Flags at command level */ + IPSET_ATTR_DATA /* 7: Nested attributes */ + IPSET_ATTR_ADT /* 8: Multiple data containers */ + IPSET_ATTR_LINENO /* 9: Restore lineno */ + IPSET_ATTR_PROTOCOL_MIN /* 10: Minimal supported version number */ + + IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME /* Setname at rename/swap */ + IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN /* type rev min */ +) + +/* CADT specific attributes */ +const ( + IPSET_ATTR_IP = 1 + IPSET_ATTR_IP_FROM = 1 + IPSET_ATTR_IP_TO = 2 + IPSET_ATTR_CIDR = 3 + IPSET_ATTR_PORT = 4 + IPSET_ATTR_PORT_FROM = 4 + IPSET_ATTR_PORT_TO = 5 + IPSET_ATTR_TIMEOUT = 6 + IPSET_ATTR_PROTO = 7 + IPSET_ATTR_CADT_FLAGS = 8 + IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO /* 9 */ + IPSET_ATTR_MARK = 10 + IPSET_ATTR_MARKMASK = 11 + + /* Reserve empty slots */ + IPSET_ATTR_CADT_MAX = 16 + + /* Create-only specific attributes */ + IPSET_ATTR_GC = 3 + iota + IPSET_ATTR_HASHSIZE + IPSET_ATTR_MAXELEM + IPSET_ATTR_NETMASK + IPSET_ATTR_PROBES + IPSET_ATTR_RESIZE + IPSET_ATTR_SIZE + + /* Kernel-only */ + IPSET_ATTR_ELEMENTS + IPSET_ATTR_REFERENCES + IPSET_ATTR_MEMSIZE + + SET_ATTR_CREATE_MAX +) + +const ( + IPSET_ATTR_IPADDR_IPV4 = 1 + IPSET_ATTR_IPADDR_IPV6 = 2 +) + +/* ADT specific attributes */ +const ( + IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + iota + 1 + IPSET_ATTR_NAME + IPSET_ATTR_NAMEREF + IPSET_ATTR_IP2 + IPSET_ATTR_CIDR2 + IPSET_ATTR_IP2_TO + IPSET_ATTR_IFACE + IPSET_ATTR_BYTES + IPSET_ATTR_PACKETS + IPSET_ATTR_COMMENT + IPSET_ATTR_SKBMARK + IPSET_ATTR_SKBPRIO + 
IPSET_ATTR_SKBQUEUE +) + +/* Flags at CADT attribute level, upper half of cmdattrs */ +const ( + IPSET_FLAG_BIT_BEFORE = 0 + IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE) + IPSET_FLAG_BIT_PHYSDEV = 1 + IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV) + IPSET_FLAG_BIT_NOMATCH = 2 + IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH) + IPSET_FLAG_BIT_WITH_COUNTERS = 3 + IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS) + IPSET_FLAG_BIT_WITH_COMMENT = 4 + IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT) + IPSET_FLAG_BIT_WITH_FORCEADD = 5 + IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD) + IPSET_FLAG_BIT_WITH_SKBINFO = 6 + IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO) + IPSET_FLAG_CADT_MAX = 15 +) + +const ( + IPSET_ERR_PRIVATE = 4096 + iota + IPSET_ERR_PROTOCOL + IPSET_ERR_FIND_TYPE + IPSET_ERR_MAX_SETS + IPSET_ERR_BUSY + IPSET_ERR_EXIST_SETNAME2 + IPSET_ERR_TYPE_MISMATCH + IPSET_ERR_EXIST + IPSET_ERR_INVALID_CIDR + IPSET_ERR_INVALID_NETMASK + IPSET_ERR_INVALID_FAMILY + IPSET_ERR_TIMEOUT + IPSET_ERR_REFERENCED + IPSET_ERR_IPADDR_IPV4 + IPSET_ERR_IPADDR_IPV6 + IPSET_ERR_COUNTER + IPSET_ERR_COMMENT + IPSET_ERR_INVALID_MARKMASK + IPSET_ERR_SKBINFO + + /* Type specific error codes */ + IPSET_ERR_TYPE_SPECIFIC = 4352 +) + +type IPSetError uintptr + +func (e IPSetError) Error() string { + switch int(e) { + case IPSET_ERR_PRIVATE: + return "private" + case IPSET_ERR_PROTOCOL: + return "invalid protocol" + case IPSET_ERR_FIND_TYPE: + return "invalid type" + case IPSET_ERR_MAX_SETS: + return "max sets reached" + case IPSET_ERR_BUSY: + return "busy" + case IPSET_ERR_EXIST_SETNAME2: + return "exist_setname2" + case IPSET_ERR_TYPE_MISMATCH: + return "type mismatch" + case IPSET_ERR_EXIST: + return "exist" + case IPSET_ERR_INVALID_CIDR: + return "invalid cidr" + case IPSET_ERR_INVALID_NETMASK: + return "invalid netmask" + case IPSET_ERR_INVALID_FAMILY: + return "invalid family" + case IPSET_ERR_TIMEOUT: + return "timeout" + case IPSET_ERR_REFERENCED: + return "referenced" + case IPSET_ERR_IPADDR_IPV4: + return "invalid ipv4 address" + case IPSET_ERR_IPADDR_IPV6: + return "invalid ipv6 address" + case IPSET_ERR_COUNTER: + return "invalid counter" + case IPSET_ERR_COMMENT: + return "invalid comment" + case IPSET_ERR_INVALID_MARKMASK: + return "invalid markmask" + case IPSET_ERR_SKBINFO: + return "skbinfo" + default: + return "errno " + strconv.Itoa(int(e)) + } +} + +func GetIpsetFlags(cmd int) int { + switch cmd { + case IPSET_CMD_CREATE: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK | unix.NLM_F_CREATE + case IPSET_CMD_DESTROY, + IPSET_CMD_FLUSH, + IPSET_CMD_RENAME, + IPSET_CMD_SWAP, + IPSET_CMD_TEST: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK + case IPSET_CMD_LIST, + IPSET_CMD_SAVE: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK | unix.NLM_F_ROOT | unix.NLM_F_MATCH | unix.NLM_F_DUMP + case IPSET_CMD_ADD, + IPSET_CMD_DEL: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK + case IPSET_CMD_HEADER, + IPSET_CMD_TYPE, + IPSET_CMD_PROTOCOL: + return unix.NLM_F_REQUEST + default: + return 0 + } +} diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index afb16a9c1..0b5be470c 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -1,6 +1,9 @@ package nl import ( + "bytes" + "encoding/binary" + "fmt" "unsafe" ) @@ -28,6 +31,16 @@ const ( IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL ) +const ( + IFLA_NETKIT_UNSPEC = iota + 
IFLA_NETKIT_PEER_INFO + IFLA_NETKIT_PRIMARY + IFLA_NETKIT_POLICY + IFLA_NETKIT_PEER_POLICY + IFLA_NETKIT_MODE + IFLA_NETKIT_MAX = IFLA_NETKIT_MODE +) + const ( VETH_INFO_UNSPEC = iota VETH_INFO_PEER @@ -83,7 +96,37 @@ const ( IFLA_BRPORT_PROXYARP IFLA_BRPORT_LEARNING_SYNC IFLA_BRPORT_PROXYARP_WIFI - IFLA_BRPORT_MAX = IFLA_BRPORT_PROXYARP_WIFI + IFLA_BRPORT_ROOT_ID + IFLA_BRPORT_BRIDGE_ID + IFLA_BRPORT_DESIGNATED_PORT + IFLA_BRPORT_DESIGNATED_COST + IFLA_BRPORT_ID + IFLA_BRPORT_NO + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK + IFLA_BRPORT_CONFIG_PENDING + IFLA_BRPORT_MESSAGE_AGE_TIMER + IFLA_BRPORT_FORWARD_DELAY_TIMER + IFLA_BRPORT_HOLD_TIMER + IFLA_BRPORT_FLUSH + IFLA_BRPORT_MULTICAST_ROUTER + IFLA_BRPORT_PAD + IFLA_BRPORT_MCAST_FLOOD + IFLA_BRPORT_MCAST_TO_UCAST + IFLA_BRPORT_VLAN_TUNNEL + IFLA_BRPORT_BCAST_FLOOD + IFLA_BRPORT_GROUP_FWD_MASK + IFLA_BRPORT_NEIGH_SUPPRESS + IFLA_BRPORT_ISOLATED + IFLA_BRPORT_BACKUP_PORT + IFLA_BRPORT_MRP_RING_OPEN + IFLA_BRPORT_MRP_IN_OPEN + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT + IFLA_BRPORT_LOCKED + IFLA_BRPORT_MAB + IFLA_BRPORT_MCAST_N_GROUPS + IFLA_BRPORT_MCAST_MAX_GROUPS + IFLA_BRPORT_MAX = IFLA_BRPORT_MCAST_MAX_GROUPS ) const ( @@ -101,7 +144,9 @@ const ( IFLA_MACVLAN_MACADDR IFLA_MACVLAN_MACADDR_DATA IFLA_MACVLAN_MACADDR_COUNT - IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS + IFLA_MACVLAN_BC_QUEUE_LEN + IFLA_MACVLAN_BC_QUEUE_LEN_USED + IFLA_MACVLAN_MAX = IFLA_MACVLAN_BC_QUEUE_LEN_USED ) const ( @@ -171,6 +216,25 @@ const ( IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE ) +const ( + IFLA_GENEVE_UNSPEC = iota + IFLA_GENEVE_ID // vni + IFLA_GENEVE_REMOTE + IFLA_GENEVE_TTL + IFLA_GENEVE_TOS + IFLA_GENEVE_PORT // destination port + IFLA_GENEVE_COLLECT_METADATA + IFLA_GENEVE_REMOTE6 + IFLA_GENEVE_UDP_CSUM + IFLA_GENEVE_UDP_ZERO_CSUM6_TX + IFLA_GENEVE_UDP_ZERO_CSUM6_RX + IFLA_GENEVE_LABEL + IFLA_GENEVE_TTL_INHERIT + IFLA_GENEVE_DF + IFLA_GENEVE_INNER_PROTO_INHERIT + IFLA_GENEVE_MAX = IFLA_GENEVE_INNER_PROTO_INHERIT +) + const ( IFLA_GRE_UNSPEC = iota IFLA_GRE_LINK @@ -226,7 +290,15 @@ const ( IFLA_VF_TRUST /* Trust state of VF */ IFLA_VF_IB_NODE_GUID /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID /* VF Infiniband port GUID */ - IFLA_VF_MAX = IFLA_VF_IB_PORT_GUID + IFLA_VF_VLAN_LIST /* nested list of vlans, option for QinQ */ + + IFLA_VF_MAX = IFLA_VF_IB_PORT_GUID +) + +const ( + IFLA_VF_VLAN_INFO_UNSPEC = iota + IFLA_VF_VLAN_INFO /* VLAN ID, QoS and VLAN protocol */ + __IFLA_VF_VLAN_INFO_MAX ) const ( @@ -243,12 +315,15 @@ const ( IFLA_VF_STATS_TX_BYTES IFLA_VF_STATS_BROADCAST IFLA_VF_STATS_MULTICAST - IFLA_VF_STATS_MAX = IFLA_VF_STATS_MULTICAST + IFLA_VF_STATS_RX_DROPPED + IFLA_VF_STATS_TX_DROPPED + IFLA_VF_STATS_MAX = IFLA_VF_STATS_TX_DROPPED ) const ( SizeofVfMac = 0x24 SizeofVfVlan = 0x0c + SizeofVfVlanInfo = 0x10 SizeofVfTxRate = 0x08 SizeofVfRate = 0x0c SizeofVfSpoofchk = 0x08 @@ -304,6 +379,49 @@ func (msg *VfVlan) Serialize() []byte { return (*(*[SizeofVfVlan]byte)(unsafe.Pointer(msg)))[:] } +func DeserializeVfVlanList(b []byte) ([]*VfVlanInfo, error) { + var vfVlanInfoList []*VfVlanInfo + attrs, err := ParseRouteAttr(b) + if err != nil { + return nil, err + } + + for _, element := range attrs { + if element.Attr.Type == IFLA_VF_VLAN_INFO { + vfVlanInfoList = append(vfVlanInfoList, DeserializeVfVlanInfo(element.Value)) + } + } + + if len(vfVlanInfoList) == 0 { + return nil, fmt.Errorf("VF vlan list is defined but no vf vlan info elements were found") + } + + return vfVlanInfoList, nil +} + +// struct ifla_vf_vlan_info { +// __u32 vf; +// 
__u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ +// __u32 qos; +// __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */ +// }; + +type VfVlanInfo struct { + VfVlan + VlanProto uint16 +} + +func DeserializeVfVlanInfo(b []byte) *VfVlanInfo { + return &VfVlanInfo{ + *(*VfVlan)(unsafe.Pointer(&b[0:SizeofVfVlan][0])), + binary.BigEndian.Uint16(b[SizeofVfVlan:SizeofVfVlanInfo]), + } +} + +func (msg *VfVlanInfo) Serialize() []byte { + return (*(*[SizeofVfVlanInfo]byte)(unsafe.Pointer(msg)))[:] +} + // struct ifla_vf_tx_rate { // __u32 vf; // __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ @@ -326,6 +444,59 @@ func (msg *VfTxRate) Serialize() []byte { return (*(*[SizeofVfTxRate]byte)(unsafe.Pointer(msg)))[:] } +//struct ifla_vf_stats { +// __u64 rx_packets; +// __u64 tx_packets; +// __u64 rx_bytes; +// __u64 tx_bytes; +// __u64 broadcast; +// __u64 multicast; +//}; + +type VfStats struct { + RxPackets uint64 + TxPackets uint64 + RxBytes uint64 + TxBytes uint64 + Multicast uint64 + Broadcast uint64 + RxDropped uint64 + TxDropped uint64 +} + +func DeserializeVfStats(b []byte) VfStats { + var vfstat VfStats + stats, err := ParseRouteAttr(b) + if err != nil { + return vfstat + } + var valueVar uint64 + for _, stat := range stats { + if err := binary.Read(bytes.NewBuffer(stat.Value), NativeEndian(), &valueVar); err != nil { + break + } + switch stat.Attr.Type { + case IFLA_VF_STATS_RX_PACKETS: + vfstat.RxPackets = valueVar + case IFLA_VF_STATS_TX_PACKETS: + vfstat.TxPackets = valueVar + case IFLA_VF_STATS_RX_BYTES: + vfstat.RxBytes = valueVar + case IFLA_VF_STATS_TX_BYTES: + vfstat.TxBytes = valueVar + case IFLA_VF_STATS_MULTICAST: + vfstat.Multicast = valueVar + case IFLA_VF_STATS_BROADCAST: + vfstat.Broadcast = valueVar + case IFLA_VF_STATS_RX_DROPPED: + vfstat.RxDropped = valueVar + case IFLA_VF_STATS_TX_DROPPED: + vfstat.TxDropped = valueVar + } + } + return vfstat +} + // struct ifla_vf_rate { // __u32 vf; // __u32 min_tx_rate; /* Min Bandwidth in Mbps */ @@ -478,6 +649,14 @@ const ( IFLA_XDP_MAX = IFLA_XDP_PROG_ID ) +// XDP program attach mode (used as dump value for IFLA_XDP_ATTACHED) +const ( + XDP_ATTACHED_NONE = iota + XDP_ATTACHED_DRV + XDP_ATTACHED_SKB + XDP_ATTACHED_HW +) + const ( IFLA_IPTUN_UNSPEC = iota IFLA_IPTUN_LINK @@ -608,3 +787,32 @@ const ( IFLA_IPOIB_UMCAST IFLA_IPOIB_MAX = IFLA_IPOIB_UMCAST ) + +const ( + IFLA_CAN_UNSPEC = iota + IFLA_CAN_BITTIMING + IFLA_CAN_BITTIMING_CONST + IFLA_CAN_CLOCK + IFLA_CAN_STATE + IFLA_CAN_CTRLMODE + IFLA_CAN_RESTART_MS + IFLA_CAN_RESTART + IFLA_CAN_BERR_COUNTER + IFLA_CAN_DATA_BITTIMING + IFLA_CAN_DATA_BITTIMING_CONST + IFLA_CAN_TERMINATION + IFLA_CAN_TERMINATION_CONST + IFLA_CAN_BITRATE_CONST + IFLA_CAN_DATA_BITRATE_CONST + IFLA_CAN_BITRATE_MAX + IFLA_CAN_MAX = IFLA_CAN_BITRATE_MAX +) + +const ( + IFLA_BAREUDP_UNSPEC = iota + IFLA_BAREUDP_PORT + IFLA_BAREUDP_ETHERTYPE + IFLA_BAREUDP_SRCPORT_MIN + IFLA_BAREUDP_MULTIPROTO_MODE + IFLA_BAREUDP_MAX = IFLA_BAREUDP_MULTIPROTO_MODE +) diff --git a/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go b/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go new file mode 100644 index 000000000..bafd593c4 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go @@ -0,0 +1,29 @@ +package nl + +const ( + LWT_BPF_PROG_UNSPEC = iota + LWT_BPF_PROG_FD + LWT_BPF_PROG_NAME + __LWT_BPF_PROG_MAX +) + +const ( + LWT_BPF_PROG_MAX = __LWT_BPF_PROG_MAX - 1 +) + +const ( + LWT_BPF_UNSPEC = iota + LWT_BPF_IN + LWT_BPF_OUT + LWT_BPF_XMIT + LWT_BPF_XMIT_HEADROOM + 
__LWT_BPF_MAX +) + +const ( + LWT_BPF_MAX = __LWT_BPF_MAX - 1 +) + +const ( + LWT_BPF_MAX_HEADROOM = 256 +) diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index aaf56c671..6cecc4517 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "fmt" "net" + "os" "runtime" "sync" "sync/atomic" @@ -27,7 +28,8 @@ const ( // tc rules or filters, or other more memory requiring data. RECEIVE_BUFFER_SIZE = 65536 // Kernel netlink pid - PidKernel uint32 = 0 + PidKernel uint32 = 0 + SizeofCnMsgOp = 0x18 ) // SupportedNlFamilies contains the list of netlink families this netlink package supports @@ -35,6 +37,12 @@ var SupportedNlFamilies = []int{unix.NETLINK_ROUTE, unix.NETLINK_XFRM, unix.NETL var nextSeqNr uint32 +// Default netlink socket timeout, 60s +var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0} + +// ErrorMessageReporting is the default error message reporting configuration for the new netlink sockets +var EnableErrorMessageReporting bool = false + // GetIPFamily returns the family type of a net.IP. func GetIPFamily(ip net.IP) int { if len(ip) <= net.IPv4len { @@ -77,11 +85,69 @@ func Swap32(i uint32) uint32 { return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24 } +const ( + NLMSGERR_ATTR_UNUSED = 0 + NLMSGERR_ATTR_MSG = 1 + NLMSGERR_ATTR_OFFS = 2 + NLMSGERR_ATTR_COOKIE = 3 + NLMSGERR_ATTR_POLICY = 4 +) + type NetlinkRequestData interface { Len() int Serialize() []byte } +const ( + PROC_CN_MCAST_LISTEN = 1 + PROC_CN_MCAST_IGNORE +) + +type CbID struct { + Idx uint32 + Val uint32 +} + +type CnMsg struct { + ID CbID + Seq uint32 + Ack uint32 + Length uint16 + Flags uint16 +} + +type CnMsgOp struct { + CnMsg + // here we differ from the C header + Op uint32 +} + +func NewCnMsg(idx, val, op uint32) *CnMsgOp { + var cm CnMsgOp + + cm.ID.Idx = idx + cm.ID.Val = val + + cm.Ack = 0 + cm.Seq = 1 + cm.Length = uint16(binary.Size(op)) + cm.Op = op + + return &cm +} + +func (msg *CnMsgOp) Serialize() []byte { + return (*(*[SizeofCnMsgOp]byte)(unsafe.Pointer(msg)))[:] +} + +func DeserializeCnMsgOp(b []byte) *CnMsgOp { + return (*CnMsgOp)(unsafe.Pointer(&b[0:SizeofCnMsgOp][0])) +} + +func (msg *CnMsgOp) Len() int { + return SizeofCnMsgOp +} + // IfInfomsg is related to links, but it is used for list requests as well type IfInfomsg struct { unix.IfInfomsg @@ -249,6 +315,12 @@ func (msg *IfInfomsg) EncapType() string { return fmt.Sprintf("unknown%d", msg.Type) } +// Round the length of a netlink message up to align it properly. +// Taken from syscall/netlink_linux.go by The Go Authors under BSD-style license. 
+func nlmAlignOf(msglen int) int { + return (msglen + syscall.NLMSG_ALIGNTO - 1) & ^(syscall.NLMSG_ALIGNTO - 1) +} + func rtaAlignOf(attrlen int) int { return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1) } @@ -259,6 +331,42 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { return msg } +type Uint32Bitfield struct { + Value uint32 + Selector uint32 +} + +func (a *Uint32Bitfield) Serialize() []byte { + return (*(*[SizeofUint32Bitfield]byte)(unsafe.Pointer(a)))[:] +} + +func DeserializeUint32Bitfield(data []byte) *Uint32Bitfield { + return (*Uint32Bitfield)(unsafe.Pointer(&data[0:SizeofUint32Bitfield][0])) +} + +type Uint32Attribute struct { + Type uint16 + Value uint32 +} + +func (a *Uint32Attribute) Serialize() []byte { + native := NativeEndian() + buf := make([]byte, rtaAlignOf(8)) + native.PutUint16(buf[0:2], 8) + native.PutUint16(buf[2:4], a.Type) + + if a.Type&NLA_F_NET_BYTEORDER != 0 { + binary.BigEndian.PutUint32(buf[4:], a.Value) + } else { + native.PutUint32(buf[4:], a.Value) + } + return buf +} + +func (a *Uint32Attribute) Len() int { + return 8 +} + // Extend RtAttr to handle data and children type RtAttr struct { unix.RtAttr @@ -381,10 +489,30 @@ func (req *NetlinkRequest) AddRawData(data []byte) { req.RawData = append(req.RawData, data...) } -// Execute the request against a the given sockType. +// Execute the request against the given sockType. // Returns a list of netlink messages in serialized format, optionally filtered // by resType. func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) { + var res [][]byte + err := req.ExecuteIter(sockType, resType, func(msg []byte) bool { + res = append(res, msg) + return true + }) + if err != nil { + return nil, err + } + return res, nil +} + +// ExecuteIter executes the request against the given sockType. +// Calls the provided callback func once for each netlink message. +// If the callback returns false, it is not called again, but +// the remaining messages are consumed/discarded. +// +// Thread safety: ExecuteIter holds a lock on the socket until +// it finishes iteration so the callback must not call back into +// the netlink API. 
+func (req *NetlinkRequest) ExecuteIter(sockType int, resType uint16, f func(msg []byte) bool) error { var ( s *NetlinkSocket err error @@ -401,8 +529,21 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro if s == nil { s, err = getNetlinkSocket(sockType) if err != nil { - return nil, err + return err } + + if err := s.SetSendTimeout(&SocketTimeoutTv); err != nil { + return err + } + if err := s.SetReceiveTimeout(&SocketTimeoutTv); err != nil { + return err + } + if EnableErrorMessageReporting { + if err := s.SetExtAck(true); err != nil { + return err + } + } + defer s.Close() } else { s.Lock() @@ -410,56 +551,94 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro } if err := s.Send(req); err != nil { - return nil, err + return err } pid, err := s.GetPid() if err != nil { - return nil, err + return err } - var res [][]byte - done: for { msgs, from, err := s.Receive() if err != nil { - return nil, err + return err } if from.Pid != PidKernel { - return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel) + return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel) } for _, m := range msgs { if m.Header.Seq != req.Seq { if sharedSocket { continue } - return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq) + return fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq) } if m.Header.Pid != pid { continue } - if m.Header.Type == unix.NLMSG_DONE { - break done + + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 { + return syscall.Errno(unix.EINTR) } - if m.Header.Type == unix.NLMSG_ERROR { + + if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR { + // NLMSG_DONE might have no payload, if so assume no error. + if m.Header.Type == unix.NLMSG_DONE && len(m.Data) == 0 { + break done + } + native := NativeEndian() - error := int32(native.Uint32(m.Data[0:4])) - if error == 0 { + errno := int32(native.Uint32(m.Data[0:4])) + if errno == 0 { break done } - return nil, syscall.Errno(-error) + var err error + err = syscall.Errno(-errno) + + unreadData := m.Data[4:] + if m.Header.Flags&unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr { + // Skip the echoed request message. + echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0])) + unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):] + + // Annotate `err` using nlmsgerr attributes. + for len(unreadData) >= syscall.SizeofRtAttr { + attr := (*syscall.RtAttr)(unsafe.Pointer(&unreadData[0])) + attrData := unreadData[syscall.SizeofRtAttr:attr.Len] + + switch attr.Type { + case NLMSGERR_ATTR_MSG: + err = fmt.Errorf("%w: %s", err, unix.ByteSliceToString(attrData)) + default: + // TODO: handle other NLMSGERR_ATTR types + } + + unreadData = unreadData[rtaAlignOf(int(attr.Len)):] + } + } + + return err } if resType != 0 && m.Header.Type != resType { continue } - res = append(res, m.Data) + if cont := f(m.Data); !cont { + // Drain the rest of the messages from the kernel but don't + // pass them to the iterator func. 
+ f = dummyMsgIterFunc + } if m.Header.Flags&unix.NLM_F_MULTI == 0 { break done } } } - return res, nil + return nil +} + +func dummyMsgIterFunc(msg []byte) bool { + return true } // Create a new netlink request from proto and flags @@ -477,8 +656,9 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest { } type NetlinkSocket struct { - fd int32 - lsa unix.SockaddrNetlink + fd int32 + file *os.File + lsa unix.SockaddrNetlink sync.Mutex } @@ -487,8 +667,13 @@ func getNetlinkSocket(protocol int) (*NetlinkSocket, error) { if err != nil { return nil, err } + err = unix.SetNonblock(fd, true) + if err != nil { + return nil, err + } s := &NetlinkSocket{ - fd: int32(fd), + fd: int32(fd), + file: os.NewFile(uintptr(fd), "netlink"), } s.lsa.Family = unix.AF_NETLINK if err := unix.Bind(fd, &s.lsa); err != nil { @@ -519,12 +704,14 @@ func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSock // In case of success, the caller is expected to execute the returned function // at the end of the code that needs to be executed in the network namespace. // Example: -// func jobAt(...) error { -// d, err := executeInNetns(...) -// if err != nil { return err} -// defer d() -// < code which needs to be executed in specific netns> -// } +// +// func jobAt(...) error { +// d, err := executeInNetns(...) +// if err != nil { return err} +// defer d() +// < code which needs to be executed in specific netns> +// } +// // TODO: his function probably belongs to netns pkg. func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { var ( @@ -573,8 +760,13 @@ func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { if err != nil { return nil, err } + err = unix.SetNonblock(fd, true) + if err != nil { + return nil, err + } s := &NetlinkSocket{ - fd: int32(fd), + fd: int32(fd), + file: os.NewFile(uintptr(fd), "netlink"), } s.lsa.Family = unix.AF_NETLINK @@ -603,33 +795,36 @@ func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*Ne } func (s *NetlinkSocket) Close() { - fd := int(atomic.SwapInt32(&s.fd, -1)) - unix.Close(fd) + s.file.Close() } func (s *NetlinkSocket) GetFd() int { - return int(atomic.LoadInt32(&s.fd)) + return int(s.fd) } func (s *NetlinkSocket) Send(request *NetlinkRequest) error { - fd := int(atomic.LoadInt32(&s.fd)) - if fd < 0 { - return fmt.Errorf("Send called on a closed socket") - } - if err := unix.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { - return err - } - return nil + return unix.Sendto(int(s.fd), request.Serialize(), 0, &s.lsa) } func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) { - fd := int(atomic.LoadInt32(&s.fd)) - if fd < 0 { - return nil, nil, fmt.Errorf("Receive called on a closed socket") + rawConn, err := s.file.SyscallConn() + if err != nil { + return nil, nil, err + } + var ( + fromAddr *unix.SockaddrNetlink + rb [RECEIVE_BUFFER_SIZE]byte + nr int + from unix.Sockaddr + innerErr error + ) + err = rawConn.Read(func(fd uintptr) (done bool) { + nr, from, innerErr = unix.Recvfrom(int(fd), rb[:], 0) + return innerErr != unix.EWOULDBLOCK + }) + if innerErr != nil { + err = innerErr } - var fromAddr *unix.SockaddrNetlink - var rb [RECEIVE_BUFFER_SIZE]byte - nr, from, err := unix.Recvfrom(fd, rb[:], 0) if err != nil { return nil, nil, err } @@ -640,8 +835,9 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli if nr < unix.NLMSG_HDRLEN { return nil, nil, fmt.Errorf("Got short response from netlink") } - rb2 := make([]byte, nr) - copy(rb2, 
rb[:nr]) + msgLen := nlmAlignOf(nr) + rb2 := make([]byte, msgLen) + copy(rb2, rb[:msgLen]) nl, err := syscall.ParseNetlinkMessage(rb2) if err != nil { return nil, nil, err @@ -663,9 +859,27 @@ func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) } +// SetReceiveBufferSize allows to set a receive buffer size on the socket +func (s *NetlinkSocket) SetReceiveBufferSize(size int, force bool) error { + opt := unix.SO_RCVBUF + if force { + opt = unix.SO_RCVBUFFORCE + } + return unix.SetsockoptInt(int(s.fd), unix.SOL_SOCKET, opt, size) +} + +// SetExtAck requests error messages to be reported on the socket +func (s *NetlinkSocket) SetExtAck(enable bool) error { + var enableN int + if enable { + enableN = 1 + } + + return unix.SetsockoptInt(int(s.fd), unix.SOL_NETLINK, unix.NETLINK_EXT_ACK, enableN) +} + func (s *NetlinkSocket) GetPid() (uint32, error) { - fd := int(atomic.LoadInt32(&s.fd)) - lsa, err := unix.Getsockname(fd) + lsa, err := unix.Getsockname(int(s.fd)) if err != nil { return 0, err } @@ -709,6 +923,12 @@ func Uint16Attr(v uint16) []byte { return bytes } +func BEUint16Attr(v uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, v) + return bytes +} + func Uint32Attr(v uint32) []byte { native := NativeEndian() bytes := make([]byte, 4) @@ -716,6 +936,12 @@ func Uint32Attr(v uint32) []byte { return bytes } +func BEUint32Attr(v uint32) []byte { + bytes := make([]byte, 4) + binary.BigEndian.PutUint32(bytes, v) + return bytes +} + func Uint64Attr(v uint64) []byte { native := NativeEndian() bytes := make([]byte, 8) @@ -723,6 +949,12 @@ func Uint64Attr(v uint64) []byte { return bytes } +func BEUint64Attr(v uint64) []byte { + bytes := make([]byte, 8) + binary.BigEndian.PutUint64(bytes, v) + return bytes +} + func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) { var attrs []syscall.NetlinkRouteAttr for len(b) >= unix.SizeofRtAttr { @@ -737,6 +969,22 @@ func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) { return attrs, nil } +// ParseRouteAttrAsMap parses provided buffer that contains raw RtAttrs and returns a map of parsed +// atttributes indexed by attribute type or error if occured. 
+func ParseRouteAttrAsMap(b []byte) (map[uint16]syscall.NetlinkRouteAttr, error) { + attrMap := make(map[uint16]syscall.NetlinkRouteAttr) + + attrs, err := ParseRouteAttr(b) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + attrMap[attr.Attr.Type] = attr + } + return attrMap, nil +} + func netlinkRouteAttrAndValue(b []byte) (*unix.RtAttr, []byte, int, error) { a := (*unix.RtAttr)(unsafe.Pointer(&b[0])) if int(a.Len) < unix.SizeofRtAttr || int(a.Len) > len(b) { diff --git a/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go b/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go new file mode 100644 index 000000000..7f49125cf --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go @@ -0,0 +1,79 @@ +package nl + +import ( + "encoding/binary" + "fmt" + "log" +) + +type Attribute struct { + Type uint16 + Value []byte +} + +func ParseAttributes(data []byte) <-chan Attribute { + native := NativeEndian() + result := make(chan Attribute) + + go func() { + i := 0 + for i+4 < len(data) { + length := int(native.Uint16(data[i : i+2])) + attrType := native.Uint16(data[i+2 : i+4]) + + if length < 4 { + log.Printf("attribute 0x%02x has invalid length of %d bytes", attrType, length) + break + } + + if len(data) < i+length { + log.Printf("attribute 0x%02x of length %d is truncated, only %d bytes remaining", attrType, length, len(data)-i) + break + } + + result <- Attribute{ + Type: attrType, + Value: data[i+4 : i+length], + } + i += rtaAlignOf(length) + } + close(result) + }() + + return result +} + +func PrintAttributes(data []byte) { + printAttributes(data, 0) +} + +func printAttributes(data []byte, level int) { + for attr := range ParseAttributes(data) { + for i := 0; i < level; i++ { + print("> ") + } + nested := attr.Type&NLA_F_NESTED != 0 + fmt.Printf("type=%d nested=%v len=%v %v\n", attr.Type&NLA_TYPE_MASK, nested, len(attr.Value), attr.Value) + if nested { + printAttributes(attr.Value, level+1) + } + } +} + +// Uint32 returns the uint32 value respecting the NET_BYTEORDER flag +func (attr *Attribute) Uint32() uint32 { + if attr.Type&NLA_F_NET_BYTEORDER != 0 { + return binary.BigEndian.Uint32(attr.Value) + } else { + return NativeEndian().Uint32(attr.Value) + } +} + +// Uint64 returns the uint64 value respecting the NET_BYTEORDER flag +func (attr *Attribute) Uint64() uint64 { + if attr.Type&NLA_F_NET_BYTEORDER != 0 { + return binary.BigEndian.Uint64(attr.Value) + } else { + return NativeEndian().Uint64(attr.Value) + } +} diff --git a/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go index 1224b747d..ce43ee155 100644 --- a/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go @@ -11,6 +11,8 @@ const ( const ( RDMA_NLDEV_CMD_GET = 1 RDMA_NLDEV_CMD_SET = 2 + RDMA_NLDEV_CMD_NEWLINK = 3 + RDMA_NLDEV_CMD_DELLINK = 4 RDMA_NLDEV_CMD_SYS_GET = 6 RDMA_NLDEV_CMD_SYS_SET = 7 ) @@ -30,6 +32,8 @@ const ( RDMA_NLDEV_ATTR_PORT_STATE = 12 RDMA_NLDEV_ATTR_PORT_PHYS_STATE = 13 RDMA_NLDEV_ATTR_DEV_NODE_TYPE = 14 + RDMA_NLDEV_ATTR_NDEV_NAME = 51 + RDMA_NLDEV_ATTR_LINK_TYPE = 65 RDMA_NLDEV_SYS_ATTR_NETNS_MODE = 66 RDMA_NLDEV_NET_NS_FD = 68 ) diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go index 03c1900ff..c26f3bf91 100644 --- a/vendor/github.com/vishvananda/netlink/nl/route_linux.go +++ 
b/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -48,7 +48,9 @@ type RtNexthop struct { } func DeserializeRtNexthop(b []byte) *RtNexthop { - return (*RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0])) + return &RtNexthop{ + RtNexthop: *((*unix.RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0]))), + } } func (msg *RtNexthop) Len() int { diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go index 5774cbb15..fe88285f2 100644 --- a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go @@ -23,7 +23,7 @@ func (s1 *IPv6SrHdr) Equal(s2 IPv6SrHdr) bool { return false } for i := range s1.Segments { - if s1.Segments[i].Equal(s2.Segments[i]) != true { + if !s1.Segments[i].Equal(s2.Segments[i]) { return false } } @@ -89,7 +89,7 @@ func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) { } buf = buf[12:] if len(buf)%16 != 0 { - err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)\n", len(buf)) + err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)", len(buf)) return mode, nil, err } for len(buf) > 0 { diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go index 150017726..8172b8471 100644 --- a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go @@ -12,6 +12,7 @@ const ( SEG6_LOCAL_NH6 SEG6_LOCAL_IIF SEG6_LOCAL_OIF + SEG6_LOCAL_BPF __SEG6_LOCAL_MAX ) const ( @@ -34,6 +35,7 @@ const ( SEG6_LOCAL_ACTION_END_S // 12 SEG6_LOCAL_ACTION_END_AS // 13 SEG6_LOCAL_ACTION_END_AM // 14 + SEG6_LOCAL_ACTION_END_BPF // 15 __SEG6_LOCAL_ACTION_MAX ) const ( @@ -71,6 +73,8 @@ func SEG6LocalActionString(action int) string { return "End.AS" case SEG6_LOCAL_ACTION_END_AM: return "End.AM" + case SEG6_LOCAL_ACTION_END_BPF: + return "End.BPF" } return "unknown" } diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go index f7f7f92e6..b5ba039ac 100644 --- a/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -1,6 +1,6 @@ package nl -// syscall package lack of rule atributes type. +// syscall package lack of rule attributes type. 
// Thus there are defined below const ( FRA_UNSPEC = iota @@ -21,6 +21,13 @@ const ( FRA_TABLE /* Extended table id */ FRA_FWMASK /* mask for netfilter mark */ FRA_OIFNAME + FRA_PAD + FRA_L3MDEV /* iif or oif is l3mdev goto its table */ + FRA_UID_RANGE /* UID range */ + FRA_PROTOCOL /* Originator of the rule */ + FRA_IP_PROTO /* ip proto */ + FRA_SPORT_RANGE /* sport */ + FRA_DPORT_RANGE /* dport */ ) // ip rule netlink request types @@ -39,6 +46,7 @@ const ( // socket diags related const ( SOCK_DIAG_BY_FAMILY = 20 /* linux.sock_diag.h */ + SOCK_DESTROY = 21 TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h*/ ) diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index 501f554b2..0720729a9 100644 --- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -1,8 +1,13 @@ package nl import ( + "bytes" "encoding/binary" + "fmt" + "net" "unsafe" + + "golang.org/x/sys/unix" ) // LinkLayer @@ -42,7 +47,14 @@ const ( TCA_FCNT TCA_STATS2 TCA_STAB - TCA_MAX = TCA_STAB + TCA_PAD + TCA_DUMP_INVISIBLE + TCA_CHAIN + TCA_HW_OFFLOAD + TCA_INGRESS_BLOCK + TCA_EGRESS_BLOCK + TCA_DUMP_FLAGS + TCA_MAX = TCA_DUMP_FLAGS ) const ( @@ -56,6 +68,12 @@ const ( TCA_ACT_OPTIONS TCA_ACT_INDEX TCA_ACT_STATS + TCA_ACT_PAD + TCA_ACT_COOKIE + TCA_ACT_FLAGS + TCA_ACT_HW_STATS + TCA_ACT_USED_HW_STATS + TCA_ACT_IN_HW_COUNT TCA_ACT_MAX ) @@ -71,7 +89,11 @@ const ( TCA_STATS_RATE_EST TCA_STATS_QUEUE TCA_STATS_APP - TCA_STATS_MAX = TCA_STATS_APP + TCA_STATS_RATE_EST64 + TCA_STATS_PAD + TCA_STATS_BASIC_HW + TCA_STATS_PKT64 + TCA_STATS_MAX = TCA_STATS_PKT64 ) const ( @@ -83,17 +105,23 @@ const ( SizeofTcNetemCorr = 0x0c SizeofTcNetemReorder = 0x08 SizeofTcNetemCorrupt = 0x08 + SizeOfTcNetemRate = 0x10 SizeofTcTbfQopt = 2*SizeofTcRateSpec + 0x0c SizeofTcHtbCopt = 2*SizeofTcRateSpec + 0x14 SizeofTcHtbGlob = 0x14 SizeofTcU32Key = 0x10 SizeofTcU32Sel = 0x10 // without keys - SizeofTcGen = 0x14 + SizeofTcGen = 0x16 SizeofTcConnmark = SizeofTcGen + 0x04 + SizeofTcCsum = SizeofTcGen + 0x04 SizeofTcMirred = SizeofTcGen + 0x08 SizeofTcTunnelKey = SizeofTcGen + 0x04 SizeofTcSkbEdit = SizeofTcGen SizeofTcPolice = 2*SizeofTcRateSpec + 0x20 + SizeofTcSfqQopt = 0x0b + SizeofTcSfqRedStats = 0x18 + SizeofTcSfqQoptV1 = SizeofTcSfqQopt + SizeofTcSfqRedStats + 0x1c + SizeofUint32Bitfield = 0x8 ) // struct tcmsg { @@ -127,6 +155,18 @@ func (x *TcMsg) Serialize() []byte { return (*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:] } +type Tcf struct { + Install uint64 + LastUse uint64 + Expires uint64 + FirstUse uint64 +} + +func DeserializeTcf(b []byte) *Tcf { + const size = int(unsafe.Sizeof(Tcf{})) + return (*Tcf)(unsafe.Pointer(&b[0:size][0])) +} + // struct tcamsg { // unsigned char tca_family; // unsigned char tca__pad1; @@ -333,6 +373,26 @@ func (x *TcNetemCorrupt) Serialize() []byte { return (*(*[SizeofTcNetemCorrupt]byte)(unsafe.Pointer(x)))[:] } +// TcNetemRate is a struct that represents the rate of a netem qdisc +type TcNetemRate struct { + Rate uint32 + PacketOverhead int32 + CellSize uint32 + CellOverhead int32 +} + +func (msg *TcNetemRate) Len() int { + return SizeofTcRateSpec +} + +func DeserializeTcNetemRate(b []byte) *TcNetemRate { + return (*TcNetemRate)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0])) +} + +func (msg *TcNetemRate) Serialize() []byte { + return (*(*[SizeOfTcNetemRate]byte)(unsafe.Pointer(msg)))[:] +} + // struct tc_tbf_qopt { // struct tc_ratespec rate; // struct tc_ratespec 
peakrate; @@ -691,6 +751,36 @@ func (x *TcConnmark) Serialize() []byte { return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:] } +const ( + TCA_CSUM_UNSPEC = iota + TCA_CSUM_PARMS + TCA_CSUM_TM + TCA_CSUM_PAD + TCA_CSUM_MAX = TCA_CSUM_PAD +) + +// struct tc_csum { +// tc_gen; +// __u32 update_flags; +// } + +type TcCsum struct { + TcGen + UpdateFlags uint32 +} + +func (msg *TcCsum) Len() int { + return SizeofTcCsum +} + +func DeserializeTcCsum(b []byte) *TcCsum { + return (*TcCsum)(unsafe.Pointer(&b[0:SizeofTcCsum][0])) +} + +func (x *TcCsum) Serialize() []byte { + return (*(*[SizeofTcCsum]byte)(unsafe.Pointer(x)))[:] +} + const ( TCA_ACT_MIRRED = 8 ) @@ -735,7 +825,13 @@ const ( TCA_TUNNEL_KEY_ENC_IPV6_SRC TCA_TUNNEL_KEY_ENC_IPV6_DST TCA_TUNNEL_KEY_ENC_KEY_ID - TCA_TUNNEL_KEY_MAX = TCA_TUNNEL_KEY_ENC_KEY_ID + TCA_TUNNEL_KEY_PAD + TCA_TUNNEL_KEY_ENC_DST_PORT + TCA_TUNNEL_KEY_NO_CSUM + TCA_TUNNEL_KEY_ENC_OPTS + TCA_TUNNEL_KEY_ENC_TOS + TCA_TUNNEL_KEY_ENC_TTL + TCA_TUNNEL_KEY_MAX ) type TcTunnelKey struct { @@ -764,7 +860,8 @@ const ( TCA_SKBEDIT_MARK TCA_SKBEDIT_PAD TCA_SKBEDIT_PTYPE - TCA_SKBEDIT_MAX = TCA_SKBEDIT_MARK + TCA_SKBEDIT_MASK + TCA_SKBEDIT_MAX ) type TcSkbEdit struct { @@ -851,6 +948,10 @@ const ( TCA_FQ_FLOW_REFILL_DELAY // flow credit refill delay in usec TCA_FQ_ORPHAN_MASK // mask applied to orphaned skb hashes TCA_FQ_LOW_RATE_THRESHOLD // per packet delay under this rate + TCA_FQ_CE_THRESHOLD // DCTCP-like CE-marking threshold + TCA_FQ_TIMER_SLACK // timer slack + TCA_FQ_HORIZON // time horizon in us + TCA_FQ_HORIZON_DROP // drop packets beyond horizon, or cap their EDT ) const ( @@ -872,3 +973,639 @@ const ( TCA_HFSC_FSC TCA_HFSC_USC ) + +const ( + TCA_FLOWER_UNSPEC = iota + TCA_FLOWER_CLASSID + TCA_FLOWER_INDEV + TCA_FLOWER_ACT + TCA_FLOWER_KEY_ETH_DST /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_DST_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_SRC /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_SRC_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_TYPE /* be16 */ + TCA_FLOWER_KEY_IP_PROTO /* u8 */ + TCA_FLOWER_KEY_IPV4_SRC /* be32 */ + TCA_FLOWER_KEY_IPV4_SRC_MASK /* be32 */ + TCA_FLOWER_KEY_IPV4_DST /* be32 */ + TCA_FLOWER_KEY_IPV4_DST_MASK /* be32 */ + TCA_FLOWER_KEY_IPV6_SRC /* struct in6_addr */ + TCA_FLOWER_KEY_IPV6_SRC_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_IPV6_DST /* struct in6_addr */ + TCA_FLOWER_KEY_IPV6_DST_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_TCP_SRC /* be16 */ + TCA_FLOWER_KEY_TCP_DST /* be16 */ + TCA_FLOWER_KEY_UDP_SRC /* be16 */ + TCA_FLOWER_KEY_UDP_DST /* be16 */ + + TCA_FLOWER_FLAGS + TCA_FLOWER_KEY_VLAN_ID /* be16 */ + TCA_FLOWER_KEY_VLAN_PRIO /* u8 */ + TCA_FLOWER_KEY_VLAN_ETH_TYPE /* be16 */ + + TCA_FLOWER_KEY_ENC_KEY_ID /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_SRC /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_DST /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_DST_MASK /* be32 */ + TCA_FLOWER_KEY_ENC_IPV6_SRC /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_DST /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_DST_MASK /* struct in6_addr */ + + TCA_FLOWER_KEY_TCP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_TCP_DST_MASK /* be16 */ + TCA_FLOWER_KEY_UDP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_UDP_DST_MASK /* be16 */ + TCA_FLOWER_KEY_SCTP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_SCTP_DST_MASK /* be16 */ + + TCA_FLOWER_KEY_SCTP_SRC /* be16 */ + TCA_FLOWER_KEY_SCTP_DST /* be16 */ + + TCA_FLOWER_KEY_ENC_UDP_SRC_PORT /* be16 */ + TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK /* be16 */ + TCA_FLOWER_KEY_ENC_UDP_DST_PORT /* 
be16 */ + TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK /* be16 */ + + TCA_FLOWER_KEY_FLAGS /* be32 */ + TCA_FLOWER_KEY_FLAGS_MASK /* be32 */ + + TCA_FLOWER_KEY_ICMPV4_CODE /* u8 */ + TCA_FLOWER_KEY_ICMPV4_CODE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV4_TYPE /* u8 */ + TCA_FLOWER_KEY_ICMPV4_TYPE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV6_CODE /* u8 */ + TCA_FLOWER_KEY_ICMPV6_CODE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV6_TYPE /* u8 */ + TCA_FLOWER_KEY_ICMPV6_TYPE_MASK /* u8 */ + + TCA_FLOWER_KEY_ARP_SIP /* be32 */ + TCA_FLOWER_KEY_ARP_SIP_MASK /* be32 */ + TCA_FLOWER_KEY_ARP_TIP /* be32 */ + TCA_FLOWER_KEY_ARP_TIP_MASK /* be32 */ + TCA_FLOWER_KEY_ARP_OP /* u8 */ + TCA_FLOWER_KEY_ARP_OP_MASK /* u8 */ + TCA_FLOWER_KEY_ARP_SHA /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_SHA_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA_MASK /* ETH_ALEN */ + + TCA_FLOWER_KEY_MPLS_TTL /* u8 - 8 bits */ + TCA_FLOWER_KEY_MPLS_BOS /* u8 - 1 bit */ + TCA_FLOWER_KEY_MPLS_TC /* u8 - 3 bits */ + TCA_FLOWER_KEY_MPLS_LABEL /* be32 - 20 bits */ + + TCA_FLOWER_KEY_TCP_FLAGS /* be16 */ + TCA_FLOWER_KEY_TCP_FLAGS_MASK /* be16 */ + + TCA_FLOWER_KEY_IP_TOS /* u8 */ + TCA_FLOWER_KEY_IP_TOS_MASK /* u8 */ + TCA_FLOWER_KEY_IP_TTL /* u8 */ + TCA_FLOWER_KEY_IP_TTL_MASK /* u8 */ + + TCA_FLOWER_KEY_CVLAN_ID /* be16 */ + TCA_FLOWER_KEY_CVLAN_PRIO /* u8 */ + TCA_FLOWER_KEY_CVLAN_ETH_TYPE /* be16 */ + + TCA_FLOWER_KEY_ENC_IP_TOS /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TOS_MASK /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL_MASK /* u8 */ + + TCA_FLOWER_KEY_ENC_OPTS + TCA_FLOWER_KEY_ENC_OPTS_MASK + + __TCA_FLOWER_MAX +) + +const TCA_CLS_FLAGS_SKIP_HW = 1 << 0 /* don't offload filter to HW */ +const TCA_CLS_FLAGS_SKIP_SW = 1 << 1 /* don't use filter in SW */ + +// struct tc_sfq_qopt { +// unsigned quantum; /* Bytes per round allocated to flow */ +// int perturb_period; /* Period of hash perturbation */ +// __u32 limit; /* Maximal packets in queue */ +// unsigned divisor; /* Hash divisor */ +// unsigned flows; /* Maximal number of flows */ +// }; + +type TcSfqQopt struct { + Quantum uint8 + Perturb int32 + Limit uint32 + Divisor uint8 + Flows uint8 +} + +func (x *TcSfqQopt) Len() int { + return SizeofTcSfqQopt +} + +func DeserializeTcSfqQopt(b []byte) *TcSfqQopt { + return (*TcSfqQopt)(unsafe.Pointer(&b[0:SizeofTcSfqQopt][0])) +} + +func (x *TcSfqQopt) Serialize() []byte { + return (*(*[SizeofTcSfqQopt]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_sfqred_stats { +// __u32 prob_drop; /* Early drops, below max threshold */ +// __u32 forced_drop; /* Early drops, after max threshold */ +// __u32 prob_mark; /* Marked packets, below max threshold */ +// __u32 forced_mark; /* Marked packets, after max threshold */ +// __u32 prob_mark_head; /* Marked packets, below max threshold */ +// __u32 forced_mark_head;/* Marked packets, after max threshold */ +// }; +type TcSfqRedStats struct { + ProbDrop uint32 + ForcedDrop uint32 + ProbMark uint32 + ForcedMark uint32 + ProbMarkHead uint32 + ForcedMarkHead uint32 +} + +func (x *TcSfqRedStats) Len() int { + return SizeofTcSfqRedStats +} + +func DeserializeTcSfqRedStats(b []byte) *TcSfqRedStats { + return (*TcSfqRedStats)(unsafe.Pointer(&b[0:SizeofTcSfqRedStats][0])) +} + +func (x *TcSfqRedStats) Serialize() []byte { + return (*(*[SizeofTcSfqRedStats]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_sfq_qopt_v1 { +// struct tc_sfq_qopt v0; +// unsigned int depth; /* max number of packets per flow */ +// unsigned int headdrop; +// +// /* SFQRED parameters */ +// +// __u32 limit; /* 
HARD maximal flow queue length (bytes) */ +// __u32 qth_min; /* Min average length threshold (bytes) */ +// __u32 qth_max; /* Max average length threshold (bytes) */ +// unsigned char Wlog; /* log(W) */ +// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ +// unsigned char Scell_log; /* cell size for idle damping */ +// unsigned char flags; +// __u32 max_P; /* probability, high resolution */ +// +// /* SFQRED stats */ +// +// struct tc_sfqred_stats stats; +// }; +type TcSfqQoptV1 struct { + TcSfqQopt + Depth uint32 + HeadDrop uint32 + Limit uint32 + QthMin uint32 + QthMax uint32 + Wlog byte + Plog byte + ScellLog byte + Flags byte + MaxP uint32 + TcSfqRedStats +} + +func (x *TcSfqQoptV1) Len() int { + return SizeofTcSfqQoptV1 +} + +func DeserializeTcSfqQoptV1(b []byte) *TcSfqQoptV1 { + return (*TcSfqQoptV1)(unsafe.Pointer(&b[0:SizeofTcSfqQoptV1][0])) +} + +func (x *TcSfqQoptV1) Serialize() []byte { + return (*(*[SizeofTcSfqQoptV1]byte)(unsafe.Pointer(x)))[:] +} + +// IPProto represents Flower ip_proto attribute +type IPProto uint8 + +const ( + IPPROTO_TCP IPProto = unix.IPPROTO_TCP + IPPROTO_UDP IPProto = unix.IPPROTO_UDP + IPPROTO_SCTP IPProto = unix.IPPROTO_SCTP + IPPROTO_ICMP IPProto = unix.IPPROTO_ICMP + IPPROTO_ICMPV6 IPProto = unix.IPPROTO_ICMPV6 +) + +func (i IPProto) Serialize() []byte { + arr := make([]byte, 1) + arr[0] = byte(i) + return arr +} + +func (i IPProto) String() string { + switch i { + case IPPROTO_TCP: + return "tcp" + case IPPROTO_UDP: + return "udp" + case IPPROTO_SCTP: + return "sctp" + case IPPROTO_ICMP: + return "icmp" + case IPPROTO_ICMPV6: + return "icmpv6" + } + return fmt.Sprintf("%d", i) +} + +const ( + MaxOffs = 128 + SizeOfPeditSel = 24 + SizeOfPeditKey = 24 + + TCA_PEDIT_KEY_EX_HTYPE = 1 + TCA_PEDIT_KEY_EX_CMD = 2 +) + +const ( + TCA_PEDIT_UNSPEC = iota + TCA_PEDIT_TM + TCA_PEDIT_PARMS + TCA_PEDIT_PAD + TCA_PEDIT_PARMS_EX + TCA_PEDIT_KEYS_EX + TCA_PEDIT_KEY_EX +) + +// /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. 
It +// * means no specific header type - offset is relative to the network layer +// */ +type PeditHeaderType uint16 + +const ( + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = iota + TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + __PEDIT_HDR_TYPE_MAX +) + +type PeditCmd uint16 + +const ( + TCA_PEDIT_KEY_EX_CMD_SET = 0 + TCA_PEDIT_KEY_EX_CMD_ADD = 1 +) + +type TcPeditSel struct { + TcGen + NKeys uint8 + Flags uint8 +} + +func DeserializeTcPeditKey(b []byte) *TcPeditKey { + return (*TcPeditKey)(unsafe.Pointer(&b[0:SizeOfPeditKey][0])) +} + +func DeserializeTcPedit(b []byte) (*TcPeditSel, []TcPeditKey) { + x := &TcPeditSel{} + copy((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(x)))[:SizeOfPeditSel], b) + + var keys []TcPeditKey + + next := SizeOfPeditKey + var i uint8 + for i = 0; i < x.NKeys; i++ { + keys = append(keys, *DeserializeTcPeditKey(b[next:])) + next += SizeOfPeditKey + } + + return x, keys +} + +type TcPeditKey struct { + Mask uint32 + Val uint32 + Off uint32 + At uint32 + OffMask uint32 + Shift uint32 +} + +type TcPeditKeyEx struct { + HeaderType PeditHeaderType + Cmd PeditCmd +} + +type TcPedit struct { + Sel TcPeditSel + Keys []TcPeditKey + KeysEx []TcPeditKeyEx + Extend uint8 +} + +func (p *TcPedit) Encode(parent *RtAttr) { + parent.AddRtAttr(TCA_ACT_KIND, ZeroTerminated("pedit")) + actOpts := parent.AddRtAttr(TCA_ACT_OPTIONS, nil) + + bbuf := bytes.NewBuffer(make([]byte, 0, int(unsafe.Sizeof(p.Sel)+unsafe.Sizeof(p.Keys)))) + + bbuf.Write((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(&p.Sel)))[:]) + + for i := uint8(0); i < p.Sel.NKeys; i++ { + bbuf.Write((*(*[SizeOfPeditKey]byte)(unsafe.Pointer(&p.Keys[i])))[:]) + } + actOpts.AddRtAttr(TCA_PEDIT_PARMS_EX, bbuf.Bytes()) + + exAttrs := actOpts.AddRtAttr(int(TCA_PEDIT_KEYS_EX|NLA_F_NESTED), nil) + for i := uint8(0); i < p.Sel.NKeys; i++ { + keyAttr := exAttrs.AddRtAttr(int(TCA_PEDIT_KEY_EX|NLA_F_NESTED), nil) + + htypeBuf := make([]byte, 2) + cmdBuf := make([]byte, 2) + + NativeEndian().PutUint16(htypeBuf, uint16(p.KeysEx[i].HeaderType)) + NativeEndian().PutUint16(cmdBuf, uint16(p.KeysEx[i].Cmd)) + + keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_HTYPE, htypeBuf) + keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_CMD, cmdBuf) + } +} + +func (p *TcPedit) SetEthDst(mac net.HardwareAddr) { + u32 := NativeEndian().Uint32(mac) + u16 := NativeEndian().Uint16(mac[4:]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = uint32(u16) + tKey.Mask = 0xffff0000 + tKey.Off = 4 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetEthSrc(mac net.HardwareAddr) { + u16 := NativeEndian().Uint16(mac) + u32 := NativeEndian().Uint32(mac[2:]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = uint32(u16) << 16 + tKey.Mask = 0x0000ffff + tKey.Off = 4 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Mask = 0 + tKey.Off = 8 + + tKeyEx.HeaderType = 
TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv6Src(ip6 net.IP) { + u32 := NativeEndian().Uint32(ip6[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 8 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[4:8]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 12 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[8:12]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 16 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[12:16]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 20 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetDstIP(ip net.IP) { + if ip.To4() != nil { + p.SetIPv4Dst(ip) + } else { + p.SetIPv6Dst(ip) + } +} + +func (p *TcPedit) SetSrcIP(ip net.IP) { + if ip.To4() != nil { + p.SetIPv4Src(ip) + } else { + p.SetIPv6Src(ip) + } +} + +func (p *TcPedit) SetIPv6Dst(ip6 net.IP) { + u32 := NativeEndian().Uint32(ip6[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 24 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[4:8]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 28 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[8:12]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 32 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[12:16]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 36 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv4Src(ip net.IP) { + u32 := NativeEndian().Uint32(ip[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 12 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv4Dst(ip net.IP) { + u32 := NativeEndian().Uint32(ip[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 16 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + 
tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +// SetDstPort only tcp and udp are supported to set port +func (p *TcPedit) SetDstPort(dstPort uint16, protocol uint8) { + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + switch protocol { + case unix.IPPROTO_TCP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + case unix.IPPROTO_UDP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + default: + return + } + + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + tKey.Val = uint32(Swap16(dstPort)) << 16 + tKey.Mask = 0x0000ffff + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +// SetSrcPort only tcp and udp are supported to set port +func (p *TcPedit) SetSrcPort(srcPort uint16, protocol uint8) { + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + switch protocol { + case unix.IPPROTO_TCP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + case unix.IPPROTO_UDP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + default: + return + } + + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + tKey.Val = uint32(Swap16(srcPort)) + tKey.Mask = 0xffff0000 + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} diff --git a/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go new file mode 100644 index 000000000..f209125df --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go @@ -0,0 +1,41 @@ +package nl + +const ( + VDPA_GENL_NAME = "vdpa" + VDPA_GENL_VERSION = 0x1 +) + +const ( + VDPA_CMD_UNSPEC = iota + VDPA_CMD_MGMTDEV_NEW + VDPA_CMD_MGMTDEV_GET /* can dump */ + VDPA_CMD_DEV_NEW + VDPA_CMD_DEV_DEL + VDPA_CMD_DEV_GET /* can dump */ + VDPA_CMD_DEV_CONFIG_GET /* can dump */ + VDPA_CMD_DEV_VSTATS_GET +) + +const ( + VDPA_ATTR_UNSPEC = iota + VDPA_ATTR_MGMTDEV_BUS_NAME + VDPA_ATTR_MGMTDEV_DEV_NAME + VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES + VDPA_ATTR_DEV_NAME + VDPA_ATTR_DEV_ID + VDPA_ATTR_DEV_VENDOR_ID + VDPA_ATTR_DEV_MAX_VQS + VDPA_ATTR_DEV_MAX_VQ_SIZE + VDPA_ATTR_DEV_MIN_VQ_SIZE + VDPA_ATTR_DEV_NET_CFG_MACADDR + VDPA_ATTR_DEV_NET_STATUS + VDPA_ATTR_DEV_NET_CFG_MAX_VQP + VDPA_ATTR_DEV_NET_CFG_MTU + VDPA_ATTR_DEV_NEGOTIATED_FEATURES + VDPA_ATTR_DEV_MGMTDEV_MAX_VQS + VDPA_ATTR_DEV_SUPPORTED_FEATURES + VDPA_ATTR_DEV_QUEUE_INDEX + VDPA_ATTR_DEV_VENDOR_ATTR_NAME + VDPA_ATTR_DEV_VENDOR_ATTR_VALUE + VDPA_ATTR_DEV_FEATURES +) diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go index dce9073f7..cdb318ba5 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -131,7 +131,15 @@ func (x *XfrmAddress) ToIP() net.IP { return ip } -func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet { +// family is only used when x and prefixlen are both 0 +func (x *XfrmAddress) ToIPNet(prefixlen uint8, family uint16) *net.IPNet { + empty := [SizeofXfrmAddress]byte{} + if bytes.Equal(x[:], empty[:]) && prefixlen == 0 { + if family == FAMILY_V6 { + return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(int(prefixlen), 128)} + } + return &net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(int(prefixlen), 32)} + } ip := x.ToIP() if GetIPFamily(ip) == FAMILY_V4 { return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go 
b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go index b6290fd54..e8920b9a6 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go @@ -13,8 +13,9 @@ const ( SizeofXfrmAlgoAuth = 0x48 SizeofXfrmAlgoAEAD = 0x48 SizeofXfrmEncapTmpl = 0x18 - SizeofXfrmUsersaFlush = 0x8 + SizeofXfrmUsersaFlush = 0x1 SizeofXfrmReplayStateEsn = 0x18 + SizeofXfrmReplayState = 0x0c ) const ( @@ -28,6 +29,11 @@ const ( XFRM_STATE_ESN = 128 ) +const ( + XFRM_SA_XFLAG_DONT_ENCAP_DSCP = 1 + XFRM_SA_XFLAG_OSEQ_MAY_WRAP = 2 +) + // struct xfrm_usersa_id { // xfrm_address_t daddr; // __be32 spi; @@ -103,6 +109,7 @@ func (msg *XfrmStats) Serialize() []byte { // }; // // #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1 +// #define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2 // type XfrmUsersaInfo struct { @@ -332,3 +339,23 @@ func (msg *XfrmReplayStateEsn) Serialize() []byte { // We deliberately do not pass Bmp, as it gets set by the kernel. return (*(*[SizeofXfrmReplayStateEsn]byte)(unsafe.Pointer(msg)))[:] } + +// struct xfrm_replay_state { +// __u32 oseq; +// __u32 seq; +// __u32 bitmap; +// }; + +type XfrmReplayState struct { + OSeq uint32 + Seq uint32 + BitMap uint32 +} + +func DeserializeXfrmReplayState(b []byte) *XfrmReplayState { + return (*XfrmReplayState)(unsafe.Pointer(&b[0:SizeofXfrmReplayState][0])) +} + +func (msg *XfrmReplayState) Serialize() []byte { + return (*(*[SizeofXfrmReplayState]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/proc_event_linux.go b/vendor/github.com/vishvananda/netlink/proc_event_linux.go new file mode 100644 index 000000000..ac8762bd8 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/proc_event_linux.go @@ -0,0 +1,208 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "syscall" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +const CN_IDX_PROC = 0x1 + +const ( + PROC_EVENT_NONE = 0x00000000 + PROC_EVENT_FORK = 0x00000001 + PROC_EVENT_EXEC = 0x00000002 + PROC_EVENT_UID = 0x00000004 + PROC_EVENT_GID = 0x00000040 + PROC_EVENT_SID = 0x00000080 + PROC_EVENT_PTRACE = 0x00000100 + PROC_EVENT_COMM = 0x00000200 + PROC_EVENT_COREDUMP = 0x40000000 + PROC_EVENT_EXIT = 0x80000000 +) + +const ( + CN_VAL_PROC = 0x1 + PROC_CN_MCAST_LISTEN = 0x1 +) + +type ProcEventMsg interface { + Pid() uint32 + Tgid() uint32 +} + +type ProcEventHeader struct { + What uint32 + CPU uint32 + Timestamp uint64 +} + +type ProcEvent struct { + ProcEventHeader + Msg ProcEventMsg +} + +func (pe *ProcEvent) setHeader(h ProcEventHeader) { + pe.What = h.What + pe.CPU = h.CPU + pe.Timestamp = h.Timestamp +} + +type ExitProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + ExitCode uint32 + ExitSignal uint32 + ParentPid uint32 + ParentTgid uint32 +} + +func (e *ExitProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *ExitProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +type ExecProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 +} + +func (e *ExecProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *ExecProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +type ForkProcEvent struct { + ParentPid uint32 + ParentTgid uint32 + ChildPid uint32 + ChildTgid uint32 +} + +func (e *ForkProcEvent) Pid() uint32 { + return e.ParentPid +} + +func (e *ForkProcEvent) Tgid() uint32 { + return e.ParentTgid +} + +type CommProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + Comm [16]byte +} 
+ +func (e *CommProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *CommProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +func ProcEventMonitor(ch chan<- ProcEvent, done <-chan struct{}, errorChan chan<- error) error { + h, err := NewHandle() + if err != nil { + return err + } + defer h.Delete() + + s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_CONNECTOR, CN_IDX_PROC) + if err != nil { + return err + } + + var nlmsg nl.NetlinkRequest + + nlmsg.Pid = uint32(os.Getpid()) + nlmsg.Type = unix.NLMSG_DONE + nlmsg.Len = uint32(unix.SizeofNlMsghdr) + + cm := nl.NewCnMsg(CN_IDX_PROC, CN_VAL_PROC, PROC_CN_MCAST_LISTEN) + nlmsg.AddData(cm) + + s.Send(&nlmsg) + + if done != nil { + go func() { + <-done + s.Close() + }() + } + + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + errorChan <- err + return + } + if from.Pid != nl.PidKernel { + errorChan <- fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return + } + + for _, m := range msgs { + e := parseNetlinkMessage(m) + if e != nil { + ch <- *e + } + } + + } + }() + + return nil +} + +func parseNetlinkMessage(m syscall.NetlinkMessage) *ProcEvent { + if m.Header.Type == unix.NLMSG_DONE { + buf := bytes.NewBuffer(m.Data) + msg := &nl.CnMsg{} + hdr := &ProcEventHeader{} + binary.Read(buf, nl.NativeEndian(), msg) + binary.Read(buf, nl.NativeEndian(), hdr) + + pe := &ProcEvent{} + pe.setHeader(*hdr) + switch hdr.What { + case PROC_EVENT_EXIT: + event := &ExitProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_FORK: + event := &ForkProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_EXEC: + event := &ExecProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_COMM: + event := &CommProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + } + return nil + } + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/protinfo.go b/vendor/github.com/vishvananda/netlink/protinfo.go index 60b23b374..0163cba3a 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo.go +++ b/vendor/github.com/vishvananda/netlink/protinfo.go @@ -6,14 +6,16 @@ import ( // Protinfo represents bridge flags from netlink. 
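// Isolated and NeighSuppress are populated from the IFLA_BRPORT_ISOLATED and
// IFLA_BRPORT_NEIGH_SUPPRESS bridge port attributes (see parseProtinfo in
// protinfo_linux.go).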
type Protinfo struct { - Hairpin bool - Guard bool - FastLeave bool - RootBlock bool - Learning bool - Flood bool - ProxyArp bool - ProxyArpWiFi bool + Hairpin bool + Guard bool + FastLeave bool + RootBlock bool + Learning bool + Flood bool + ProxyArp bool + ProxyArpWiFi bool + Isolated bool + NeighSuppress bool } // String returns a list of enabled flags @@ -47,6 +49,12 @@ func (prot *Protinfo) String() string { if prot.ProxyArpWiFi { boolStrings = append(boolStrings, "ProxyArpWiFi") } + if prot.Isolated { + boolStrings = append(boolStrings, "Isolated") + } + if prot.NeighSuppress { + boolStrings = append(boolStrings, "NeighSuppress") + } return strings.Join(boolStrings, " ") } diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 15b65123c..1ba25d3cd 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -68,6 +68,10 @@ func parseProtinfo(infos []syscall.NetlinkRouteAttr) (pi Protinfo) { pi.ProxyArp = byteToBool(info.Value[0]) case nl.IFLA_BRPORT_PROXYARP_WIFI: pi.ProxyArpWiFi = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_ISOLATED: + pi.Isolated = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_NEIGH_SUPPRESS: + pi.NeighSuppress = byteToBool(info.Value[0]) } } return diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go index af78305ac..067743d39 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc.go +++ b/vendor/github.com/vishvananda/netlink/qdisc.go @@ -17,19 +17,29 @@ const ( HANDLE_MIN_EGRESS = 0xFFFFFFF3 ) +const ( + HORIZON_DROP_POLICY_CAP = 0 + HORIZON_DROP_POLICY_DROP = 1 + HORIZON_DROP_POLICY_DEFAULT = 255 +) + type Qdisc interface { Attrs() *QdiscAttrs Type() string } +type QdiscStatistics ClassStatistics + // QdiscAttrs represents a netlink qdisc. A qdisc is associated with a link, // has a handle, a parent and a refcnt. The root qdisc of a device should // have parent == HANDLE_ROOT. 
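//
// A minimal, illustrative description of a root qdisc (link is any previously
// obtained Link; the handle value is hypothetical):
//
//	attrs := QdiscAttrs{
//		LinkIndex: link.Attrs().Index,
//		Handle:    MakeHandle(1, 0),
//		Parent:    HANDLE_ROOT,
//	}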
type QdiscAttrs struct { - LinkIndex int - Handle uint32 - Parent uint32 - Refcnt uint32 // read only + LinkIndex int + Handle uint32 + Parent uint32 + Refcnt uint32 // read only + IngressBlock *uint32 + Statistics *QdiscStatistics } func (q QdiscAttrs) String() string { @@ -113,6 +123,7 @@ type Htb struct { Defcls uint32 Debug uint32 DirectPkts uint32 + DirectQlen *uint32 } func NewHtb(attrs QdiscAttrs) *Htb { @@ -123,6 +134,7 @@ func NewHtb(attrs QdiscAttrs) *Htb { Rate2Quantum: 10, Debug: 0, DirectPkts: 0, + DirectQlen: nil, } } @@ -150,6 +162,7 @@ type NetemQdiscAttrs struct { ReorderCorr float32 // in % CorruptProb float32 // in % CorruptCorr float32 // in % + Rate64 uint64 } func (q NetemQdiscAttrs) String() string { @@ -174,6 +187,7 @@ type Netem struct { ReorderCorr uint32 CorruptProb uint32 CorruptCorr uint32 + Rate64 uint64 } func (netem *Netem) String() string { @@ -210,6 +224,19 @@ func (qdisc *Tbf) Type() string { return "tbf" } +// Clsact is a qdisc for adding filters +type Clsact struct { + QdiscAttrs +} + +func (qdisc *Clsact) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Clsact) Type() string { + return "clsact" +} + // Ingress is a qdisc for adding ingress filters type Ingress struct { QdiscAttrs @@ -278,22 +305,25 @@ type Fq struct { FlowDefaultRate uint32 FlowMaxRate uint32 // called BucketsLog under the hood - Buckets uint32 - FlowRefillDelay uint32 - LowRateThreshold uint32 + Buckets uint32 + FlowRefillDelay uint32 + LowRateThreshold uint32 + Horizon uint32 + HorizonDropPolicy uint8 } func (fq *Fq) String() string { return fmt.Sprintf( - "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v}", - fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold, + "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v, Horizon: %v, HorizonDropPolicy: %v}", + fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold, fq.Horizon, fq.HorizonDropPolicy, ) } func NewFq(attrs QdiscAttrs) *Fq { return &Fq{ - QdiscAttrs: attrs, - Pacing: 1, + QdiscAttrs: attrs, + Pacing: 1, + HorizonDropPolicy: HORIZON_DROP_POLICY_DEFAULT, } } @@ -308,13 +338,15 @@ func (qdisc *Fq) Type() string { // FQ_Codel (Fair Queuing Controlled Delay) is queuing discipline that combines Fair Queuing with the CoDel AQM scheme. type FqCodel struct { QdiscAttrs - Target uint32 - Limit uint32 - Interval uint32 - ECN uint32 - Flows uint32 - Quantum uint32 - // There are some more attributes here, but support for them seems not ubiquitous + Target uint32 + Limit uint32 + Interval uint32 + ECN uint32 + Flows uint32 + Quantum uint32 + CEThreshold uint32 + DropBatchSize uint32 + MemoryLimit uint32 } func (fqcodel *FqCodel) String() string { @@ -338,3 +370,27 @@ func (qdisc *FqCodel) Attrs() *QdiscAttrs { func (qdisc *FqCodel) Type() string { return "fq_codel" } + +type Sfq struct { + QdiscAttrs + // TODO: Only the simplified options for SFQ are handled here. Support for the extended one can be added later. 
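+	// Quantum, Perturb, Limit and Divisor mirror the kernel's struct
+	// tc_sfq_qopt (see TcSfqQopt in nl/tc_linux.go).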
+ Quantum uint8 + Perturb uint8 + Limit uint32 + Divisor uint8 +} + +func (sfq *Sfq) String() string { + return fmt.Sprintf( + "{%v -- Quantum: %v, Perturb: %v, Limit: %v, Divisor: %v}", + sfq.Attrs(), sfq.Quantum, sfq.Perturb, sfq.Limit, sfq.Divisor, + ) +} + +func (qdisc *Sfq) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Sfq) Type() string { + return "sfq" +} diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index e9eee5908..e732ae3bd 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "strconv" "strings" + "sync" "syscall" "github.com/vishvananda/netlink/nl" @@ -17,6 +18,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { var lossCorr, delayCorr, duplicateCorr uint32 var reorderProb, reorderCorr uint32 var corruptProb, corruptCorr uint32 + var rate64 uint64 latency := nattrs.Latency loss := Percentage2u32(nattrs.Loss) @@ -57,6 +59,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { corruptProb = Percentage2u32(nattrs.CorruptProb) corruptCorr = Percentage2u32(nattrs.CorruptCorr) + rate64 = nattrs.Rate64 return &Netem{ QdiscAttrs: attrs, @@ -73,6 +76,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { ReorderCorr: reorderCorr, CorruptProb: corruptProb, CorruptCorr: corruptCorr, + Rate64: rate64, } } @@ -159,6 +163,9 @@ func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type()))) + if qdisc.Attrs().IngressBlock != nil { + req.AddData(nl.NewRtAttr(nl.TCA_INGRESS_BLOCK, nl.Uint32Attr(*qdisc.Attrs().IngressBlock))) + } options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) @@ -194,7 +201,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { opt.Debug = qdisc.Debug opt.DirectPkts = qdisc.DirectPkts options.AddRtAttr(nl.TCA_HTB_INIT, opt.Serialize()) - // options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, opt.Serialize()) + if qdisc.DirectQlen != nil { + options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, nl.Uint32Attr(*qdisc.DirectQlen)) + } case *Hfsc: opt := nl.TcHfscOpt{} opt.Defcls = qdisc.Defcls @@ -231,6 +240,19 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if reorder.Probability > 0 { options.AddRtAttr(nl.TCA_NETEM_REORDER, reorder.Serialize()) } + // Rate + if qdisc.Rate64 > 0 { + rate := nl.TcNetemRate{} + if qdisc.Rate64 >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_NETEM_RATE64, nl.Uint64Attr(qdisc.Rate64)) + rate.Rate = ^uint32(0) + } else { + rate.Rate = uint32(qdisc.Rate64) + } + options.AddRtAttr(nl.TCA_NETEM_RATE, rate.Serialize()) + } + case *Clsact: + options = nil case *Ingress: // ingress filters must use the proper handle if qdisc.Attrs().Parent != HANDLE_INGRESS { @@ -250,13 +272,24 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if qdisc.Quantum > 0 { options.AddRtAttr(nl.TCA_FQ_CODEL_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) } - + if qdisc.CEThreshold > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_CE_THRESHOLD, nl.Uint32Attr(qdisc.CEThreshold)) + } + if qdisc.DropBatchSize > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_DROP_BATCH_SIZE, nl.Uint32Attr(qdisc.DropBatchSize)) + } + if qdisc.MemoryLimit > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_MEMORY_LIMIT, nl.Uint32Attr(qdisc.MemoryLimit)) + } case *Fq: options.AddRtAttr(nl.TCA_FQ_RATE_ENABLE, 
nl.Uint32Attr((uint32(qdisc.Pacing)))) if qdisc.Buckets > 0 { options.AddRtAttr(nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets)))) } + if qdisc.PacketLimit > 0 { + options.AddRtAttr(nl.TCA_FQ_PLIMIT, nl.Uint32Attr((uint32(qdisc.PacketLimit)))) + } if qdisc.LowRateThreshold > 0 { options.AddRtAttr(nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold)))) } @@ -278,6 +311,20 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if qdisc.FlowDefaultRate > 0 { options.AddRtAttr(nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate)))) } + if qdisc.Horizon > 0 { + options.AddRtAttr(nl.TCA_FQ_HORIZON, nl.Uint32Attr(qdisc.Horizon)) + } + if qdisc.HorizonDropPolicy != HORIZON_DROP_POLICY_DEFAULT { + options.AddRtAttr(nl.TCA_FQ_HORIZON_DROP, nl.Uint8Attr(qdisc.HorizonDropPolicy)) + } + case *Sfq: + opt := nl.TcSfqQoptV1{} + opt.TcSfqQopt.Quantum = qdisc.Quantum + opt.TcSfqQopt.Perturb = int32(qdisc.Perturb) + opt.TcSfqQopt.Limit = qdisc.Limit + opt.TcSfqQopt.Divisor = qdisc.Divisor + + options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) default: options = nil } @@ -362,6 +409,10 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { qdisc = &FqCodel{} case "netem": qdisc = &Netem{} + case "sfq": + qdisc = &Sfq{} + case "clsact": + qdisc = &Clsact{} default: qdisc = &GenericQdisc{QdiscType: qdiscType} } @@ -417,9 +468,29 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { if err := parseNetemData(qdisc, attr.Value); err != nil { return nil, err } + case "sfq": + if err := parseSfqData(qdisc, attr.Value); err != nil { + return nil, err + } // no options for ingress } + case nl.TCA_INGRESS_BLOCK: + ingressBlock := new(uint32) + *ingressBlock = native.Uint32(attr.Value) + base.IngressBlock = ingressBlock + case nl.TCA_STATS: + s, err := parseTcStats(attr.Value) + if err != nil { + return nil, err + } + base.Statistics = (*QdiscStatistics)(s) + case nl.TCA_STATS2: + s, err := parseTcStats2(attr.Value) + if err != nil { + return nil, err + } + base.Statistics = (*QdiscStatistics)(s) } } *qdisc.Attrs() = base @@ -446,7 +517,6 @@ func parsePrioData(qdisc Qdisc, value []byte) error { } func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { - native = nl.NativeEndian() htb := qdisc.(*Htb) for _, datum := range data { switch datum.Attr.Type { @@ -458,15 +528,14 @@ func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { htb.Debug = opt.Debug htb.DirectPkts = opt.DirectPkts case nl.TCA_HTB_DIRECT_QLEN: - // TODO - //htb.DirectQlen = native.uint32(datum.Value) + directQlen := native.Uint32(datum.Value) + htb.DirectQlen = &directQlen } } return nil } func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { - native = nl.NativeEndian() fqCodel := qdisc.(*FqCodel) for _, datum := range data { @@ -483,6 +552,12 @@ func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { fqCodel.Flows = native.Uint32(datum.Value) case nl.TCA_FQ_CODEL_QUANTUM: fqCodel.Quantum = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_CE_THRESHOLD: + fqCodel.CEThreshold = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_DROP_BATCH_SIZE: + fqCodel.DropBatchSize = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_MEMORY_LIMIT: + fqCodel.MemoryLimit = native.Uint32(datum.Value) } } return nil @@ -490,13 +565,11 @@ func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { func parseHfscData(qdisc Qdisc, data []byte) error { Hfsc := qdisc.(*Hfsc) - native = nl.NativeEndian() 
Hfsc.Defcls = native.Uint16(data) return nil } func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { - native = nl.NativeEndian() fq := qdisc.(*Fq) for _, datum := range data { switch datum.Attr.Type { @@ -522,6 +595,11 @@ func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { fq.FlowMaxRate = native.Uint32(datum.Value) case nl.TCA_FQ_FLOW_DEFAULT_RATE: fq.FlowDefaultRate = native.Uint32(datum.Value) + case nl.TCA_FQ_HORIZON: + fq.Horizon = native.Uint32(datum.Value) + case nl.TCA_FQ_HORIZON_DROP: + fq.HorizonDropPolicy = datum.Value[0] + } } return nil @@ -540,6 +618,8 @@ func parseNetemData(qdisc Qdisc, value []byte) error { if err != nil { return err } + var rate *nl.TcNetemRate + var rate64 uint64 for _, datum := range data { switch datum.Attr.Type { case nl.TCA_NETEM_CORR: @@ -555,13 +635,23 @@ func parseNetemData(qdisc Qdisc, value []byte) error { opt := nl.DeserializeTcNetemReorder(datum.Value) netem.ReorderProb = opt.Probability netem.ReorderCorr = opt.Correlation + case nl.TCA_NETEM_RATE: + rate = nl.DeserializeTcNetemRate(datum.Value) + case nl.TCA_NETEM_RATE64: + rate64 = native.Uint64(datum.Value) } } + if rate != nil { + netem.Rate64 = uint64(rate.Rate) + if rate64 > 0 { + netem.Rate64 = rate64 + } + } + return nil } func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { - native = nl.NativeEndian() tbf := qdisc.(*Tbf) for _, datum := range data { switch datum.Attr.Type { @@ -582,6 +672,17 @@ func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { return nil } +func parseSfqData(qdisc Qdisc, value []byte) error { + sfq := qdisc.(*Sfq) + opt := nl.DeserializeTcSfqQoptV1(value) + sfq.Quantum = opt.TcSfqQopt.Quantum + sfq.Perturb = uint8(opt.TcSfqQopt.Perturb) + sfq.Limit = opt.TcSfqQopt.Limit + sfq.Divisor = opt.TcSfqQopt.Divisor + + return nil +} + const ( TIME_UNITS_PER_SEC = 1000000 ) @@ -590,6 +691,9 @@ var ( tickInUsec float64 clockFactor float64 hz float64 + + // Without this, the go race detector may report races. 
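+	// initClockMutex serializes the lazy initialization done by initClock so
+	// that concurrent first calls to TickInUsec, ClockFactor and Hz do not
+	// race on tickInUsec, clockFactor and hz.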
+ initClockMutex sync.Mutex ) func initClock() { @@ -598,10 +702,10 @@ func initClock() { return } parts := strings.Split(strings.TrimSpace(string(data)), " ") - if len(parts) < 3 { + if len(parts) < 4 { return } - var vals [3]uint64 + var vals [4]uint64 for i := range vals { val, err := strconv.ParseUint(parts[i], 16, 32) if err != nil { @@ -615,10 +719,17 @@ func initClock() { } clockFactor = float64(vals[2]) / TIME_UNITS_PER_SEC tickInUsec = float64(vals[0]) / float64(vals[1]) * clockFactor - hz = float64(vals[0]) + if vals[2] == 1000000 { + // ref https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/lib/utils.c#n963 + hz = float64(vals[3]) + } else { + hz = 100 + } } func TickInUsec() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() if tickInUsec == 0.0 { initClock() } @@ -626,6 +737,8 @@ func TickInUsec() float64 { } func ClockFactor() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() if clockFactor == 0.0 { initClock() } @@ -633,6 +746,8 @@ func ClockFactor() float64 { } func Hz() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() if hz == 0.0 { initClock() } @@ -663,6 +778,11 @@ func latency(rate uint64, limit, buffer uint32) float64 { return TIME_UNITS_PER_SEC*(float64(limit)/float64(rate)) - float64(tick2Time(buffer)) } -func Xmittime(rate uint64, size uint32) float64 { - return TickInUsec() * TIME_UNITS_PER_SEC * (float64(size) / float64(rate)) +func Xmittime(rate uint64, size uint32) uint32 { + // https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/tc/tc_core.c#n62 + return time2Tick(uint32(TIME_UNITS_PER_SEC * (float64(size) / float64(rate)))) +} + +func Xmitsize(rate uint64, ticks uint32) uint32 { + return uint32((float64(rate) * float64(tick2Time(ticks))) / TIME_UNITS_PER_SEC) } diff --git a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go index 2d0bdc8c3..036399db6 100644 --- a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go +++ b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go @@ -77,28 +77,39 @@ func executeOneGetRdmaLink(data []byte) (*RdmaLink, error) { return &link, nil } -func execRdmaGetLink(req *nl.NetlinkRequest, name string) (*RdmaLink, error) { +func execRdmaSetLink(req *nl.NetlinkRequest) error { + + _, err := req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkList gets a list of RDMA link devices. +// Equivalent to: `rdma dev show` +func RdmaLinkList() ([]*RdmaLink, error) { + return pkgHandle.RdmaLinkList() +} + +// RdmaLinkList gets a list of RDMA link devices. 
+// Equivalent to: `rdma dev show` +func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) msgs, err := req.Execute(unix.NETLINK_RDMA, 0) if err != nil { return nil, err } + + var res []*RdmaLink for _, m := range msgs { link, err := executeOneGetRdmaLink(m) if err != nil { return nil, err } - if link.Attrs.Name == name { - return link, nil - } + res = append(res, link) } - return nil, fmt.Errorf("Rdma device %v not found", name) -} - -func execRdmaSetLink(req *nl.NetlinkRequest) error { - _, err := req.Execute(unix.NETLINK_RDMA, 0) - return err + return res, nil } // RdmaLinkByName finds a link by name and returns a pointer to the object if @@ -110,11 +121,16 @@ func RdmaLinkByName(name string) (*RdmaLink, error) { // RdmaLinkByName finds a link by name and returns a pointer to the object if // found and nil error, otherwise returns error code. func (h *Handle) RdmaLinkByName(name string) (*RdmaLink, error) { - - proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET) - req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) - - return execRdmaGetLink(req, name) + links, err := h.RdmaLinkList() + if err != nil { + return nil, err + } + for _, link := range links { + if link.Attrs.Name == name { + return link, nil + } + } + return nil, fmt.Errorf("Rdma device %v not found", name) } // RdmaLinkSetName sets the name of the rdma link device. Return nil on success @@ -262,3 +278,54 @@ func (h *Handle) RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error { return execRdmaSetLink(req) } + +// RdmaLinkDel deletes an rdma link +// +// Similar to: rdma link delete NAME +// REF: https://man7.org/linux/man-pages/man8/rdma-link.8.html +func RdmaLinkDel(name string) error { + return pkgHandle.RdmaLinkDel(name) +} + +// RdmaLinkDel deletes an rdma link. +func (h *Handle) RdmaLinkDel(name string) error { + link, err := h.RdmaLinkByName(name) + if err != nil { + return err + } + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_DELLINK) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + b := make([]byte, 4) + native.PutUint32(b, link.Attrs.Index) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, b)) + + _, err = req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkAdd adds an rdma link for the specified type to the network device. +// Similar to: rdma link add NAME type TYPE netdev NETDEV +// NAME - specifies the new name of the rdma link to add +// TYPE - specifies which rdma type to use. Link types: +// rxe - Soft RoCE driver +// siw - Soft iWARP driver +// NETDEV - specifies the network device to which the link is bound +// +// REF: https://man7.org/linux/man-pages/man8/rdma-link.8.html +func RdmaLinkAdd(linkName, linkType, netdev string) error { + return pkgHandle.RdmaLinkAdd(linkName, linkType, netdev) +} + +// RdmaLinkAdd adds an rdma link for the specified type to the network device. 
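+//
+// Illustrative call only (the rxe device and netdev names are hypothetical):
+//
+//	err := h.RdmaLinkAdd("rxe0", "rxe", "eth0")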
+func (h *Handle) RdmaLinkAdd(linkName string, linkType string, netdev string) error { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_NEWLINK) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_NAME, nl.ZeroTerminated(linkName))) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_LINK_TYPE, nl.ZeroTerminated(linkType))) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_NDEV_NAME, nl.ZeroTerminated(netdev))) + _, err := req.Execute(unix.NETLINK_RDMA, 0) + return err +} diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go index 58ff1af60..1b4555d5c 100644 --- a/vendor/github.com/vishvananda/netlink/route.go +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -11,6 +11,24 @@ type Scope uint8 type NextHopFlag int +const ( + RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota) + RT_FILTER_SCOPE + RT_FILTER_TYPE + RT_FILTER_TOS + RT_FILTER_IIF + RT_FILTER_OIF + RT_FILTER_DST + RT_FILTER_SRC + RT_FILTER_GW + RT_FILTER_TABLE + RT_FILTER_HOPLIMIT + RT_FILTER_PRIORITY + RT_FILTER_MARK + RT_FILTER_MASK + RT_FILTER_REALM +) + type Destination interface { Family() int Decode([]byte) error @@ -27,27 +45,46 @@ type Encap interface { Equal(Encap) bool } +//Protocol describe what was the originator of the route +type RouteProtocol int + // Route represents a netlink route. type Route struct { - LinkIndex int - ILinkIndex int - Scope Scope - Dst *net.IPNet - Src net.IP - Gw net.IP - MultiPath []*NexthopInfo - Protocol int - Priority int - Table int - Type int - Tos int - Flags int - MPLSDst *int - NewDst Destination - Encap Encap - MTU int - AdvMSS int - Hoplimit int + LinkIndex int + ILinkIndex int + Scope Scope + Dst *net.IPNet + Src net.IP + Gw net.IP + MultiPath []*NexthopInfo + Protocol RouteProtocol + Priority int + Family int + Table int + Type int + Tos int + Flags int + MPLSDst *int + NewDst Destination + Encap Encap + Via Destination + Realm int + MTU int + Window int + Rtt int + RttVar int + Ssthresh int + Cwnd int + AdvMSS int + Reordering int + Hoplimit int + InitCwnd int + Features int + RtoMin int + InitRwnd int + QuickACK int + Congctl string + FastOpenNoCookie int } func (r Route) String() string { @@ -66,6 +103,9 @@ func (r Route) String() string { if r.Encap != nil { elems = append(elems, fmt.Sprintf("Encap: %s", r.Encap)) } + if r.Via != nil { + elems = append(elems, fmt.Sprintf("Via: %s", r.Via)) + } elems = append(elems, fmt.Sprintf("Src: %s", r.Src)) if len(r.MultiPath) > 0 { elems = append(elems, fmt.Sprintf("Gw: %s", r.MultiPath)) @@ -74,6 +114,7 @@ func (r Route) String() string { } elems = append(elems, fmt.Sprintf("Flags: %s", r.ListFlags())) elems = append(elems, fmt.Sprintf("Table: %d", r.Table)) + elems = append(elems, fmt.Sprintf("Realm: %d", r.Realm)) return fmt.Sprintf("{%s}", strings.Join(elems, " ")) } @@ -87,6 +128,7 @@ func (r Route) Equal(x Route) bool { nexthopInfoSlice(r.MultiPath).Equal(x.MultiPath) && r.Protocol == x.Protocol && r.Priority == x.Priority && + r.Realm == x.Realm && r.Table == x.Table && r.Type == x.Type && r.Tos == x.Tos && @@ -94,6 +136,7 @@ func (r Route) Equal(x Route) bool { r.Flags == x.Flags && (r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) && (r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) && + (r.Via == x.Via || (r.Via != nil && r.Via.Equal(x.Via))) && (r.Encap == x.Encap || (r.Encap != nil && r.Encap.Equal(x.Encap))) } @@ -111,8 +154,15 @@ type flagString struct { 
} // RouteUpdate is sent when a route changes - type is RTM_NEWROUTE or RTM_DELROUTE + +// NlFlags is only non-zero for RTM_NEWROUTE, the following flags can be set: +// - unix.NLM_F_REPLACE - Replace existing matching config object with this request +// - unix.NLM_F_EXCL - Don't replace the config object if it already exists +// - unix.NLM_F_CREATE - Create config object if it doesn't already exist +// - unix.NLM_F_APPEND - Add to the end of the object list type RouteUpdate struct { - Type uint16 + Type uint16 + NlFlags uint16 Route } @@ -123,6 +173,7 @@ type NexthopInfo struct { Flags int NewDst Destination Encap Encap + Via Destination } func (n *NexthopInfo) String() string { @@ -134,6 +185,9 @@ func (n *NexthopInfo) String() string { if n.Encap != nil { elems = append(elems, fmt.Sprintf("Encap: %s", n.Encap)) } + if n.Via != nil { + elems = append(elems, fmt.Sprintf("Via: %s", n.Via)) + } elems = append(elems, fmt.Sprintf("Weight: %d", n.Hops+1)) elems = append(elems, fmt.Sprintf("Gw: %s", n.Gw)) elems = append(elems, fmt.Sprintf("Flags: %s", n.ListFlags())) diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index c69c595ed..0cd4f8363 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -1,8 +1,11 @@ package netlink import ( + "bytes" + "encoding/binary" "fmt" "net" + "strconv" "strings" "syscall" @@ -21,19 +24,22 @@ const ( SCOPE_NOWHERE Scope = unix.RT_SCOPE_NOWHERE ) -const ( - RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota) - RT_FILTER_SCOPE - RT_FILTER_TYPE - RT_FILTER_TOS - RT_FILTER_IIF - RT_FILTER_OIF - RT_FILTER_DST - RT_FILTER_SRC - RT_FILTER_GW - RT_FILTER_TABLE - RT_FILTER_HOPLIMIT -) +func (s Scope) String() string { + switch s { + case SCOPE_UNIVERSE: + return "universe" + case SCOPE_SITE: + return "site" + case SCOPE_LINK: + return "link" + case SCOPE_HOST: + return "host" + case SCOPE_NOWHERE: + return "nowhere" + default: + return "unknown" + } +} const ( FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK @@ -128,7 +134,6 @@ func (e *MPLSEncap) Decode(buf []byte) error { if len(buf) < 4 { return fmt.Errorf("lack of bytes") } - native := nl.NativeEndian() l := native.Uint16(buf) if len(buf) < int(l) { return fmt.Errorf("lack of bytes") @@ -144,7 +149,6 @@ func (e *MPLSEncap) Decode(buf []byte) error { func (e *MPLSEncap) Encode() ([]byte, error) { s := nl.EncodeMPLSStack(e.Labels...) - native := nl.NativeEndian() hdr := make([]byte, 4) native.PutUint16(hdr, uint16(len(s)+4)) native.PutUint16(hdr[2:], nl.MPLS_IPTUNNEL_DST) @@ -200,7 +204,6 @@ func (e *SEG6Encap) Decode(buf []byte) error { if len(buf) < 4 { return fmt.Errorf("lack of bytes") } - native := nl.NativeEndian() // Get Length(l) & Type(typ) : 2 + 2 bytes l := native.Uint16(buf) if len(buf) < int(l) { @@ -220,7 +223,6 @@ func (e *SEG6Encap) Decode(buf []byte) error { } func (e *SEG6Encap) Encode() ([]byte, error) { s, err := nl.EncodeSEG6Encap(e.Mode, e.Segments) - native := nl.NativeEndian() hdr := make([]byte, 4) native.PutUint16(hdr, uint16(len(s)+4)) native.PutUint16(hdr[2:], nl.SEG6_IPTUNNEL_SRH) @@ -230,7 +232,7 @@ func (e *SEG6Encap) String() string { segs := make([]string, 0, len(e.Segments)) // append segment backwards (from n to 0) since seg#0 is the last segment. 
for i := len(e.Segments); i > 0; i-- { - segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1])) + segs = append(segs, e.Segments[i-1].String()) } str := fmt.Sprintf("mode %s segs %d [ %s ]", nl.SEG6EncapModeString(e.Mode), len(e.Segments), strings.Join(segs, " ")) @@ -271,6 +273,16 @@ type SEG6LocalEncap struct { In6Addr net.IP Iif int Oif int + bpf bpfObj +} + +func (e *SEG6LocalEncap) SetProg(progFd int, progName string) error { + if progFd <= 0 { + return fmt.Errorf("seg6local bpf SetProg: invalid fd") + } + e.bpf.progFd = progFd + e.bpf.progName = progName + return nil } func (e *SEG6LocalEncap) Type() int { @@ -281,7 +293,6 @@ func (e *SEG6LocalEncap) Decode(buf []byte) error { if err != nil { return err } - native := nl.NativeEndian() for _, attr := range attrs { switch attr.Attr.Type { case nl.SEG6_LOCAL_ACTION: @@ -305,13 +316,28 @@ func (e *SEG6LocalEncap) Decode(buf []byte) error { case nl.SEG6_LOCAL_OIF: e.Oif = int(native.Uint32(attr.Value[0:4])) e.Flags[nl.SEG6_LOCAL_OIF] = true + case nl.SEG6_LOCAL_BPF: + var bpfAttrs []syscall.NetlinkRouteAttr + bpfAttrs, err = nl.ParseRouteAttr(attr.Value) + bpfobj := bpfObj{} + for _, bpfAttr := range bpfAttrs { + switch bpfAttr.Attr.Type { + case nl.LWT_BPF_PROG_FD: + bpfobj.progFd = int(native.Uint32(bpfAttr.Value)) + case nl.LWT_BPF_PROG_NAME: + bpfobj.progName = string(bpfAttr.Value) + default: + err = fmt.Errorf("seg6local bpf decode: unknown attribute: Type %d", bpfAttr.Attr) + } + } + e.bpf = bpfobj + e.Flags[nl.SEG6_LOCAL_BPF] = true } } return err } func (e *SEG6LocalEncap) Encode() ([]byte, error) { var err error - native := nl.NativeEndian() res := make([]byte, 8) native.PutUint16(res, 8) // length native.PutUint16(res[2:], nl.SEG6_LOCAL_ACTION) @@ -367,6 +393,16 @@ func (e *SEG6LocalEncap) Encode() ([]byte, error) { native.PutUint32(attr[4:], uint32(e.Oif)) res = append(res, attr...) } + if e.Flags[nl.SEG6_LOCAL_BPF] { + attr := nl.NewRtAttr(nl.SEG6_LOCAL_BPF, []byte{}) + if e.bpf.progFd != 0 { + attr.AddRtAttr(nl.LWT_BPF_PROG_FD, nl.Uint32Attr(uint32(e.bpf.progFd))) + } + if e.bpf.progName != "" { + attr.AddRtAttr(nl.LWT_BPF_PROG_NAME, nl.ZeroTerminated(e.bpf.progName)) + } + res = append(res, attr.Serialize()...) + } return res, err } func (e *SEG6LocalEncap) String() string { @@ -400,12 +436,15 @@ func (e *SEG6LocalEncap) String() string { } if e.Flags[nl.SEG6_LOCAL_SRH] { segs := make([]string, 0, len(e.Segments)) - //append segment backwards (from n to 0) since seg#0 is the last segment. + // append segment backwards (from n to 0) since seg#0 is the last segment. 
for i := len(e.Segments); i > 0; i-- { - segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1])) + segs = append(segs, e.Segments[i-1].String()) } strs = append(strs, fmt.Sprintf("segs %d [ %s ]", len(e.Segments), strings.Join(segs, " "))) } + if e.Flags[nl.SEG6_LOCAL_BPF] { + strs = append(strs, fmt.Sprintf("bpf %s[%d]", e.bpf.progName, e.bpf.progFd)) + } return strings.Join(strs, " ") } func (e *SEG6LocalEncap) Equal(x Encap) bool { @@ -437,12 +476,316 @@ func (e *SEG6LocalEncap) Equal(x Encap) bool { if !e.InAddr.Equal(o.InAddr) || !e.In6Addr.Equal(o.In6Addr) { return false } - if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif { + if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif || e.bpf != o.bpf { + return false + } + return true +} + +// Encap BPF definitions +type bpfObj struct { + progFd int + progName string +} +type BpfEncap struct { + progs [nl.LWT_BPF_MAX]bpfObj + headroom int +} + +// SetProg adds a bpf function to the route via netlink RTA_ENCAP. The fd must be a bpf +// program loaded with bpf(type=BPF_PROG_TYPE_LWT_*) matching the direction the program should +// be applied to (LWT_BPF_IN, LWT_BPF_OUT, LWT_BPF_XMIT). +func (e *BpfEncap) SetProg(mode, progFd int, progName string) error { + if progFd <= 0 { + return fmt.Errorf("lwt bpf SetProg: invalid fd") + } + if mode <= nl.LWT_BPF_UNSPEC || mode >= nl.LWT_BPF_XMIT_HEADROOM { + return fmt.Errorf("lwt bpf SetProg:invalid mode") + } + e.progs[mode].progFd = progFd + e.progs[mode].progName = fmt.Sprintf("%s[fd:%d]", progName, progFd) + return nil +} + +// SetXmitHeadroom sets the xmit headroom (LWT_BPF_MAX_HEADROOM) via netlink RTA_ENCAP. +// maximum headroom is LWT_BPF_MAX_HEADROOM +func (e *BpfEncap) SetXmitHeadroom(headroom int) error { + if headroom > nl.LWT_BPF_MAX_HEADROOM || headroom < 0 { + return fmt.Errorf("invalid headroom size. range is 0 - %d", nl.LWT_BPF_MAX_HEADROOM) + } + e.headroom = headroom + return nil +} + +func (e *BpfEncap) Type() int { + return nl.LWTUNNEL_ENCAP_BPF +} +func (e *BpfEncap) Decode(buf []byte) error { + if len(buf) < 4 { + return fmt.Errorf("lwt bpf decode: lack of bytes") + } + native := nl.NativeEndian() + attrs, err := nl.ParseRouteAttr(buf) + if err != nil { + return fmt.Errorf("lwt bpf decode: failed parsing attribute. 
err: %v", err) + } + for _, attr := range attrs { + if int(attr.Attr.Type) < 1 { + // nl.LWT_BPF_UNSPEC + continue + } + if int(attr.Attr.Type) > nl.LWT_BPF_MAX { + return fmt.Errorf("lwt bpf decode: received unknown attribute type: %d", attr.Attr.Type) + } + switch int(attr.Attr.Type) { + case nl.LWT_BPF_MAX_HEADROOM: + e.headroom = int(native.Uint32(attr.Value)) + default: + bpfO := bpfObj{} + parsedAttrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return fmt.Errorf("lwt bpf decode: failed parsing route attribute") + } + for _, parsedAttr := range parsedAttrs { + switch int(parsedAttr.Attr.Type) { + case nl.LWT_BPF_PROG_FD: + bpfO.progFd = int(native.Uint32(parsedAttr.Value)) + case nl.LWT_BPF_PROG_NAME: + bpfO.progName = string(parsedAttr.Value) + default: + return fmt.Errorf("lwt bpf decode: received unknown attribute: type: %d, len: %d", parsedAttr.Attr.Type, parsedAttr.Attr.Len) + } + } + e.progs[attr.Attr.Type] = bpfO + } + } + return nil +} + +func (e *BpfEncap) Encode() ([]byte, error) { + buf := make([]byte, 0) + native = nl.NativeEndian() + for index, attr := range e.progs { + nlMsg := nl.NewRtAttr(index, []byte{}) + if attr.progFd != 0 { + nlMsg.AddRtAttr(nl.LWT_BPF_PROG_FD, nl.Uint32Attr(uint32(attr.progFd))) + } + if attr.progName != "" { + nlMsg.AddRtAttr(nl.LWT_BPF_PROG_NAME, nl.ZeroTerminated(attr.progName)) + } + if nlMsg.Len() > 4 { + buf = append(buf, nlMsg.Serialize()...) + } + } + if len(buf) <= 4 { + return nil, fmt.Errorf("lwt bpf encode: bpf obj definitions returned empty buffer") + } + if e.headroom > 0 { + hRoom := nl.NewRtAttr(nl.LWT_BPF_XMIT_HEADROOM, nl.Uint32Attr(uint32(e.headroom))) + buf = append(buf, hRoom.Serialize()...) + } + return buf, nil +} + +func (e *BpfEncap) String() string { + progs := make([]string, 0) + for index, obj := range e.progs { + empty := bpfObj{} + switch index { + case nl.LWT_BPF_IN: + if obj != empty { + progs = append(progs, fmt.Sprintf("in: %s", obj.progName)) + } + case nl.LWT_BPF_OUT: + if obj != empty { + progs = append(progs, fmt.Sprintf("out: %s", obj.progName)) + } + case nl.LWT_BPF_XMIT: + if obj != empty { + progs = append(progs, fmt.Sprintf("xmit: %s", obj.progName)) + } + } + } + if e.headroom > 0 { + progs = append(progs, fmt.Sprintf("xmit headroom: %d", e.headroom)) + } + return strings.Join(progs, " ") +} + +func (e *BpfEncap) Equal(x Encap) bool { + o, ok := x.(*BpfEncap) + if !ok { + return false + } + if e.headroom != o.headroom { + return false + } + for i := range o.progs { + if o.progs[i] != e.progs[i] { + return false + } + } + return true +} + +// IP6tnlEncap definition +type IP6tnlEncap struct { + ID uint64 + Dst net.IP + Src net.IP + Hoplimit uint8 + TC uint8 + Flags uint16 +} + +func (e *IP6tnlEncap) Type() int { + return nl.LWTUNNEL_ENCAP_IP6 +} + +func (e *IP6tnlEncap) Decode(buf []byte) error { + attrs, err := nl.ParseRouteAttr(buf) + if err != nil { + return err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.LWTUNNEL_IP6_ID: + e.ID = uint64(native.Uint64(attr.Value[0:4])) + case nl.LWTUNNEL_IP6_DST: + e.Dst = net.IP(attr.Value[:]) + case nl.LWTUNNEL_IP6_SRC: + e.Src = net.IP(attr.Value[:]) + case nl.LWTUNNEL_IP6_HOPLIMIT: + e.Hoplimit = attr.Value[0] + case nl.LWTUNNEL_IP6_TC: + // e.TC = attr.Value[0] + err = fmt.Errorf("decoding TC in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_FLAGS: + // e.Flags = uint16(native.Uint16(attr.Value[0:2])) + err = fmt.Errorf("decoding FLAG in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_PAD: + err = 
fmt.Errorf("decoding PAD in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_OPTS: + err = fmt.Errorf("decoding OPTS in IP6tnlEncap is not supported") + } + } + return err +} + +func (e *IP6tnlEncap) Encode() ([]byte, error) { + + final := []byte{} + + resID := make([]byte, 12) + native.PutUint16(resID, 12) // 2+2+8 + native.PutUint16(resID[2:], nl.LWTUNNEL_IP6_ID) + native.PutUint64(resID[4:], 0) + final = append(final, resID...) + + resDst := make([]byte, 4) + native.PutUint16(resDst, 20) // 2+2+16 + native.PutUint16(resDst[2:], nl.LWTUNNEL_IP6_DST) + resDst = append(resDst, e.Dst...) + final = append(final, resDst...) + + resSrc := make([]byte, 4) + native.PutUint16(resSrc, 20) + native.PutUint16(resSrc[2:], nl.LWTUNNEL_IP6_SRC) + resSrc = append(resSrc, e.Src...) + final = append(final, resSrc...) + + // resTc := make([]byte, 5) + // native.PutUint16(resTc, 5) + // native.PutUint16(resTc[2:], nl.LWTUNNEL_IP6_TC) + // resTc[4] = e.TC + // final = append(final,resTc...) + + resHops := make([]byte, 5) + native.PutUint16(resHops, 5) + native.PutUint16(resHops[2:], nl.LWTUNNEL_IP6_HOPLIMIT) + resHops[4] = e.Hoplimit + final = append(final, resHops...) + + // resFlags := make([]byte, 6) + // native.PutUint16(resFlags, 6) + // native.PutUint16(resFlags[2:], nl.LWTUNNEL_IP6_FLAGS) + // native.PutUint16(resFlags[4:], e.Flags) + // final = append(final,resFlags...) + + return final, nil +} + +func (e *IP6tnlEncap) String() string { + return fmt.Sprintf("id %d src %s dst %s hoplimit %d tc %d flags 0x%.4x", e.ID, e.Src, e.Dst, e.Hoplimit, e.TC, e.Flags) +} + +func (e *IP6tnlEncap) Equal(x Encap) bool { + o, ok := x.(*IP6tnlEncap) + if !ok { + return false + } + + if e.ID != o.ID || e.Flags != o.Flags || e.Hoplimit != o.Hoplimit || e.Src.Equal(o.Src) || e.Dst.Equal(o.Dst) || e.TC != o.TC { return false } return true } +type Via struct { + AddrFamily int + Addr net.IP +} + +func (v *Via) Equal(x Destination) bool { + o, ok := x.(*Via) + if !ok { + return false + } + if v.AddrFamily == x.Family() && v.Addr.Equal(o.Addr) { + return true + } + return false +} + +func (v *Via) String() string { + return fmt.Sprintf("Family: %d, Address: %s", v.AddrFamily, v.Addr.String()) +} + +func (v *Via) Family() int { + return v.AddrFamily +} + +func (v *Via) Encode() ([]byte, error) { + buf := &bytes.Buffer{} + err := binary.Write(buf, native, uint16(v.AddrFamily)) + if err != nil { + return nil, err + } + err = binary.Write(buf, native, v.Addr) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (v *Via) Decode(b []byte) error { + if len(b) < 6 { + return fmt.Errorf("decoding failed: buffer too small (%d bytes)", len(b)) + } + v.AddrFamily = int(native.Uint16(b[0:2])) + if v.AddrFamily == nl.FAMILY_V4 { + v.Addr = net.IP(b[2:6]) + return nil + } else if v.AddrFamily == nl.FAMILY_V6 { + if len(b) < 18 { + return fmt.Errorf("decoding failed: buffer too small (%d bytes)", len(b)) + } + v.Addr = net.IP(b[2:]) + return nil + } + return fmt.Errorf("decoding failed: address family %d unknown", v.AddrFamily) +} + // RouteAdd will add a route to the system. 
// Equivalent to: `ip route add $route` func RouteAdd(route *Route) error { @@ -454,7 +797,51 @@ func RouteAdd(route *Route) error { func (h *Handle) RouteAdd(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteAppend will append a route to the system. +// Equivalent to: `ip route append $route` +func RouteAppend(route *Route) error { + return pkgHandle.RouteAppend(route) +} + +// RouteAppend will append a route to the system. +// Equivalent to: `ip route append $route` +func (h *Handle) RouteAppend(route *Route) error { + flags := unix.NLM_F_CREATE | unix.NLM_F_APPEND | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteAddEcmp will add a route to the system. +func RouteAddEcmp(route *Route) error { + return pkgHandle.RouteAddEcmp(route) +} + +// RouteAddEcmp will add a route to the system. +func (h *Handle) RouteAddEcmp(route *Route) error { + flags := unix.NLM_F_CREATE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteChange will change an existing route in the system. +// Equivalent to: `ip route change $route` +func RouteChange(route *Route) error { + return pkgHandle.RouteChange(route) +} + +// RouteChange will change an existing route in the system. +// Equivalent to: `ip route change $route` +func (h *Handle) RouteChange(route *Route) error { + flags := unix.NLM_F_REPLACE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteReplace will add a route to the system. @@ -468,7 +855,8 @@ func RouteReplace(route *Route) error { func (h *Handle) RouteReplace(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteDel will delete a route from the system. 
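
[Editor's note] The hunk above adds RouteAppend, RouteAddEcmp and RouteChange alongside the reworked RouteAdd/RouteReplace. The sketch below is illustrative only and not part of the patch; the interface index, destination prefix, gateway and replacement next hop are placeholder values.

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	_, dst, err := net.ParseCIDR("192.0.2.0/24")
	if err != nil {
		log.Fatal(err)
	}
	route := &netlink.Route{
		LinkIndex: 2,                       // placeholder interface index
		Dst:       dst,                     // placeholder destination prefix
		Gw:        net.ParseIP("10.0.0.1"), // placeholder next hop
	}

	// RouteAppend sends RTM_NEWROUTE with NLM_F_CREATE|NLM_F_APPEND, so an
	// already existing matching route does not make the request fail
	// (unlike RouteAdd, which uses NLM_F_EXCL).
	if err := netlink.RouteAppend(route); err != nil {
		log.Fatal(err)
	}

	// RouteChange sends RTM_NEWROUTE with NLM_F_REPLACE but without
	// NLM_F_CREATE, so it only modifies a route that already exists.
	route.Gw = net.ParseIP("10.0.0.2") // placeholder replacement next hop
	if err := netlink.RouteChange(route); err != nil {
		log.Fatal(err)
	}
}
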
@@ -481,12 +869,27 @@ func RouteDel(route *Route) error { // Equivalent to: `ip route del $route` func (h *Handle) RouteDel(route *Route) error { req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK) - return h.routeHandle(route, req, nl.NewRtDelMsg()) + _, err := h.routeHandle(route, req, nl.NewRtDelMsg()) + return err } -func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error { - if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil { - return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil") +func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) ([][]byte, error) { + if err := h.prepareRouteReq(route, req, msg); err != nil { + return nil, err + } + return req.Execute(unix.NETLINK_ROUTE, 0) +} + +func (h *Handle) routeHandleIter(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg, f func(msg []byte) bool) error { + if err := h.prepareRouteReq(route, req, msg); err != nil { + return err + } + return req.ExecuteIter(unix.NETLINK_ROUTE, 0, f) +} + +func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error { + if req.NlMsghdr.Type != unix.RTM_GETROUTE && (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil { + return fmt.Errorf("either Dst.IP, Src.IP or Gw must be set") } family := -1 @@ -530,7 +933,13 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if err != nil { return err } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP, buf)) + switch route.Encap.Type() { + case nl.LWTUNNEL_ENCAP_BPF: + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP|unix.NLA_F_NESTED, buf)) + default: + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP, buf)) + } + } if route.Src != nil { @@ -564,6 +973,14 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_GATEWAY, gwData)) } + if route.Via != nil { + buf, err := route.Via.Encode() + if err != nil { + return fmt.Errorf("failed to encode RTA_VIA: %v", err) + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_VIA, buf)) + } + if len(route.MultiPath) > 0 { buf := []byte{} for _, nh := range route.MultiPath { @@ -606,6 +1023,13 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } children = append(children, nl.NewRtAttr(unix.RTA_ENCAP, buf)) } + if nh.Via != nil { + buf, err := nh.Via.Encode() + if err != nil { + return err + } + children = append(children, nl.NewRtAttr(unix.RTA_VIA, buf)) + } rtnh.Children = children buf = append(buf, rtnh.Serialize()...) 
} @@ -628,6 +1052,11 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg native.PutUint32(b, uint32(route.Priority)) rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PRIORITY, b)) } + if route.Realm > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Realm)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_FLOW, b)) + } if route.Tos > 0 { msg.Tos = uint8(route.Tos) } @@ -639,19 +1068,70 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } var metrics []*nl.RtAttr - // TODO: support other rta_metric values if route.MTU > 0 { b := nl.Uint32Attr(uint32(route.MTU)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) } + if route.Window > 0 { + b := nl.Uint32Attr(uint32(route.Window)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_WINDOW, b)) + } + if route.Rtt > 0 { + b := nl.Uint32Attr(uint32(route.Rtt)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTT, b)) + } + if route.RttVar > 0 { + b := nl.Uint32Attr(uint32(route.RttVar)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTTVAR, b)) + } + if route.Ssthresh > 0 { + b := nl.Uint32Attr(uint32(route.Ssthresh)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_SSTHRESH, b)) + } + if route.Cwnd > 0 { + b := nl.Uint32Attr(uint32(route.Cwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_CWND, b)) + } if route.AdvMSS > 0 { b := nl.Uint32Attr(uint32(route.AdvMSS)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b)) } + if route.Reordering > 0 { + b := nl.Uint32Attr(uint32(route.Reordering)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_REORDERING, b)) + } if route.Hoplimit > 0 { b := nl.Uint32Attr(uint32(route.Hoplimit)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_HOPLIMIT, b)) } + if route.InitCwnd > 0 { + b := nl.Uint32Attr(uint32(route.InitCwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_INITCWND, b)) + } + if route.Features > 0 { + b := nl.Uint32Attr(uint32(route.Features)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_FEATURES, b)) + } + if route.RtoMin > 0 { + b := nl.Uint32Attr(uint32(route.RtoMin)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTO_MIN, b)) + } + if route.InitRwnd > 0 { + b := nl.Uint32Attr(uint32(route.InitRwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_INITRWND, b)) + } + if route.QuickACK > 0 { + b := nl.Uint32Attr(uint32(route.QuickACK)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_QUICKACK, b)) + } + if route.Congctl != "" { + b := nl.ZeroTerminated(route.Congctl) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_CC_ALGO, b)) + } + if route.FastOpenNoCookie > 0 { + b := nl.Uint32Attr(uint32(route.FastOpenNoCookie)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_FASTOPEN_NO_COOKIE, b)) + } if metrics != nil { attr := nl.NewRtAttr(unix.RTA_METRICS, nil) @@ -663,22 +1143,21 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg msg.Flags = uint32(route.Flags) msg.Scope = uint8(route.Scope) - msg.Family = uint8(family) + // only overwrite family if it was not set in msg + if msg.Family == 0 { + msg.Family = uint8(family) + } req.AddData(msg) for _, attr := range rtAttrs { req.AddData(attr) } - var ( - b = make([]byte, 4) - native = nl.NativeEndian() - ) - native.PutUint32(b, uint32(route.LinkIndex)) - - req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) - - _, err := req.Execute(unix.NETLINK_ROUTE, 0) - return err + if (req.NlMsghdr.Type != unix.RTM_GETROUTE) || (req.NlMsghdr.Type == unix.RTM_GETROUTE && route.LinkIndex > 0) { + b := 
make([]byte, 4) + native.PutUint32(b, uint32(route.LinkIndex)) + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + return nil } // RouteList gets a list of routes in the system. @@ -692,13 +1171,13 @@ func RouteList(link Link, family int) ([]Route, error) { // Equivalent to: `ip route show`. // The list can be filtered by link and ip family. func (h *Handle) RouteList(link Link, family int) ([]Route, error) { - var routeFilter *Route + routeFilter := &Route{} if link != nil { - routeFilter = &Route{ - LinkIndex: link.Attrs().Index, - } + routeFilter.LinkIndex = link.Attrs().Index + + return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF) } - return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF) + return h.RouteListFiltered(family, routeFilter, 0) } // RouteListFiltered gets a list of routes in the system filtered with specified rules. @@ -710,65 +1189,94 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // RouteListFiltered gets a list of routes in the system filtered with specified rules. // All rules must be defined in RouteFilter struct func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { - req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) - infmsg := nl.NewIfInfomsg(family) - req.AddData(infmsg) - - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) + var res []Route + err := h.RouteListFilteredIter(family, filter, filterMask, func(route Route) (cont bool) { + res = append(res, route) + return true + }) if err != nil { return nil, err } + return res, nil +} - var res []Route - for _, m := range msgs { +// RouteListFilteredIter passes each route that matches the filter to the given iterator func. Iteration continues +// until all routes are loaded or the func returns false. 
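// (Editor's illustrative note, not part of the upstream change.) A minimal
// sketch of the iterator-style listing, assuming the fmt, log and netlink
// packages are imported and that routes from the main table (254) are wanted:
//
//	filter := &netlink.Route{Table: 254}
//	err := netlink.RouteListFilteredIter(netlink.FAMILY_V4, filter, netlink.RT_FILTER_TABLE,
//		func(r netlink.Route) bool {
//			fmt.Println(r.Dst)
//			return true // return false to stop iterating early
//		})
//	if err != nil {
//		log.Fatal(err)
//	}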
+func RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { + return pkgHandle.RouteListFilteredIter(family, filter, filterMask, f) +} + +func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { + req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) + rtmsg := &nl.RtMsg{} + rtmsg.Family = uint8(family) + + var parseErr error + err := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool { msg := nl.DeserializeRtMsg(m) + if family != FAMILY_ALL && msg.Family != uint8(family) { + // Ignore routes not matching requested family + return true + } if msg.Flags&unix.RTM_F_CLONED != 0 { // Ignore cloned routes - continue + return true } if msg.Table != unix.RT_TABLE_MAIN { - if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 { + if filter == nil || filterMask&RT_FILTER_TABLE == 0 { // Ignore non-main tables - continue + return true } } route, err := deserializeRoute(m) if err != nil { - return nil, err + parseErr = err + return false } if filter != nil { switch { case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && route.Table != filter.Table: - continue + return true case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: - continue + return true case filterMask&RT_FILTER_SCOPE != 0 && route.Scope != filter.Scope: - continue + return true case filterMask&RT_FILTER_TYPE != 0 && route.Type != filter.Type: - continue + return true case filterMask&RT_FILTER_TOS != 0 && route.Tos != filter.Tos: - continue + return true + case filterMask&RT_FILTER_REALM != 0 && route.Realm != filter.Realm: + return true case filterMask&RT_FILTER_OIF != 0 && route.LinkIndex != filter.LinkIndex: - continue + return true case filterMask&RT_FILTER_IIF != 0 && route.ILinkIndex != filter.ILinkIndex: - continue + return true case filterMask&RT_FILTER_GW != 0 && !route.Gw.Equal(filter.Gw): - continue + return true case filterMask&RT_FILTER_SRC != 0 && !route.Src.Equal(filter.Src): - continue + return true case filterMask&RT_FILTER_DST != 0: if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) { + if filter.Dst == nil { + filter.Dst = genZeroIPNet(family) + } if !ipNetEqual(route.Dst, filter.Dst) { - continue + return true } } case filterMask&RT_FILTER_HOPLIMIT != 0 && route.Hoplimit != filter.Hoplimit: - continue + return true } } - res = append(res, route) + return f(route) + }) + if err != nil { + return err } - return res, nil + if parseErr != nil { + return parseErr + } + return nil } // deserializeRoute decodes a binary netlink message into a Route struct @@ -780,14 +1288,14 @@ func deserializeRoute(m []byte) (Route, error) { } route := Route{ Scope: Scope(msg.Scope), - Protocol: int(msg.Protocol), + Protocol: RouteProtocol(int(msg.Protocol)), Table: int(msg.Table), Type: int(msg.Type), Tos: int(msg.Tos), Flags: int(msg.Flags), + Family: int(msg.Family), } - native := nl.NativeEndian() var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { @@ -814,6 +1322,8 @@ func deserializeRoute(m []byte) (Route, error) { route.ILinkIndex = int(native.Uint32(attr.Value[0:4])) case unix.RTA_PRIORITY: route.Priority = int(native.Uint32(attr.Value[0:4])) + case unix.RTA_FLOW: + route.Realm = int(native.Uint32(attr.Value[0:4])) case unix.RTA_TABLE: route.Table = int(native.Uint32(attr.Value[0:4])) case unix.RTA_MULTIPATH: @@ -853,6 +1363,12 @@ func deserializeRoute(m 
[]byte) (Route, error) { encapType = attr case unix.RTA_ENCAP: encap = attr + case unix.RTA_VIA: + d := &Via{} + if err := d.Decode(attr.Value); err != nil { + return nil, nil, err + } + info.Via = d } } @@ -890,6 +1406,12 @@ func deserializeRoute(m []byte) (Route, error) { return route, err } route.NewDst = d + case unix.RTA_VIA: + v := &Via{} + if err := v.Decode(attr.Value); err != nil { + return route, err + } + route.Via = v case unix.RTA_ENCAP_TYPE: encapType = attr case unix.RTA_ENCAP: @@ -903,15 +1425,62 @@ func deserializeRoute(m []byte) (Route, error) { switch metric.Attr.Type { case unix.RTAX_MTU: route.MTU = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_WINDOW: + route.Window = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTT: + route.Rtt = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTTVAR: + route.RttVar = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_SSTHRESH: + route.Ssthresh = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_CWND: + route.Cwnd = int(native.Uint32(metric.Value[0:4])) case unix.RTAX_ADVMSS: route.AdvMSS = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_REORDERING: + route.Reordering = int(native.Uint32(metric.Value[0:4])) case unix.RTAX_HOPLIMIT: route.Hoplimit = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_INITCWND: + route.InitCwnd = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_FEATURES: + route.Features = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTO_MIN: + route.RtoMin = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_INITRWND: + route.InitRwnd = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_QUICKACK: + route.QuickACK = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_CC_ALGO: + route.Congctl = nl.BytesToString(metric.Value) + case unix.RTAX_FASTOPEN_NO_COOKIE: + route.FastOpenNoCookie = int(native.Uint32(metric.Value[0:4])) } } } } + // Same logic to generate "default" dst with iproute2 implementation + if route.Dst == nil { + var addLen int + var ip net.IP + switch msg.Family { + case FAMILY_V4: + addLen = net.IPv4len + ip = net.IPv4zero + case FAMILY_V6: + addLen = net.IPv6len + ip = net.IPv6zero + } + + if addLen != 0 { + route.Dst = &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(msg.Dst_len), 8*addLen), + } + } + } + if len(encap.Value) != 0 && len(encapType.Value) != 0 { typ := int(native.Uint16(encapType.Value[0:2])) var e Encap @@ -931,6 +1500,11 @@ func deserializeRoute(m []byte) (Route, error) { if err := e.Decode(encap.Value); err != nil { return route, err } + case nl.LWTUNNEL_ENCAP_BPF: + e = &BpfEncap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } } route.Encap = e } @@ -938,15 +1512,34 @@ func deserializeRoute(m []byte) (Route, error) { return route, nil } +// RouteGetOptions contains a set of options to use with +// RouteGetWithOptions +type RouteGetOptions struct { + Iif string + IifIndex int + Oif string + VrfName string + SrcAddr net.IP + UID *uint32 + Mark uint32 + FIBMatch bool +} + +// RouteGetWithOptions gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get <> vrf '. +func RouteGetWithOptions(destination net.IP, options *RouteGetOptions) ([]Route, error) { + return pkgHandle.RouteGetWithOptions(destination, options) +} + // RouteGet gets a route to a specific destination from the host system. // Equivalent to: 'ip route get'. 
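// (Editor's illustrative note, not part of the upstream change.) A short
// sketch of the options-based lookup added above, assuming the net, log and
// netlink packages are imported and that a VRF device named "blue" exists:
//
//	routes, err := netlink.RouteGetWithOptions(net.ParseIP("192.0.2.10"),
//		&netlink.RouteGetOptions{VrfName: "blue", FIBMatch: true})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, r := range routes {
//		log.Printf("table %d: %+v", r.Table, r)
//	}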
func RouteGet(destination net.IP) ([]Route, error) { return pkgHandle.RouteGet(destination) } -// RouteGet gets a route to a specific destination from the host system. -// Equivalent to: 'ip route get'. -func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { +// RouteGetWithOptions gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get <> vrf '. +func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOptions) ([]Route, error) { req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_REQUEST) family := nl.GetIPFamily(destination) var destinationData []byte @@ -961,11 +1554,88 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { msg := &nl.RtMsg{} msg.Family = uint8(family) msg.Dst_len = bitlen + if options != nil && options.SrcAddr != nil { + msg.Src_len = bitlen + } + msg.Flags = unix.RTM_F_LOOKUP_TABLE + if options != nil && options.FIBMatch { + msg.Flags |= unix.RTM_F_FIB_MATCH + } req.AddData(msg) rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) req.AddData(rtaDst) + if options != nil { + if options.VrfName != "" { + link, err := h.LinkByName(options.VrfName) + if err != nil { + return nil, err + } + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs().Index)) + + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + + iifIndex := 0 + if len(options.Iif) > 0 { + link, err := h.LinkByName(options.Iif) + if err != nil { + return nil, err + } + + iifIndex = link.Attrs().Index + } else if options.IifIndex > 0 { + iifIndex = options.IifIndex + } + + if iifIndex > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(iifIndex)) + + req.AddData(nl.NewRtAttr(unix.RTA_IIF, b)) + } + + if len(options.Oif) > 0 { + link, err := h.LinkByName(options.Oif) + if err != nil { + return nil, err + } + + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs().Index)) + + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + + if options.SrcAddr != nil { + var srcAddr []byte + if family == FAMILY_V4 { + srcAddr = options.SrcAddr.To4() + } else { + srcAddr = options.SrcAddr.To16() + } + + req.AddData(nl.NewRtAttr(unix.RTA_SRC, srcAddr)) + } + + if options.UID != nil { + uid := *options.UID + b := make([]byte, 4) + native.PutUint32(b, uid) + + req.AddData(nl.NewRtAttr(unix.RTA_UID, b)) + } + + if options.Mark > 0 { + b := make([]byte, 4) + native.PutUint32(b, options.Mark) + + req.AddData(nl.NewRtAttr(unix.RTA_MARK, b)) + } + } + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) if err != nil { return nil, err @@ -980,27 +1650,35 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { res = append(res, route) } return res, nil +} +// RouteGet gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get'. +func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { + return h.RouteGetWithOptions(destination, nil) } // RouteSubscribe takes a chan down which notifications will be sent // when routes are added or deleted. Close the 'done' chan to stop subscription. func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error { - return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil, false) + return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) } // RouteSubscribeAt works like RouteSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). 
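// (Editor's illustrative note, not part of the upstream change.) A small
// sketch of consuming updates together with the new NlFlags field, assuming
// the log, golang.org/x/sys/unix and netlink packages are imported:
//
//	ch := make(chan netlink.RouteUpdate)
//	done := make(chan struct{})
//	defer close(done)
//	if err := netlink.RouteSubscribe(ch, done); err != nil {
//		log.Fatal(err)
//	}
//	for update := range ch {
//		// NlFlags is only non-zero for RTM_NEWROUTE messages.
//		if update.NlFlags&unix.NLM_F_REPLACE != 0 {
//			log.Printf("route replaced: %v", update.Dst)
//		}
//	}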
func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error { - return routeSubscribeAt(ns, netns.None(), ch, done, nil, false) + return routeSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) } // RouteSubscribeOptions contains a set of options to use with // RouteSubscribeWithOptions. type RouteSubscribeOptions struct { - Namespace *netns.NsHandle - ErrorCallback func(error) - ListExisting bool + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval } // RouteSubscribeWithOptions work like RouteSubscribe but enable to @@ -1011,14 +1689,27 @@ func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, opti none := netns.None() options.Namespace = &none } - return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting) + return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) } -func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { +func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_ROUTE, unix.RTNLGRP_IPV6_ROUTE) if err != nil { return err } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce) + if err != nil { + return err + } + } if done != nil { go func() { <-done @@ -1040,7 +1731,8 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < msgs, from, err := s.Receive() if err != nil { if cberr != nil { - cberr(err) + cberr(fmt.Errorf("Receive failed: %v", + err)) } return } @@ -1055,27 +1747,103 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < continue } if m.Header.Type == unix.NLMSG_ERROR { - native := nl.NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { continue } if cberr != nil { - cberr(syscall.Errno(-error)) + cberr(fmt.Errorf("error message: %v", + syscall.Errno(-error))) } - return + continue } route, err := deserializeRoute(m.Data) if err != nil { if cberr != nil { cberr(err) } - return + continue + } + ch <- RouteUpdate{ + Type: m.Header.Type, + NlFlags: m.Header.Flags & (unix.NLM_F_REPLACE | unix.NLM_F_EXCL | unix.NLM_F_CREATE | unix.NLM_F_APPEND), + Route: route, } - ch <- RouteUpdate{Type: m.Header.Type, Route: route} } } }() return nil } + +func (p RouteProtocol) String() string { + switch int(p) { + case unix.RTPROT_BABEL: + return "babel" + case unix.RTPROT_BGP: + return "bgp" + case unix.RTPROT_BIRD: + return "bird" + case unix.RTPROT_BOOT: + return "boot" + case unix.RTPROT_DHCP: + return "dhcp" + case unix.RTPROT_DNROUTED: + return "dnrouted" + case unix.RTPROT_EIGRP: + return "eigrp" + case unix.RTPROT_GATED: + return "gated" + case unix.RTPROT_ISIS: + return "isis" + // case unix.RTPROT_KEEPALIVED: + // return "keepalived" + case unix.RTPROT_KERNEL: + return "kernel" + case unix.RTPROT_MROUTED: + return "mrouted" + case unix.RTPROT_MRT: + return "mrt" + case unix.RTPROT_NTK: + return "ntk" + case 
unix.RTPROT_OSPF: + return "ospf" + case unix.RTPROT_RA: + return "ra" + case unix.RTPROT_REDIRECT: + return "redirect" + case unix.RTPROT_RIP: + return "rip" + case unix.RTPROT_STATIC: + return "static" + case unix.RTPROT_UNSPEC: + return "unspec" + case unix.RTPROT_XORP: + return "xorp" + case unix.RTPROT_ZEBRA: + return "zebra" + default: + return strconv.Itoa(int(p)) + } +} + +// genZeroIPNet returns 0.0.0.0/0 or ::/0 for IPv4 or IPv6, otherwise nil +func genZeroIPNet(family int) *net.IPNet { + var addLen int + var ip net.IP + switch family { + case FAMILY_V4: + addLen = net.IPv4len + ip = net.IPv4zero + case FAMILY_V6: + addLen = net.IPv6len + ip = net.IPv6zero + } + if addLen != 0 { + return &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(0, 8*addLen), + } + } + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/route_unspecified.go b/vendor/github.com/vishvananda/netlink/route_unspecified.go index 2701862b4..db7372689 100644 --- a/vendor/github.com/vishvananda/netlink/route_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/route_unspecified.go @@ -2,6 +2,8 @@ package netlink +import "strconv" + func (r *Route) ListFlags() []string { return []string{} } @@ -9,3 +11,11 @@ func (r *Route) ListFlags() []string { func (n *NexthopInfo) ListFlags() []string { return []string{} } + +func (s Scope) String() string { + return "unknown" +} + +func (p RouteProtocol) String() string { + return strconv.Itoa(int(p)) +} diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go index 7fc8ae5df..9d74c7cd8 100644 --- a/vendor/github.com/vishvananda/netlink/rule.go +++ b/vendor/github.com/vishvananda/netlink/rule.go @@ -10,8 +10,9 @@ type Rule struct { Priority int Family int Table int - Mark int - Mask int + Mark uint32 + Mask *uint32 + Tos uint TunID uint Goto int Src *net.IPNet @@ -22,10 +23,27 @@ type Rule struct { SuppressIfgroup int SuppressPrefixlen int Invert bool + Dport *RulePortRange + Sport *RulePortRange + IPProto int + UIDRange *RuleUIDRange + Protocol uint8 + Type uint8 } func (r Rule) String() string { - return fmt.Sprintf("ip rule %d: from %s table %d", r.Priority, r.Src, r.Table) + from := "all" + if r.Src != nil && r.Src.String() != "" { + from = r.Src.String() + } + + to := "all" + if r.Dst != nil && r.Dst.String() != "" { + to = r.Dst.String() + } + + return fmt.Sprintf("ip rule %d: from %s to %s table %d %s", + r.Priority, from, to, r.Table, r.typeString()) } // NewRule return empty rules. @@ -34,9 +52,31 @@ func NewRule() *Rule { SuppressIfgroup: -1, SuppressPrefixlen: -1, Priority: -1, - Mark: -1, - Mask: -1, + Mark: 0, + Mask: nil, Goto: -1, Flow: -1, } } + +// NewRulePortRange creates rule sport/dport range. +func NewRulePortRange(start, end uint16) *RulePortRange { + return &RulePortRange{Start: start, End: end} +} + +// RulePortRange represents rule sport/dport range. +type RulePortRange struct { + Start uint16 + End uint16 +} + +// NewRuleUIDRange creates rule uid range. +func NewRuleUIDRange(start, end uint32) *RuleUIDRange { + return &RuleUIDRange{Start: start, End: end} +} + +// RuleUIDRange represents rule uid range. 
+type RuleUIDRange struct { + Start uint32 + End uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index e12569fe4..ddff99cfa 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "bytes" "fmt" "net" @@ -42,8 +43,8 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg.Protocol = unix.RTPROT_BOOT msg.Scope = unix.RT_SCOPE_UNIVERSE msg.Table = unix.RT_TABLE_UNSPEC - msg.Type = unix.RTN_UNSPEC - if req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { + msg.Type = rule.Type // usually 0, same as unix.RTN_UNSPEC + if msg.Type == 0 && req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { msg.Type = unix.RTN_UNICAST } if rule.Invert { @@ -55,6 +56,9 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { if rule.Table >= 0 && rule.Table < 256 { msg.Table = uint8(rule.Table) } + if rule.Tos != 0 { + msg.Tos = uint8(rule.Tos) + } var dstFamily uint8 var rtAttrs []*nl.RtAttr @@ -93,21 +97,19 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(rtAttrs[i]) } - native := nl.NativeEndian() - if rule.Priority >= 0 { b := make([]byte, 4) native.PutUint32(b, uint32(rule.Priority)) req.AddData(nl.NewRtAttr(nl.FRA_PRIORITY, b)) } - if rule.Mark >= 0 { + if rule.Mark != 0 || rule.Mask != nil { b := make([]byte, 4) - native.PutUint32(b, uint32(rule.Mark)) + native.PutUint32(b, rule.Mark) req.AddData(nl.NewRtAttr(nl.FRA_FWMARK, b)) } - if rule.Mask >= 0 { + if rule.Mask != nil { b := make([]byte, 4) - native.PutUint32(b, uint32(rule.Mask)) + native.PutUint32(b, *rule.Mask) req.AddData(nl.NewRtAttr(nl.FRA_FWMASK, b)) } if rule.Flow >= 0 { @@ -138,10 +140,10 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { } } if rule.IifName != "" { - req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName))) + req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName+"\x00"))) } if rule.OifName != "" { - req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName))) + req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName+"\x00"))) } if rule.Goto >= 0 { msg.Type = nl.FR_ACT_GOTO @@ -150,6 +152,31 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) } + if rule.IPProto > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.IPProto)) + req.AddData(nl.NewRtAttr(nl.FRA_IP_PROTO, b)) + } + + if rule.Dport != nil { + b := rule.Dport.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_DPORT_RANGE, b)) + } + + if rule.Sport != nil { + b := rule.Sport.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_SPORT_RANGE, b)) + } + + if rule.UIDRange != nil { + b := rule.UIDRange.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_UID_RANGE, b)) + } + + if rule.Protocol > 0 { + req.AddData(nl.NewRtAttr(nl.FRA_PROTOCOL, nl.Uint8Attr(rule.Protocol))) + } + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -163,6 +190,19 @@ func RuleList(family int) ([]Rule, error) { // RuleList lists rules in the system. // Equivalent to: ip rule list func (h *Handle) RuleList(family int) ([]Rule, error) { + return h.RuleListFiltered(family, nil, 0) +} + +// RuleListFiltered gets a list of rules in the system filtered by the +// specified rule template `filter`. 
+// Equivalent to: ip rule list +func RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { + return pkgHandle.RuleListFiltered(family, filter, filterMask) +} + +// RuleListFiltered lists rules in the system. +// Equivalent to: ip rule list +func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) @@ -172,7 +212,6 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { return nil, err } - native := nl.NativeEndian() var res = make([]Rule, 0) for i := range msgs { msg := nl.DeserializeRtMsg(msgs[i]) @@ -182,8 +221,11 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { } rule := NewRule() + rule.Priority = 0 // The default priority from kernel rule.Invert = msg.Flags&FibRuleInvert > 0 + rule.Family = int(msg.Family) + rule.Tos = uint(msg.Tos) for j := range attrs { switch attrs[j].Attr.Type { @@ -200,11 +242,12 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)), } case nl.FRA_FWMARK: - rule.Mark = int(native.Uint32(attrs[j].Value[0:4])) + rule.Mark = native.Uint32(attrs[j].Value[0:4]) case nl.FRA_FWMASK: - rule.Mask = int(native.Uint32(attrs[j].Value[0:4])) + mask := native.Uint32(attrs[j].Value[0:4]) + rule.Mask = &mask case nl.FRA_TUN_ID: - rule.TunID = uint(native.Uint64(attrs[j].Value[0:4])) + rule.TunID = uint(native.Uint64(attrs[j].Value[0:8])) case nl.FRA_IIFNAME: rule.IifName = string(attrs[j].Value[:len(attrs[j].Value)-1]) case nl.FRA_OIFNAME: @@ -225,10 +268,98 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { rule.Goto = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_PRIORITY: rule.Priority = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_IP_PROTO: + rule.IPProto = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_DPORT_RANGE: + rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) + case nl.FRA_SPORT_RANGE: + rule.Sport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) + case nl.FRA_UID_RANGE: + rule.UIDRange = NewRuleUIDRange(native.Uint32(attrs[j].Value[0:4]), native.Uint32(attrs[j].Value[4:8])) + case nl.FRA_PROTOCOL: + rule.Protocol = uint8(attrs[j].Value[0]) + } + } + + if filter != nil { + switch { + case filterMask&RT_FILTER_SRC != 0 && + (rule.Src == nil || rule.Src.String() != filter.Src.String()): + continue + case filterMask&RT_FILTER_DST != 0 && + (rule.Dst == nil || rule.Dst.String() != filter.Dst.String()): + continue + case filterMask&RT_FILTER_TABLE != 0 && + filter.Table != unix.RT_TABLE_UNSPEC && rule.Table != filter.Table: + continue + case filterMask&RT_FILTER_TOS != 0 && rule.Tos != filter.Tos: + continue + case filterMask&RT_FILTER_PRIORITY != 0 && rule.Priority != filter.Priority: + continue + case filterMask&RT_FILTER_MARK != 0 && rule.Mark != filter.Mark: + continue + case filterMask&RT_FILTER_MASK != 0 && !ptrEqual(rule.Mask, filter.Mask): + continue } } + res = append(res, *rule) } return res, nil } + +func (pr *RulePortRange) toRtAttrData() []byte { + b := [][]byte{make([]byte, 2), make([]byte, 2)} + native.PutUint16(b[0], pr.Start) + native.PutUint16(b[1], pr.End) + return bytes.Join(b, []byte{}) +} + +func (pr *RuleUIDRange) toRtAttrData() []byte { + b := [][]byte{make([]byte, 4), make([]byte, 4)} + native.PutUint32(b[0], pr.Start) + native.PutUint32(b[1], pr.End) + 
return bytes.Join(b, []byte{}) +} + +func ptrEqual(a, b *uint32) bool { + if a == b { + return true + } + if (a == nil) || (b == nil) { + return false + } + return *a == *b +} + +func (r Rule) typeString() string { + switch r.Type { + case unix.RTN_UNSPEC: // zero + return "" + case unix.RTN_UNICAST: + return "" + case unix.RTN_LOCAL: + return "local" + case unix.RTN_BROADCAST: + return "broadcast" + case unix.RTN_ANYCAST: + return "anycast" + case unix.RTN_MULTICAST: + return "multicast" + case unix.RTN_BLACKHOLE: + return "blackhole" + case unix.RTN_UNREACHABLE: + return "unreachable" + case unix.RTN_PROHIBIT: + return "prohibit" + case unix.RTN_THROW: + return "throw" + case unix.RTN_NAT: + return "nat" + case unix.RTN_XRESOLVE: + return "xresolve" + default: + return fmt.Sprintf("type(0x%x)", r.Type) + } +} diff --git a/vendor/github.com/vishvananda/netlink/rule_nonlinux.go b/vendor/github.com/vishvananda/netlink/rule_nonlinux.go new file mode 100644 index 000000000..2b19aa64c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/rule_nonlinux.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package netlink + +func (r Rule) typeString() string { + return "" +} diff --git a/vendor/github.com/vishvananda/netlink/socket.go b/vendor/github.com/vishvananda/netlink/socket.go index 41aa72624..e65efb130 100644 --- a/vendor/github.com/vishvananda/netlink/socket.go +++ b/vendor/github.com/vishvananda/netlink/socket.go @@ -25,3 +25,80 @@ type Socket struct { UID uint32 INode uint32 } + +// UnixSocket represents a netlink unix socket. +type UnixSocket struct { + Type uint8 + Family uint8 + State uint8 + pad uint8 + INode uint32 + Cookie [2]uint32 +} + +// XDPSocket represents an XDP socket (and the common diagnosis part in +// particular). Please note that in contrast to [UnixSocket] the XDPSocket type +// does not feature “State” information. +type XDPSocket struct { + // xdp_diag_msg + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L21 + Family uint8 + Type uint8 + pad uint16 + Ino uint32 + Cookie [2]uint32 +} + +type XDPInfo struct { + // XDP_DIAG_INFO/xdp_diag_info + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L51 + Ifindex uint32 + QueueID uint32 + + // XDP_DIAG_UID + UID uint32 + + // XDP_RX_RING + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L56 + RxRingEntries uint32 + TxRingEntries uint32 + UmemFillRingEntries uint32 + UmemCompletionRingEntries uint32 + + // XDR_DIAG_UMEM + Umem *XDPDiagUmem + + // XDR_DIAG_STATS + Stats *XDPDiagStats +} + +const ( + XDP_DU_F_ZEROCOPY = 1 << iota +) + +// XDPDiagUmem describes the umem attached to an XDP socket. +// +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L62 +type XDPDiagUmem struct { + Size uint64 + ID uint32 + NumPages uint32 + ChunkSize uint32 + Headroom uint32 + Ifindex uint32 + QueueID uint32 + Flags uint32 + Refs uint32 +} + +// XDPDiagStats contains ring statistics for an XDP socket. 
+// +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L74 +type XDPDiagStats struct { + RxDropped uint64 + RxInvalid uint64 + RxFull uint64 + FillRingEmpty uint64 + TxInvalid uint64 + TxRingEmpty uint64 +} diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go index c4d89c17e..4eb4aeafb 100644 --- a/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -4,15 +4,18 @@ import ( "errors" "fmt" "net" + "syscall" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) const ( - sizeofSocketID = 0x30 - sizeofSocketRequest = sizeofSocketID + 0x8 - sizeofSocket = sizeofSocketID + 0x18 + sizeofSocketID = 0x30 + sizeofSocketRequest = sizeofSocketID + 0x8 + sizeofSocket = sizeofSocketID + 0x18 + sizeofUnixSocketRequest = 0x18 // 24 byte + sizeofUnixSocket = 0x10 // 16 byte ) type socketRequest struct { @@ -49,10 +52,13 @@ func (r *socketRequest) Serialize() []byte { native.PutUint32(b.Next(4), r.States) networkOrder.PutUint16(b.Next(2), r.ID.SourcePort) networkOrder.PutUint16(b.Next(2), r.ID.DestinationPort) - copy(b.Next(4), r.ID.Source.To4()) - b.Next(12) - copy(b.Next(4), r.ID.Destination.To4()) - b.Next(12) + if r.Family == unix.AF_INET6 { + copy(b.Next(16), r.ID.Source) + copy(b.Next(16), r.ID.Destination) + } else { + copy(b.Next(16), r.ID.Source.To4()) + copy(b.Next(16), r.ID.Destination.To4()) + } native.PutUint32(b.Next(4), r.ID.Interface) native.PutUint32(b.Next(4), r.ID.Cookie[0]) native.PutUint32(b.Next(4), r.ID.Cookie[1]) @@ -61,6 +67,32 @@ func (r *socketRequest) Serialize() []byte { func (r *socketRequest) Len() int { return sizeofSocketRequest } +// According to linux/include/uapi/linux/unix_diag.h +type unixSocketRequest struct { + Family uint8 + Protocol uint8 + pad uint16 + States uint32 + INode uint32 + Show uint32 + Cookie [2]uint32 +} + +func (r *unixSocketRequest) Serialize() []byte { + b := writeBuffer{Bytes: make([]byte, sizeofUnixSocketRequest)} + b.Write(r.Family) + b.Write(r.Protocol) + native.PutUint16(b.Next(2), r.pad) + native.PutUint32(b.Next(4), r.States) + native.PutUint32(b.Next(4), r.INode) + native.PutUint32(b.Next(4), r.Show) + native.PutUint32(b.Next(4), r.Cookie[0]) + native.PutUint32(b.Next(4), r.Cookie[1]) + return b.Bytes +} + +func (r *unixSocketRequest) Len() int { return sizeofUnixSocketRequest } + type readBuffer struct { Bytes []byte pos int @@ -89,10 +121,15 @@ func (s *Socket) deserialize(b []byte) error { s.Retrans = rb.Read() s.ID.SourcePort = networkOrder.Uint16(rb.Next(2)) s.ID.DestinationPort = networkOrder.Uint16(rb.Next(2)) - s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) - rb.Next(12) - s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) - rb.Next(12) + if s.Family == unix.AF_INET6 { + s.ID.Source = net.IP(rb.Next(16)) + s.ID.Destination = net.IP(rb.Next(16)) + } else { + s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) + rb.Next(12) + s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) + rb.Next(12) + } s.ID.Interface = native.Uint32(rb.Next(4)) s.ID.Cookie[0] = native.Uint32(rb.Next(4)) s.ID.Cookie[1] = native.Uint32(rb.Next(4)) @@ -104,31 +141,126 @@ func (s *Socket) deserialize(b []byte) error { return nil } +func (u *UnixSocket) deserialize(b []byte) error { + if len(b) < sizeofUnixSocket { + return fmt.Errorf("unix diag data short read (%d); want %d", len(b), sizeofUnixSocket) + } + rb := 
readBuffer{Bytes: b} + u.Type = rb.Read() + u.Family = rb.Read() + u.State = rb.Read() + u.pad = rb.Read() + u.INode = native.Uint32(rb.Next(4)) + u.Cookie[0] = native.Uint32(rb.Next(4)) + u.Cookie[1] = native.Uint32(rb.Next(4)) + return nil +} + +// SocketGet returns the Socket identified by its local and remote addresses. +func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) { + var protocol uint8 + var localIP, remoteIP net.IP + var localPort, remotePort uint16 + switch l := local.(type) { + case *net.TCPAddr: + r, ok := remote.(*net.TCPAddr) + if !ok { + return nil, ErrNotImplemented + } + localIP = l.IP + localPort = uint16(l.Port) + remoteIP = r.IP + remotePort = uint16(r.Port) + protocol = unix.IPPROTO_TCP + case *net.UDPAddr: + r, ok := remote.(*net.UDPAddr) + if !ok { + return nil, ErrNotImplemented + } + localIP = l.IP + localPort = uint16(l.Port) + remoteIP = r.IP + remotePort = uint16(r.Port) + protocol = unix.IPPROTO_UDP + default: + return nil, ErrNotImplemented + } + + var family uint8 + if localIP.To4() != nil && remoteIP.To4() != nil { + family = unix.AF_INET + } + + if family == 0 && localIP.To16() != nil && remoteIP.To16() != nil { + family = unix.AF_INET6 + } + + if family == 0 { + return nil, ErrNotImplemented + } + + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: protocol, + States: 0xffffffff, + ID: SocketID{ + SourcePort: localPort, + DestinationPort: remotePort, + Source: localIP, + Destination: remoteIP, + Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE}, + }, + }) + + msgs, err := req.Execute(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY) + if err != nil { + return nil, err + } + if len(msgs) == 0 { + return nil, errors.New("no message nor error from netlink") + } + if len(msgs) > 2 { + return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs)) + } + + sock := &Socket{} + if err := sock.deserialize(msgs[0]); err != nil { + return nil, err + } + return sock, nil +} + // SocketGet returns the Socket identified by its local and remote addresses. func SocketGet(local, remote net.Addr) (*Socket, error) { + return pkgHandle.SocketGet(local, remote) +} + +// SocketDestroy kills the Socket identified by its local and remote addresses. +func (h *Handle) SocketDestroy(local, remote net.Addr) error { localTCP, ok := local.(*net.TCPAddr) if !ok { - return nil, ErrNotImplemented + return ErrNotImplemented } remoteTCP, ok := remote.(*net.TCPAddr) if !ok { - return nil, ErrNotImplemented + return ErrNotImplemented } localIP := localTCP.IP.To4() if localIP == nil { - return nil, ErrNotImplemented + return ErrNotImplemented } remoteIP := remoteTCP.IP.To4() if remoteIP == nil { - return nil, ErrNotImplemented + return ErrNotImplemented } s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) if err != nil { - return nil, err + return err } defer s.Close() - req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0) + req := h.newNetlinkRequest(nl.SOCK_DESTROY, unix.NLM_F_ACK) req.AddData(&socketRequest{ Family: unix.AF_INET, Protocol: unix.IPPROTO_TCP, @@ -140,23 +272,319 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE}, }, }) - s.Send(req) - msgs, from, err := s.Receive() + + _, err = req.Execute(unix.NETLINK_INET_DIAG, 0) + return err +} + +// SocketDestroy kills the Socket identified by its local and remote addresses. 
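// (Editor's illustrative note, not part of the upstream change.) A minimal
// sketch, assuming IPv4 TCP endpoints (other address or family types return
// ErrNotImplemented) and that the net, log and netlink packages are imported;
// the addresses and ports are placeholders:
//
//	local := &net.TCPAddr{IP: net.ParseIP("192.0.2.1"), Port: 8080}
//	remote := &net.TCPAddr{IP: net.ParseIP("192.0.2.50"), Port: 43522}
//	if err := netlink.SocketDestroy(local, remote); err != nil {
//		log.Fatal(err)
//	}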
+func SocketDestroy(local, remote net.Addr) error { + return pkgHandle.SocketDestroy(local, remote) +} + +// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. +func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_TCP, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*InetDiagTCPInfoResp + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false + } + + var res *InetDiagTCPInfoResp + if res, err = attrsToInetDiagTCPInfoResp(attrs, sockInfo); err != nil { + return false + } + + result = append(result, res) + return true + }) + + if err != nil { + return nil, err + } + return result, nil +} + +// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. +func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { + return pkgHandle.SocketDiagTCPInfo(family) +} + +// SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. +func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_TCP, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*Socket + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + result = append(result, sockInfo) + return true + }) if err != nil { return nil, err } - if from.Pid != nl.PidKernel { - return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return result, nil +} + +// SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. +func SocketDiagTCP(family uint8) ([]*Socket, error) { + return pkgHandle.SocketDiagTCP(family) +} + +// SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. 
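Assuming a Linux host with the inet_diag netlink family available, a short sketch of consuming the SocketDiagTCPInfo dump added above; it prints only fields that appear in this patch and tolerates entries for which the kernel attached no TCP_INFO attribute.

package main

import (
    "fmt"
    "log"

    "github.com/vishvananda/netlink"
    "golang.org/x/sys/unix"
)

func main() {
    // One dump per address family; AF_INET6 works the same way.
    res, err := netlink.SocketDiagTCPInfo(unix.AF_INET)
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range res {
        if r.TCPInfo == nil {
            continue // no INET_DIAG_INFO attribute for this socket
        }
        id := r.InetDiagMsg.ID
        fmt.Printf("%v:%d -> %v:%d rtt=%dus cwnd=%d\n",
            id.Source, id.SourcePort, id.Destination, id.DestinationPort,
            r.TCPInfo.Rtt, r.TCPInfo.Snd_cwnd)
    }
}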
+func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { + // Construct the request + var extensions uint8 + extensions = 1 << (INET_DIAG_VEGASINFO - 1) + extensions |= 1 << (INET_DIAG_INFO - 1) + extensions |= 1 << (INET_DIAG_MEMINFO - 1) + + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_UDP, + Ext: extensions, + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*InetDiagUDPInfoResp + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false + } + + var res *InetDiagUDPInfoResp + if res, err = attrsToInetDiagUDPInfoResp(attrs, sockInfo); err != nil { + return false + } + + result = append(result, res) + return true + }) + if err != nil { + return nil, err } - if len(msgs) == 0 { - return nil, errors.New("no message nor error from netlink") + return result, nil +} + +// SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. +func SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { + return pkgHandle.SocketDiagUDPInfo(family) +} + +// SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_UDP, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*Socket + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + result = append(result, sockInfo) + return true + }) + if err != nil { + return nil, err } - if len(msgs) > 2 { - return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs)) + return result, nil +} + +// SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +func SocketDiagUDP(family uint8) ([]*Socket, error) { + return pkgHandle.SocketDiagUDP(family) +} + +// UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. 
+func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { + // Construct the request + var extensions uint8 + extensions = 1 << UNIX_DIAG_NAME + extensions |= 1 << UNIX_DIAG_PEER + extensions |= 1 << UNIX_DIAG_RQLEN + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&unixSocketRequest{ + Family: unix.AF_UNIX, + States: ^uint32(0), // all states + Show: uint32(extensions), + }) + + var result []*UnixDiagInfoResp + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &UnixSocket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + + // Diagnosis also delivers sockets with AF_INET family, filter those + if sockInfo.Family != unix.AF_UNIX { + return false + } + + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false + } + + var res *UnixDiagInfoResp + if res, err = attrsToUnixDiagInfoResp(attrs, sockInfo); err != nil { + return false + } + result = append(result, res) + return true + }) + if err != nil { + return nil, err } - sock := &Socket{} - if err := sock.deserialize(msgs[0].Data); err != nil { + return result, nil +} + +// UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. +func UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { + return pkgHandle.UnixSocketDiagInfo() +} + +// UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. +func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&unixSocketRequest{ + Family: unix.AF_UNIX, + States: ^uint32(0), // all states + }) + + var result []*UnixSocket + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &UnixSocket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + + // Diagnosis also delivers sockets with AF_INET family, filter those + if sockInfo.Family == unix.AF_UNIX { + result = append(result, sockInfo) + } + return true + }) + if err != nil { return nil, err } - return sock, nil + return result, nil +} + +// UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. 
+func UnixSocketDiag() ([]*UnixSocket, error) { + return pkgHandle.UnixSocketDiag() +} + +func attrsToInetDiagTCPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagTCPInfoResp, error) { + info := &InetDiagTCPInfoResp{ + InetDiagMsg: sockInfo, + } + for _, a := range attrs { + switch a.Attr.Type { + case INET_DIAG_INFO: + info.TCPInfo = &TCPInfo{} + if err := info.TCPInfo.deserialize(a.Value); err != nil { + return nil, err + } + case INET_DIAG_BBRINFO: + info.TCPBBRInfo = &TCPBBRInfo{} + if err := info.TCPBBRInfo.deserialize(a.Value); err != nil { + return nil, err + } + } + } + + return info, nil +} + +func attrsToInetDiagUDPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagUDPInfoResp, error) { + info := &InetDiagUDPInfoResp{ + InetDiagMsg: sockInfo, + } + for _, a := range attrs { + switch a.Attr.Type { + case INET_DIAG_MEMINFO: + info.Memory = &MemInfo{} + if err := info.Memory.deserialize(a.Value); err != nil { + return nil, err + } + } + } + + return info, nil +} + +func attrsToUnixDiagInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *UnixSocket) (*UnixDiagInfoResp, error) { + info := &UnixDiagInfoResp{ + DiagMsg: sockInfo, + } + for _, a := range attrs { + switch a.Attr.Type { + case UNIX_DIAG_NAME: + name := string(a.Value[:a.Attr.Len]) + info.Name = &name + case UNIX_DIAG_PEER: + peer := native.Uint32(a.Value) + info.Peer = &peer + case UNIX_DIAG_RQLEN: + info.Queue = &QueueInfo{ + RQueue: native.Uint32(a.Value[:4]), + WQueue: native.Uint32(a.Value[4:]), + } + // default: + // fmt.Println("unknown unix attribute type", a.Attr.Type, "with data", a.Value) + } + } + + return info, nil } diff --git a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go new file mode 100644 index 000000000..20c82f9c7 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go @@ -0,0 +1,195 @@ +package netlink + +import ( + "errors" + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +const ( + sizeofXDPSocketRequest = 1 + 1 + 2 + 4 + 4 + 2*4 + sizeofXDPSocket = 0x10 +) + +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L12 +type xdpSocketRequest struct { + Family uint8 + Protocol uint8 + pad uint16 + Ino uint32 + Show uint32 + Cookie [2]uint32 +} + +func (r *xdpSocketRequest) Serialize() []byte { + b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)} + b.Write(r.Family) + b.Write(r.Protocol) + native.PutUint16(b.Next(2), r.pad) + native.PutUint32(b.Next(4), r.Ino) + native.PutUint32(b.Next(4), r.Show) + native.PutUint32(b.Next(4), r.Cookie[0]) + native.PutUint32(b.Next(4), r.Cookie[1]) + return b.Bytes +} + +func (r *xdpSocketRequest) Len() int { return sizeofXDPSocketRequest } + +func (s *XDPSocket) deserialize(b []byte) error { + if len(b) < sizeofXDPSocket { + return fmt.Errorf("XDP socket data short read (%d); want %d", len(b), sizeofXDPSocket) + } + rb := readBuffer{Bytes: b} + s.Family = rb.Read() + s.Type = rb.Read() + s.pad = native.Uint16(rb.Next(2)) + s.Ino = native.Uint32(rb.Next(4)) + s.Cookie[0] = native.Uint32(rb.Next(4)) + s.Cookie[1] = native.Uint32(rb.Next(4)) + return nil +} + +// XDPSocketGet returns the XDP socket identified by its inode number and/or +// socket cookie. Specify the cookie as SOCK_ANY_COOKIE if +func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) { + // We have a problem here: dumping AF_XDP sockets currently does not support + // filtering. 
We thus need to dump all XSKs and then only filter afterwards + // :( + xsks, err := SocketDiagXDP() + if err != nil { + return nil, err + } + checkCookie := cookie != SOCK_ANY_COOKIE && cookie != 0 + crumblingCookie := [2]uint32{uint32(cookie), uint32(cookie >> 32)} + checkIno := ino != 0 + var xskinfo *XDPDiagInfoResp + for _, xsk := range xsks { + if checkIno && xsk.XDPDiagMsg.Ino != ino { + continue + } + if checkCookie && xsk.XDPDiagMsg.Cookie != crumblingCookie { + continue + } + if xskinfo != nil { + return nil, errors.New("multiple matching XDP sockets") + } + xskinfo = xsk + } + if xskinfo == nil { + return nil, errors.New("no matching XDP socket") + } + return xskinfo, nil +} + +// SocketDiagXDP requests XDP_DIAG_INFO for XDP family sockets. +func SocketDiagXDP() ([]*XDPDiagInfoResp, error) { + var result []*XDPDiagInfoResp + err := socketDiagXDPExecutor(func(m syscall.NetlinkMessage) error { + sockInfo := &XDPSocket{} + if err := sockInfo.deserialize(m.Data); err != nil { + return err + } + attrs, err := nl.ParseRouteAttr(m.Data[sizeofXDPSocket:]) + if err != nil { + return err + } + + res, err := attrsToXDPDiagInfoResp(attrs, sockInfo) + if err != nil { + return err + } + + result = append(result, res) + return nil + }) + if err != nil { + return nil, err + } + return result, nil +} + +// socketDiagXDPExecutor requests XDP_DIAG_INFO for XDP family sockets. +func socketDiagXDPExecutor(receiver func(syscall.NetlinkMessage) error) error { + s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + if err != nil { + return err + } + defer s.Close() + + req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&xdpSocketRequest{ + Family: unix.AF_XDP, + Show: XDP_SHOW_INFO | XDP_SHOW_RING_CFG | XDP_SHOW_UMEM | XDP_SHOW_STATS, + }) + if err := s.Send(req); err != nil { + return err + } + +loop: + for { + msgs, from, err := s.Receive() + if err != nil { + return err + } + if from.Pid != nl.PidKernel { + return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + } + if len(msgs) == 0 { + return errors.New("no message nor error from netlink") + } + + for _, m := range msgs { + switch m.Header.Type { + case unix.NLMSG_DONE: + break loop + case unix.NLMSG_ERROR: + error := int32(native.Uint32(m.Data[0:4])) + return syscall.Errno(-error) + } + if err := receiver(m); err != nil { + return err + } + } + } + return nil +} + +func attrsToXDPDiagInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *XDPSocket) (*XDPDiagInfoResp, error) { + resp := &XDPDiagInfoResp{ + XDPDiagMsg: sockInfo, + XDPInfo: &XDPInfo{}, + } + for _, a := range attrs { + switch a.Attr.Type { + case XDP_DIAG_INFO: + resp.XDPInfo.Ifindex = native.Uint32(a.Value[0:4]) + resp.XDPInfo.QueueID = native.Uint32(a.Value[4:8]) + case XDP_DIAG_UID: + resp.XDPInfo.UID = native.Uint32(a.Value[0:4]) + case XDP_DIAG_RX_RING: + resp.XDPInfo.RxRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_TX_RING: + resp.XDPInfo.TxRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM_FILL_RING: + resp.XDPInfo.UmemFillRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM_COMPLETION_RING: + resp.XDPInfo.UmemCompletionRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM: + umem := &XDPDiagUmem{} + if err := umem.deserialize(a.Value); err != nil { + return nil, err + } + resp.XDPInfo.Umem = umem + case XDP_DIAG_STATS: + stats := &XDPDiagStats{} + if err := stats.deserialize(a.Value); err != nil { + return nil, err + } + resp.XDPInfo.Stats = stats + } + } + return resp, nil +} diff 
--git a/vendor/github.com/vishvananda/netlink/tcp.go b/vendor/github.com/vishvananda/netlink/tcp.go new file mode 100644 index 000000000..43f80a0fc --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/tcp.go @@ -0,0 +1,92 @@ +package netlink + +// TCP States +const ( + TCP_ESTABLISHED = iota + 0x01 + TCP_SYN_SENT + TCP_SYN_RECV + TCP_FIN_WAIT1 + TCP_FIN_WAIT2 + TCP_TIME_WAIT + TCP_CLOSE + TCP_CLOSE_WAIT + TCP_LAST_ACK + TCP_LISTEN + TCP_CLOSING + TCP_NEW_SYN_REC + TCP_MAX_STATES +) + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Snd_wscale uint8 // no uint4 + Rcv_wscale uint8 + Delivery_rate_app_limited uint8 + Fastopen_client_fail uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ + Bytes_received uint64 /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ + Segs_out uint32 /* RFC4898 tcpEStatsPerfSegsOut */ + Segs_in uint32 /* RFC4898 tcpEStatsPerfSegsIn */ + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 /* RFC4898 tcpEStatsDataSegsIn */ + Data_segs_out uint32 /* RFC4898 tcpEStatsDataSegsOut */ + Delivery_rate uint64 + Busy_time uint64 /* Time (usec) busy sending data */ + Rwnd_limited uint64 /* Time (usec) limited by receive window */ + Sndbuf_limited uint64 /* Time (usec) limited by send buffer */ + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ + Bytes_retrans uint64 /* RFC4898 tcpEStatsPerfOctetsRetrans */ + Dsack_dups uint32 /* RFC4898 tcpEStatsStackDSACKDups */ + Reord_seen uint32 /* reordering events seen */ + Rcv_ooopack uint32 /* Out-of-order packets received */ + Snd_wnd uint32 /* peer's advertised receive window after * scaling (bytes) */ +} + +type TCPBBRInfo struct { + BBRBW uint64 + BBRMinRTT uint32 + BBRPacingGain uint32 + BBRCwndGain uint32 +} + +// According to https://man7.org/linux/man-pages/man7/sock_diag.7.html +type MemInfo struct { + RMem uint32 + WMem uint32 + FMem uint32 + TMem uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/tcp_linux.go b/vendor/github.com/vishvananda/netlink/tcp_linux.go new file mode 100644 index 000000000..e98036da5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/tcp_linux.go @@ -0,0 +1,368 @@ +package netlink + +import ( + "bytes" + "errors" + "io" +) + +const ( + tcpBBRInfoLen = 20 + memInfoLen = 16 +) + +func checkDeserErr(err error) error { + if err == io.EOF { + return nil + } + return err +} + +func (t *TCPInfo) deserialize(b []byte) error { + var err error + rb := bytes.NewBuffer(b) + + t.State, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Ca_state, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Retransmits, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Probes, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Backoff, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Options, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) 
+ } + + scales, err := rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Snd_wscale = scales >> 4 // first 4 bits + t.Rcv_wscale = scales & 0xf // last 4 bits + + rateLimAndFastOpen, err := rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Delivery_rate_app_limited = rateLimAndFastOpen >> 7 // get first bit + t.Fastopen_client_fail = rateLimAndFastOpen >> 5 & 3 // get next two bits + + next := rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rto = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Ato = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_mss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_mss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Unacked = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Sacked = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Lost = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Retrans = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Fackets = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_data_sent = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_ack_sent = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_data_recv = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_ack_recv = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Pmtu = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_ssthresh = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rtt = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rttvar = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_ssthresh = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_cwnd = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Advmss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Reordering = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_rtt = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_space = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Total_retrans = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Pacing_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Max_pacing_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_acked = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_received = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Segs_out = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Segs_in = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Notsent_bytes = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Min_rtt = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 
{ + return nil + } + t.Data_segs_in = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Data_segs_out = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Delivery_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Busy_time = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Rwnd_limited = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Sndbuf_limited = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Delivered = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Delivered_ce = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_sent = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_retrans = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Dsack_dups = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Reord_seen = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_ooopack = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_wnd = native.Uint32(next) + return nil +} + +func (t *TCPBBRInfo) deserialize(b []byte) error { + if len(b) != tcpBBRInfoLen { + return errors.New("Invalid length") + } + + rb := bytes.NewBuffer(b) + t.BBRBW = native.Uint64(rb.Next(8)) + t.BBRMinRTT = native.Uint32(rb.Next(4)) + t.BBRPacingGain = native.Uint32(rb.Next(4)) + t.BBRCwndGain = native.Uint32(rb.Next(4)) + + return nil +} + +func (m *MemInfo) deserialize(b []byte) error { + if len(b) != memInfoLen { + return errors.New("Invalid length") + } + + rb := bytes.NewBuffer(b) + m.RMem = native.Uint32(rb.Next(4)) + m.WMem = native.Uint32(rb.Next(4)) + m.FMem = native.Uint32(rb.Next(4)) + m.TMem = native.Uint32(rb.Next(4)) + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/unix_diag.go b/vendor/github.com/vishvananda/netlink/unix_diag.go new file mode 100644 index 000000000..d81776f36 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/unix_diag.go @@ -0,0 +1,27 @@ +package netlink + +// According to linux/include/uapi/linux/unix_diag.h +const ( + UNIX_DIAG_NAME = iota + UNIX_DIAG_VFS + UNIX_DIAG_PEER + UNIX_DIAG_ICONS + UNIX_DIAG_RQLEN + UNIX_DIAG_MEMINFO + UNIX_DIAG_SHUTDOWN + UNIX_DIAG_UID + UNIX_DIAG_MAX +) + +type UnixDiagInfoResp struct { + DiagMsg *UnixSocket + Name *string + Peer *uint32 + Queue *QueueInfo + Shutdown *uint8 +} + +type QueueInfo struct { + RQueue uint32 + WQueue uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/vdpa_linux.go new file mode 100644 index 000000000..7c15986d0 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/vdpa_linux.go @@ -0,0 +1,463 @@ +package netlink + +import ( + "fmt" + "net" + "syscall" + + "golang.org/x/sys/unix" + + "github.com/vishvananda/netlink/nl" +) + +type vdpaDevID struct { + Name string + ID uint32 +} + +// VDPADev contains info about VDPA device +type VDPADev struct { + vdpaDevID + VendorID uint32 + MaxVQS uint32 + MaxVQSize uint16 + MinVQSize uint16 +} + +// VDPADevConfig contains configuration of the VDPA device +type VDPADevConfig struct { + vdpaDevID + Features uint64 + NegotiatedFeatures uint64 + Net VDPADevConfigNet +} + +// VDPADevVStats conatins vStats for the VDPA device +type 
VDPADevVStats struct { + vdpaDevID + QueueIndex uint32 + Vendor []VDPADevVStatsVendor + NegotiatedFeatures uint64 +} + +// VDPADevVStatsVendor conatins name and value for vendor specific vstat option +type VDPADevVStatsVendor struct { + Name string + Value uint64 +} + +// VDPADevConfigNet conatins status and net config for the VDPA device +type VDPADevConfigNet struct { + Status VDPADevConfigNetStatus + Cfg VDPADevConfigNetCfg +} + +// VDPADevConfigNetStatus contains info about net status +type VDPADevConfigNetStatus struct { + LinkUp bool + Announce bool +} + +// VDPADevConfigNetCfg contains net config for the VDPA device +type VDPADevConfigNetCfg struct { + MACAddr net.HardwareAddr + MaxVQP uint16 + MTU uint16 +} + +// VDPAMGMTDev conatins info about VDPA management device +type VDPAMGMTDev struct { + BusName string + DevName string + SupportedClasses uint64 + SupportedFeatures uint64 + MaxVQS uint32 +} + +// VDPANewDevParams contains parameters for new VDPA device +// use SetBits to configure requried features for the device +// example: +// +// VDPANewDevParams{Features: SetBits(0, VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_MAC_ADDR)} +type VDPANewDevParams struct { + MACAddr net.HardwareAddr + MaxVQP uint16 + MTU uint16 + Features uint64 +} + +// SetBits set provided bits in the uint64 input value +// usage example: +// features := SetBits(0, VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_MAC_ADDR) +func SetBits(input uint64, pos ...int) uint64 { + for _, p := range pos { + input |= 1 << uint64(p) + } + return input +} + +// IsBitSet check if specific bit is set in the uint64 input value +// usage example: +// hasNetClass := IsBitSet(mgmtDev, VIRTIO_ID_NET) +func IsBitSet(input uint64, pos int) bool { + val := input & (1 << uint64(pos)) + return val > 0 +} + +// VDPANewDev adds new VDPA device +// Equivalent to: `vdpa dev add name mgmtdev /mgmtName [params]` +func VDPANewDev(name, mgmtBus, mgmtName string, params VDPANewDevParams) error { + return pkgHandle.VDPANewDev(name, mgmtBus, mgmtName, params) +} + +// VDPADelDev removes VDPA device +// Equivalent to: `vdpa dev del ` +func VDPADelDev(name string) error { + return pkgHandle.VDPADelDev(name) +} + +// VDPAGetDevList returns list of VDPA devices +// Equivalent to: `vdpa dev show` +func VDPAGetDevList() ([]*VDPADev, error) { + return pkgHandle.VDPAGetDevList() +} + +// VDPAGetDevByName returns VDPA device selected by name +// Equivalent to: `vdpa dev show ` +func VDPAGetDevByName(name string) (*VDPADev, error) { + return pkgHandle.VDPAGetDevByName(name) +} + +// VDPAGetDevConfigList returns list of VDPA devices configurations +// Equivalent to: `vdpa dev config show` +func VDPAGetDevConfigList() ([]*VDPADevConfig, error) { + return pkgHandle.VDPAGetDevConfigList() +} + +// VDPAGetDevConfigByName returns VDPA device configuration selected by name +// Equivalent to: `vdpa dev config show ` +func VDPAGetDevConfigByName(name string) (*VDPADevConfig, error) { + return pkgHandle.VDPAGetDevConfigByName(name) +} + +// VDPAGetDevVStats returns vstats for VDPA device +// Equivalent to: `vdpa dev vstats show qidx ` +func VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) { + return pkgHandle.VDPAGetDevVStats(name, queueIndex) +} + +// VDPAGetMGMTDevList returns list of mgmt devices +// Equivalent to: `vdpa mgmtdev show` +func VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { + return pkgHandle.VDPAGetMGMTDevList() +} + +// VDPAGetMGMTDevByBusAndName returns mgmt devices selected by bus and name +// Equivalent to: `vdpa mgmtdev show /` +func 
VDPAGetMGMTDevByBusAndName(bus, name string) (*VDPAMGMTDev, error) { + return pkgHandle.VDPAGetMGMTDevByBusAndName(bus, name) +} + +type vdpaNetlinkMessage []syscall.NetlinkRouteAttr + +func (id *vdpaDevID) parseIDAttribute(attr syscall.NetlinkRouteAttr) { + switch attr.Attr.Type { + case nl.VDPA_ATTR_DEV_NAME: + id.Name = nl.BytesToString(attr.Value) + case nl.VDPA_ATTR_DEV_ID: + id.ID = native.Uint32(attr.Value) + } +} + +func (netStatus *VDPADevConfigNetStatus) parseStatusAttribute(value []byte) { + a := native.Uint16(value) + netStatus.Announce = (a & VIRTIO_NET_S_ANNOUNCE) > 0 + netStatus.LinkUp = (a & VIRTIO_NET_S_LINK_UP) > 0 +} + +func (d *VDPADev) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + d.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_VENDOR_ID: + d.VendorID = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_MAX_VQS: + d.MaxVQS = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_MAX_VQ_SIZE: + d.MaxVQSize = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_MIN_VQ_SIZE: + d.MinVQSize = native.Uint16(a.Value) + } + } +} + +func (c *VDPADevConfig) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + c.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_NET_CFG_MACADDR: + c.Net.Cfg.MACAddr = a.Value + case nl.VDPA_ATTR_DEV_NET_STATUS: + c.Net.Status.parseStatusAttribute(a.Value) + case nl.VDPA_ATTR_DEV_NET_CFG_MAX_VQP: + c.Net.Cfg.MaxVQP = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_NET_CFG_MTU: + c.Net.Cfg.MTU = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_FEATURES: + c.Features = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_NEGOTIATED_FEATURES: + c.NegotiatedFeatures = native.Uint64(a.Value) + } + } +} + +func (s *VDPADevVStats) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + s.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_QUEUE_INDEX: + s.QueueIndex = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_VENDOR_ATTR_NAME: + s.Vendor = append(s.Vendor, VDPADevVStatsVendor{Name: nl.BytesToString(a.Value)}) + case nl.VDPA_ATTR_DEV_VENDOR_ATTR_VALUE: + if len(s.Vendor) == 0 { + break + } + s.Vendor[len(s.Vendor)-1].Value = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_NEGOTIATED_FEATURES: + s.NegotiatedFeatures = native.Uint64(a.Value) + } + } +} + +func (d *VDPAMGMTDev) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + switch a.Attr.Type { + case nl.VDPA_ATTR_MGMTDEV_BUS_NAME: + d.BusName = nl.BytesToString(a.Value) + case nl.VDPA_ATTR_MGMTDEV_DEV_NAME: + d.DevName = nl.BytesToString(a.Value) + case nl.VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES: + d.SupportedClasses = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_SUPPORTED_FEATURES: + d.SupportedFeatures = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_MGMTDEV_MAX_VQS: + d.MaxVQS = native.Uint32(a.Value) + } + } +} + +func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr) ([]vdpaNetlinkMessage, error) { + f, err := h.GenlFamilyGet(nl.VDPA_GENL_NAME) + if err != nil { + return nil, err + } + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_ACK|extraFlags) + req.AddData(&nl.Genlmsg{ + Command: command, + Version: nl.VDPA_GENL_VERSION, + }) + for _, a := range attrs { + req.AddData(a) + } + + resp, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + messages := make([]vdpaNetlinkMessage, 0, len(resp)) + for _, m := range resp { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + messages = 
append(messages, attrs) + } + return messages, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(*dev))) + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + devs := make([]*VDPADev, 0, len(messages)) + for _, m := range messages { + d := &VDPADev{} + d.parseAttributes(m) + devs = append(devs, d) + } + return devs, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(*dev))) + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + cfgs := make([]*VDPADevConfig, 0, len(messages)) + for _, m := range messages { + cfg := &VDPADevConfig{} + cfg.parseAttributes(m) + cfgs = append(cfgs, cfg) + } + return cfgs, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_DEV_NAME, nl.ZeroTerminated(*dev)), + ) + if bus != nil { + attrs = append(attrs, + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_BUS_NAME, nl.ZeroTerminated(*bus)), + ) + } + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + cfgs := make([]*VDPAMGMTDev, 0, len(messages)) + for _, m := range messages { + cfg := &VDPAMGMTDev{} + cfg.parseAttributes(m) + cfgs = append(cfgs, cfg) + } + return cfgs, nil +} + +// VDPANewDev adds new VDPA device +// Equivalent to: `vdpa dev add name mgmtdev /mgmtName [params]` +func (h *Handle) VDPANewDev(name, mgmtBus, mgmtName string, params VDPANewDevParams) error { + attrs := []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name)), + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_DEV_NAME, nl.ZeroTerminated(mgmtName)), + } + if mgmtBus != "" { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_BUS_NAME, nl.ZeroTerminated(mgmtBus))) + } + if len(params.MACAddr) != 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MACADDR, params.MACAddr)) + } + if params.MaxVQP > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MAX_VQP, nl.Uint16Attr(params.MaxVQP))) + } + if params.MTU > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MTU, nl.Uint16Attr(params.MTU))) + } + if params.Features > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_FEATURES, nl.Uint64Attr(params.Features))) + } + _, err := h.vdpaRequest(nl.VDPA_CMD_DEV_NEW, 0, attrs) + return err +} + +// VDPADelDev removes VDPA device +// Equivalent to: `vdpa dev del ` +func (h *Handle) VDPADelDev(name string) error { + _, err := h.vdpaRequest(nl.VDPA_CMD_DEV_DEL, 0, []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name))}) + return err +} + +// VDPAGetDevList returns list of VDPA devices +// Equivalent to: `vdpa dev show` +func (h *Handle) VDPAGetDevList() ([]*VDPADev, error) { + return h.vdpaDevGet(nil) +} + +// 
VDPAGetDevByName returns VDPA device selected by name +// Equivalent to: `vdpa dev show ` +func (h *Handle) VDPAGetDevByName(name string) (*VDPADev, error) { + devs, err := h.vdpaDevGet(&name) + if err != nil { + return nil, err + } + if len(devs) == 0 { + return nil, fmt.Errorf("device not found") + } + return devs[0], nil +} + +// VDPAGetDevConfigList returns list of VDPA devices configurations +// Equivalent to: `vdpa dev config show` +func (h *Handle) VDPAGetDevConfigList() ([]*VDPADevConfig, error) { + return h.vdpaDevConfigGet(nil) +} + +// VDPAGetDevConfigByName returns VDPA device configuration selected by name +// Equivalent to: `vdpa dev config show ` +func (h *Handle) VDPAGetDevConfigByName(name string) (*VDPADevConfig, error) { + cfgs, err := h.vdpaDevConfigGet(&name) + if err != nil { + return nil, err + } + if len(cfgs) == 0 { + return nil, fmt.Errorf("configuration not found") + } + return cfgs[0], nil +} + +// VDPAGetDevVStats returns vstats for VDPA device +// Equivalent to: `vdpa dev vstats show qidx ` +func (h *Handle) VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) { + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_VSTATS_GET, 0, []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name)), + nl.NewRtAttr(nl.VDPA_ATTR_DEV_QUEUE_INDEX, nl.Uint32Attr(queueIndex)), + }) + if err != nil { + return nil, err + } + if len(messages) == 0 { + return nil, fmt.Errorf("stats not found") + } + stats := &VDPADevVStats{} + stats.parseAttributes(messages[0]) + return stats, nil +} + +// VDPAGetMGMTDevList returns list of mgmt devices +// Equivalent to: `vdpa mgmtdev show` +func (h *Handle) VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { + return h.vdpaMGMTDevGet(nil, nil) +} + +// VDPAGetMGMTDevByBusAndName returns mgmt devices selected by bus and name +// Equivalent to: `vdpa mgmtdev show /` +func (h *Handle) VDPAGetMGMTDevByBusAndName(bus, name string) (*VDPAMGMTDev, error) { + var busPtr *string + if bus != "" { + busPtr = &bus + } + devs, err := h.vdpaMGMTDevGet(busPtr, &name) + if err != nil { + return nil, err + } + if len(devs) == 0 { + return nil, fmt.Errorf("mgmtdev not found") + } + return devs[0], nil +} diff --git a/vendor/github.com/vishvananda/netlink/virtio.go b/vendor/github.com/vishvananda/netlink/virtio.go new file mode 100644 index 000000000..78a497bbc --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/virtio.go @@ -0,0 +1,132 @@ +package netlink + +// features for virtio net +const ( + VIRTIO_NET_F_CSUM = 0 // Host handles pkts w/ partial csum + VIRTIO_NET_F_GUEST_CSUM = 1 // Guest handles pkts w/ partial csum + VIRTIO_NET_F_CTRL_GUEST_OFFLOADS = 2 // Dynamic offload configuration. + VIRTIO_NET_F_MTU = 3 // Initial MTU advice + VIRTIO_NET_F_MAC = 5 // Host has given MAC address. + VIRTIO_NET_F_GUEST_TSO4 = 7 // Guest can handle TSOv4 in. + VIRTIO_NET_F_GUEST_TSO6 = 8 // Guest can handle TSOv6 in. + VIRTIO_NET_F_GUEST_ECN = 9 // Guest can handle TSO[6] w/ ECN in. + VIRTIO_NET_F_GUEST_UFO = 10 // Guest can handle UFO in. + VIRTIO_NET_F_HOST_TSO4 = 11 // Host can handle TSOv4 in. + VIRTIO_NET_F_HOST_TSO6 = 12 // Host can handle TSOv6 in. + VIRTIO_NET_F_HOST_ECN = 13 // Host can handle TSO[6] w/ ECN in. + VIRTIO_NET_F_HOST_UFO = 14 // Host can handle UFO in. + VIRTIO_NET_F_MRG_RXBUF = 15 // Host can merge receive buffers. 
+ VIRTIO_NET_F_STATUS = 16 // virtio_net_config.status available + VIRTIO_NET_F_CTRL_VQ = 17 // Control channel available + VIRTIO_NET_F_CTRL_RX = 18 // Control channel RX mode support + VIRTIO_NET_F_CTRL_VLAN = 19 // Control channel VLAN filtering + VIRTIO_NET_F_CTRL_RX_EXTRA = 20 // Extra RX mode control support + VIRTIO_NET_F_GUEST_ANNOUNCE = 21 // Guest can announce device on the* network + VIRTIO_NET_F_MQ = 22 // Device supports Receive Flow Steering + VIRTIO_NET_F_CTRL_MAC_ADDR = 23 // Set MAC address + VIRTIO_NET_F_VQ_NOTF_COAL = 52 // Device supports virtqueue notification coalescing + VIRTIO_NET_F_NOTF_COAL = 53 // Device supports notifications coalescing + VIRTIO_NET_F_GUEST_USO4 = 54 // Guest can handle USOv4 in. + VIRTIO_NET_F_GUEST_USO6 = 55 // Guest can handle USOv6 in. + VIRTIO_NET_F_HOST_USO = 56 // Host can handle USO in. + VIRTIO_NET_F_HASH_REPORT = 57 // Supports hash report + VIRTIO_NET_F_GUEST_HDRLEN = 59 // Guest provides the exact hdr_len value. + VIRTIO_NET_F_RSS = 60 // Supports RSS RX steering + VIRTIO_NET_F_RSC_EXT = 61 // extended coalescing info + VIRTIO_NET_F_STANDBY = 62 // Act as standby for another device with the same MAC. + VIRTIO_NET_F_SPEED_DUPLEX = 63 // Device set linkspeed and duplex + VIRTIO_NET_F_GSO = 6 // Host handles pkts any GSO type +) + +// virtio net status +const ( + VIRTIO_NET_S_LINK_UP = 1 // Link is up + VIRTIO_NET_S_ANNOUNCE = 2 // Announcement is needed +) + +// virtio config +const ( + // Do we get callbacks when the ring is completely used, even if we've + // suppressed them? + VIRTIO_F_NOTIFY_ON_EMPTY = 24 + // Can the device handle any descriptor layout? + VIRTIO_F_ANY_LAYOUT = 27 + // v1.0 compliant + VIRTIO_F_VERSION_1 = 32 + // If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature. + // If set - use platform DMA tools to access the memory. + // Note the reverse polarity (compared to most other features), + // this is for compatibility with legacy systems. + VIRTIO_F_ACCESS_PLATFORM = 33 + // Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) + VIRTIO_F_IOMMU_PLATFORM = VIRTIO_F_ACCESS_PLATFORM + // This feature indicates support for the packed virtqueue layout. + VIRTIO_F_RING_PACKED = 34 + // Inorder feature indicates that all buffers are used by the device + // in the same order in which they have been made available. + VIRTIO_F_IN_ORDER = 35 + // This feature indicates that memory accesses by the driver and the + // device are ordered in a way described by the platform. + VIRTIO_F_ORDER_PLATFORM = 36 + // Does the device support Single Root I/O Virtualization? + VIRTIO_F_SR_IOV = 37 + // This feature indicates that the driver passes extra data (besides + // identifying the virtqueue) in its device notifications. + VIRTIO_F_NOTIFICATION_DATA = 38 + // This feature indicates that the driver uses the data provided by the device + // as a virtqueue identifier in available buffer notifications. + VIRTIO_F_NOTIF_CONFIG_DATA = 39 + // This feature indicates that the driver can reset a queue individually. 
+ VIRTIO_F_RING_RESET = 40 +) + +// virtio device ids +const ( + VIRTIO_ID_NET = 1 // virtio net + VIRTIO_ID_BLOCK = 2 // virtio block + VIRTIO_ID_CONSOLE = 3 // virtio console + VIRTIO_ID_RNG = 4 // virtio rng + VIRTIO_ID_BALLOON = 5 // virtio balloon + VIRTIO_ID_IOMEM = 6 // virtio ioMemory + VIRTIO_ID_RPMSG = 7 // virtio remote processor messaging + VIRTIO_ID_SCSI = 8 // virtio scsi + VIRTIO_ID_9P = 9 // 9p virtio console + VIRTIO_ID_MAC80211_WLAN = 10 // virtio WLAN MAC + VIRTIO_ID_RPROC_SERIAL = 11 // virtio remoteproc serial link + VIRTIO_ID_CAIF = 12 // Virtio caif + VIRTIO_ID_MEMORY_BALLOON = 13 // virtio memory balloon + VIRTIO_ID_GPU = 16 // virtio GPU + VIRTIO_ID_CLOCK = 17 // virtio clock/timer + VIRTIO_ID_INPUT = 18 // virtio input + VIRTIO_ID_VSOCK = 19 // virtio vsock transport + VIRTIO_ID_CRYPTO = 20 // virtio crypto + VIRTIO_ID_SIGNAL_DIST = 21 // virtio signal distribution device + VIRTIO_ID_PSTORE = 22 // virtio pstore device + VIRTIO_ID_IOMMU = 23 // virtio IOMMU + VIRTIO_ID_MEM = 24 // virtio mem + VIRTIO_ID_SOUND = 25 // virtio sound + VIRTIO_ID_FS = 26 // virtio filesystem + VIRTIO_ID_PMEM = 27 // virtio pmem + VIRTIO_ID_RPMB = 28 // virtio rpmb + VIRTIO_ID_MAC80211_HWSIM = 29 // virtio mac80211-hwsim + VIRTIO_ID_VIDEO_ENCODER = 30 // virtio video encoder + VIRTIO_ID_VIDEO_DECODER = 31 // virtio video decoder + VIRTIO_ID_SCMI = 32 // virtio SCMI + VIRTIO_ID_NITRO_SEC_MOD = 33 // virtio nitro secure module + VIRTIO_ID_I2C_ADAPTER = 34 // virtio i2c adapter + VIRTIO_ID_WATCHDOG = 35 // virtio watchdog + VIRTIO_ID_CAN = 36 // virtio can + VIRTIO_ID_DMABUF = 37 // virtio dmabuf + VIRTIO_ID_PARAM_SERV = 38 // virtio parameter server + VIRTIO_ID_AUDIO_POLICY = 39 // virtio audio policy + VIRTIO_ID_BT = 40 // virtio bluetooth + VIRTIO_ID_GPIO = 41 // virtio gpio + // Virtio Transitional IDs + VIRTIO_TRANS_ID_NET = 0x1000 // transitional virtio net + VIRTIO_TRANS_ID_BLOCK = 0x1001 // transitional virtio block + VIRTIO_TRANS_ID_BALLOON = 0x1002 // transitional virtio balloon + VIRTIO_TRANS_ID_CONSOLE = 0x1003 // transitional virtio console + VIRTIO_TRANS_ID_SCSI = 0x1004 // transitional virtio SCSI + VIRTIO_TRANS_ID_RNG = 0x1005 // transitional virtio rng + VIRTIO_TRANS_ID_9P = 0x1009 // transitional virtio 9p console +) diff --git a/vendor/github.com/vishvananda/netlink/xdp_diag.go b/vendor/github.com/vishvananda/netlink/xdp_diag.go new file mode 100644 index 000000000..e88825bf5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xdp_diag.go @@ -0,0 +1,34 @@ +package netlink + +import "github.com/vishvananda/netlink/nl" + +const SOCK_ANY_COOKIE = uint64(nl.TCPDIAG_NOCOOKIE)<<32 + uint64(nl.TCPDIAG_NOCOOKIE) + +// XDP diagnosis show flag constants to request particular information elements. 
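Before the show-flag constants below, a usage sketch for the AF_XDP diagnostics introduced in this patch (SocketDiagXDP requests all of these flags internally). It assumes at least one XDP socket exists on the host and reads only fields defined in socket_xdp_linux.go and xdp_diag.go.

package main

import (
    "fmt"
    "log"

    "github.com/vishvananda/netlink"
)

func main() {
    xsks, err := netlink.SocketDiagXDP()
    if err != nil {
        log.Fatal(err)
    }
    for _, x := range xsks {
        fmt.Printf("inode=%d ifindex=%d queue=%d\n",
            x.XDPDiagMsg.Ino, x.XDPInfo.Ifindex, x.XDPInfo.QueueID)
        if x.XDPInfo.Stats != nil {
            fmt.Printf("  rx_dropped=%d fill_ring_empty=%d\n",
                x.XDPInfo.Stats.RxDropped, x.XDPInfo.Stats.FillRingEmpty)
        }
    }

    // A single socket can also be looked up by inode when its cookie is unknown.
    if len(xsks) > 0 {
        one, err := netlink.SocketXDPGetInfo(xsks[0].XDPDiagMsg.Ino, netlink.SOCK_ANY_COOKIE)
        if err == nil {
            fmt.Println("uid:", one.XDPInfo.UID)
        }
    }
}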
+const ( + XDP_SHOW_INFO = 1 << iota + XDP_SHOW_RING_CFG + XDP_SHOW_UMEM + XDP_SHOW_MEMINFO + XDP_SHOW_STATS +) + +// XDP diag element constants +const ( + XDP_DIAG_NONE = iota + XDP_DIAG_INFO // when using XDP_SHOW_INFO + XDP_DIAG_UID // when using XDP_SHOW_INFO + XDP_DIAG_RX_RING // when using XDP_SHOW_RING_CFG + XDP_DIAG_TX_RING // when using XDP_SHOW_RING_CFG + XDP_DIAG_UMEM // when using XDP_SHOW_UMEM + XDP_DIAG_UMEM_FILL_RING // when using XDP_SHOW_UMEM + XDP_DIAG_UMEM_COMPLETION_RING // when using XDP_SHOW_UMEM + XDP_DIAG_MEMINFO // when using XDP_SHOW_MEMINFO + XDP_DIAG_STATS // when using XDP_SHOW_STATS +) + +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L21 +type XDPDiagInfoResp struct { + XDPDiagMsg *XDPSocket + XDPInfo *XDPInfo +} diff --git a/vendor/github.com/vishvananda/netlink/xdp_linux.go b/vendor/github.com/vishvananda/netlink/xdp_linux.go new file mode 100644 index 000000000..896a406de --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xdp_linux.go @@ -0,0 +1,46 @@ +package netlink + +import ( + "bytes" + "fmt" +) + +const ( + xdrDiagUmemLen = 8 + 8*4 + xdrDiagStatsLen = 6 * 8 +) + +func (x *XDPDiagUmem) deserialize(b []byte) error { + if len(b) < xdrDiagUmemLen { + return fmt.Errorf("XDP umem diagnosis data short read (%d); want %d", len(b), xdrDiagUmemLen) + } + + rb := bytes.NewBuffer(b) + x.Size = native.Uint64(rb.Next(8)) + x.ID = native.Uint32(rb.Next(4)) + x.NumPages = native.Uint32(rb.Next(4)) + x.ChunkSize = native.Uint32(rb.Next(4)) + x.Headroom = native.Uint32(rb.Next(4)) + x.Ifindex = native.Uint32(rb.Next(4)) + x.QueueID = native.Uint32(rb.Next(4)) + x.Flags = native.Uint32(rb.Next(4)) + x.Refs = native.Uint32(rb.Next(4)) + + return nil +} + +func (x *XDPDiagStats) deserialize(b []byte) error { + if len(b) < xdrDiagStatsLen { + return fmt.Errorf("XDP diagnosis statistics short read (%d); want %d", len(b), xdrDiagStatsLen) + } + + rb := bytes.NewBuffer(b) + x.RxDropped = native.Uint64(rb.Next(8)) + x.RxInvalid = native.Uint64(rb.Next(8)) + x.RxFull = native.Uint64(rb.Next(8)) + x.FillRingEmpty = native.Uint64(rb.Next(8)) + x.TxInvalid = native.Uint64(rb.Next(8)) + x.TxRingEmpty = native.Uint64(rb.Next(8)) + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm.go b/vendor/github.com/vishvananda/netlink/xfrm_linux.go similarity index 95% rename from vendor/github.com/vishvananda/netlink/xfrm.go rename to vendor/github.com/vishvananda/netlink/xfrm_linux.go index 02b41842e..dd38ed8e0 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_linux.go @@ -14,7 +14,7 @@ const ( XFRM_PROTO_ESP Proto = unix.IPPROTO_ESP XFRM_PROTO_AH Proto = unix.IPPROTO_AH XFRM_PROTO_HAO Proto = unix.IPPROTO_DSTOPTS - XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin + XFRM_PROTO_COMP Proto = unix.IPPROTO_COMP XFRM_PROTO_IPSEC_ANY Proto = unix.IPPROTO_RAW ) diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/github.com/vishvananda/netlink/xfrm_policy.go deleted file mode 100644 index 6219d2772..000000000 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy.go +++ /dev/null @@ -1,96 +0,0 @@ -package netlink - -import ( - "fmt" - "net" -) - -// Dir is an enum representing an ipsec template direction. 
-type Dir uint8 - -const ( - XFRM_DIR_IN Dir = iota - XFRM_DIR_OUT - XFRM_DIR_FWD - XFRM_SOCKET_IN - XFRM_SOCKET_OUT - XFRM_SOCKET_FWD -) - -func (d Dir) String() string { - switch d { - case XFRM_DIR_IN: - return "dir in" - case XFRM_DIR_OUT: - return "dir out" - case XFRM_DIR_FWD: - return "dir fwd" - case XFRM_SOCKET_IN: - return "socket in" - case XFRM_SOCKET_OUT: - return "socket out" - case XFRM_SOCKET_FWD: - return "socket fwd" - } - return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN) -} - -// PolicyAction is an enum representing an ipsec policy action. -type PolicyAction uint8 - -const ( - XFRM_POLICY_ALLOW PolicyAction = 0 - XFRM_POLICY_BLOCK PolicyAction = 1 -) - -func (a PolicyAction) String() string { - switch a { - case XFRM_POLICY_ALLOW: - return "allow" - case XFRM_POLICY_BLOCK: - return "block" - default: - return fmt.Sprintf("action %d", a) - } -} - -// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec -// policy. These rules are matched with XfrmState to determine encryption -// and authentication algorithms. -type XfrmPolicyTmpl struct { - Dst net.IP - Src net.IP - Proto Proto - Mode Mode - Spi int - Reqid int -} - -func (t XfrmPolicyTmpl) String() string { - return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}", - t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid) -} - -// XfrmPolicy represents an ipsec policy. It represents the overlay network -// and has a list of XfrmPolicyTmpls representing the base addresses of -// the policy. -type XfrmPolicy struct { - Dst *net.IPNet - Src *net.IPNet - Proto Proto - DstPort int - SrcPort int - Dir Dir - Priority int - Index int - Action PolicyAction - Ifindex int - Ifid int - Mark *XfrmMark - Tmpls []XfrmPolicyTmpl -} - -func (p XfrmPolicy) String() string { - return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}", - p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls) -} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index a4e132ef5..d526739ce 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -1,10 +1,104 @@ package netlink import ( + "fmt" + "net" + "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) +// Dir is an enum representing an ipsec template direction. +type Dir uint8 + +const ( + XFRM_DIR_IN Dir = iota + XFRM_DIR_OUT + XFRM_DIR_FWD + XFRM_SOCKET_IN + XFRM_SOCKET_OUT + XFRM_SOCKET_FWD +) + +func (d Dir) String() string { + switch d { + case XFRM_DIR_IN: + return "dir in" + case XFRM_DIR_OUT: + return "dir out" + case XFRM_DIR_FWD: + return "dir fwd" + case XFRM_SOCKET_IN: + return "socket in" + case XFRM_SOCKET_OUT: + return "socket out" + case XFRM_SOCKET_FWD: + return "socket fwd" + } + return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN) +} + +// PolicyAction is an enum representing an ipsec policy action. +type PolicyAction uint8 + +const ( + XFRM_POLICY_ALLOW PolicyAction = 0 + XFRM_POLICY_BLOCK PolicyAction = 1 +) + +func (a PolicyAction) String() string { + switch a { + case XFRM_POLICY_ALLOW: + return "allow" + case XFRM_POLICY_BLOCK: + return "block" + default: + return fmt.Sprintf("action %d", a) + } +} + +// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec +// policy. 
These rules are matched with XfrmState to determine encryption +// and authentication algorithms. +type XfrmPolicyTmpl struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + Optional int +} + +func (t XfrmPolicyTmpl) String() string { + return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}", + t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid) +} + +// XfrmPolicy represents an ipsec policy. It represents the overlay network +// and has a list of XfrmPolicyTmpls representing the base addresses of +// the policy. +type XfrmPolicy struct { + Dst *net.IPNet + Src *net.IPNet + Proto Proto + DstPort int + SrcPort int + Dir Dir + Priority int + Index int + Action PolicyAction + Ifindex int + Ifid int + Mark *XfrmMark + Tmpls []XfrmPolicyTmpl +} + +func (p XfrmPolicy) String() string { + return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}", + p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls) +} + func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { sel.Family = uint16(nl.FAMILY_V4) if policy.Dst != nil { @@ -75,10 +169,12 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl]) userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst) userTmpl.Saddr.FromIP(tmpl.Src) + userTmpl.Family = uint16(nl.GetIPFamily(tmpl.Dst)) userTmpl.XfrmId.Proto = uint8(tmpl.Proto) userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi)) userTmpl.Mode = uint8(tmpl.Mode) userTmpl.Reqid = uint32(tmpl.Reqid) + userTmpl.Optional = uint8(tmpl.Optional) userTmpl.Aalgos = ^uint32(0) userTmpl.Ealgos = ^uint32(0) userTmpl.Calgos = ^uint32(0) @@ -92,8 +188,10 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) - req.AddData(ifId) + if policy.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + } _, err := req.Execute(unix.NETLINK_XFRM, 0) return err @@ -188,8 +286,10 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) - req.AddData(ifId) + if policy.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + } resType := nl.XFRM_MSG_NEWPOLICY if nlProto == nl.XFRM_MSG_DELPOLICY { @@ -218,8 +318,8 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { var policy XfrmPolicy - policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD) - policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS) + policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, uint16(family)) + policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, uint16(family)) policy.Proto = Proto(msg.Sel.Proto) policy.DstPort = int(nl.Swap16(msg.Sel.Dport)) policy.SrcPort = int(nl.Swap16(msg.Sel.Sport)) @@ -247,6 +347,7 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { resTmpl.Mode = Mode(tmpl.Mode) resTmpl.Spi = int(nl.Swap32(tmpl.XfrmId.Spi)) resTmpl.Reqid = int(tmpl.Reqid) + resTmpl.Optional = int(tmpl.Optional) policy.Tmpls = append(policy.Tmpls, resTmpl) } case nl.XFRMA_MARK: diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go 
b/vendor/github.com/vishvananda/netlink/xfrm_state.go deleted file mode 100644 index 483d8934a..000000000 --- a/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ /dev/null @@ -1,131 +0,0 @@ -package netlink - -import ( - "fmt" - "net" - "time" -) - -// XfrmStateAlgo represents the algorithm to use for the ipsec encryption. -type XfrmStateAlgo struct { - Name string - Key []byte - TruncateLen int // Auth only - ICVLen int // AEAD only -} - -func (a XfrmStateAlgo) String() string { - base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key) - if a.TruncateLen != 0 { - base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen) - } - if a.ICVLen != 0 { - base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen) - } - return fmt.Sprintf("%s}", base) -} - -// EncapType is an enum representing the optional packet encapsulation. -type EncapType uint8 - -const ( - XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1 - XFRM_ENCAP_ESPINUDP -) - -func (e EncapType) String() string { - switch e { - case XFRM_ENCAP_ESPINUDP_NONIKE: - return "espinudp-non-ike" - case XFRM_ENCAP_ESPINUDP: - return "espinudp" - } - return "unknown" -} - -// XfrmStateEncap represents the encapsulation to use for the ipsec encryption. -type XfrmStateEncap struct { - Type EncapType - SrcPort int - DstPort int - OriginalAddress net.IP -} - -func (e XfrmStateEncap) String() string { - return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}", - e.Type, e.SrcPort, e.DstPort, e.OriginalAddress) -} - -// XfrmStateLimits represents the configured limits for the state. -type XfrmStateLimits struct { - ByteSoft uint64 - ByteHard uint64 - PacketSoft uint64 - PacketHard uint64 - TimeSoft uint64 - TimeHard uint64 - TimeUseSoft uint64 - TimeUseHard uint64 -} - -// XfrmStateStats represents the current number of bytes/packets -// processed by this State, the State's installation and first use -// time and the replay window counters. -type XfrmStateStats struct { - ReplayWindow uint32 - Replay uint32 - Failed uint32 - Bytes uint64 - Packets uint64 - AddTime uint64 - UseTime uint64 -} - -// XfrmState represents the state of an ipsec policy. It optionally -// contains an XfrmStateAlgo for encryption and one for authentication. 
-type XfrmState struct { - Dst net.IP - Src net.IP - Proto Proto - Mode Mode - Spi int - Reqid int - ReplayWindow int - Limits XfrmStateLimits - Statistics XfrmStateStats - Mark *XfrmMark - OutputMark int - Ifid int - Auth *XfrmStateAlgo - Crypt *XfrmStateAlgo - Aead *XfrmStateAlgo - Encap *XfrmStateEncap - ESN bool -} - -func (sa XfrmState) String() string { - return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %d, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", - sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN) -} -func (sa XfrmState) Print(stats bool) string { - if !stats { - return sa.String() - } - at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) - ut := "-" - if sa.Statistics.UseTime > 0 { - ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) - } - return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ - "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", - sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), - sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, - sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) -} - -func printLimit(lmt uint64) string { - if lmt == ^uint64(0) { - return "(INF)" - } - return fmt.Sprintf("%d", lmt) -} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 66c99423c..554f2498c 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -2,12 +2,154 @@ package netlink import ( "fmt" + "net" + "time" "unsafe" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) +// XfrmStateAlgo represents the algorithm to use for the ipsec encryption. +type XfrmStateAlgo struct { + Name string + Key []byte + TruncateLen int // Auth only + ICVLen int // AEAD only +} + +func (a XfrmStateAlgo) String() string { + base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key) + if a.TruncateLen != 0 { + base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen) + } + if a.ICVLen != 0 { + base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen) + } + return fmt.Sprintf("%s}", base) +} + +// EncapType is an enum representing the optional packet encapsulation. +type EncapType uint8 + +const ( + XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1 + XFRM_ENCAP_ESPINUDP +) + +func (e EncapType) String() string { + switch e { + case XFRM_ENCAP_ESPINUDP_NONIKE: + return "espinudp-non-ike" + case XFRM_ENCAP_ESPINUDP: + return "espinudp" + } + return "unknown" +} + +// XfrmStateEncap represents the encapsulation to use for the ipsec encryption. +type XfrmStateEncap struct { + Type EncapType + SrcPort int + DstPort int + OriginalAddress net.IP +} + +func (e XfrmStateEncap) String() string { + return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}", + e.Type, e.SrcPort, e.DstPort, e.OriginalAddress) +} + +// XfrmStateLimits represents the configured limits for the state. 
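// (Editor's note, not part of the vendored file: the code shown further down
// treats an all-ones value, ^uint64(0), as "unlimited" -- printLimit renders it
// as "(INF)" -- and limits left at their zero value are conventionally installed
// as unlimited when the state is added.)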
+type XfrmStateLimits struct { + ByteSoft uint64 + ByteHard uint64 + PacketSoft uint64 + PacketHard uint64 + TimeSoft uint64 + TimeHard uint64 + TimeUseSoft uint64 + TimeUseHard uint64 +} + +// XfrmStateStats represents the current number of bytes/packets +// processed by this State, the State's installation and first use +// time and the replay window counters. +type XfrmStateStats struct { + ReplayWindow uint32 + Replay uint32 + Failed uint32 + Bytes uint64 + Packets uint64 + AddTime uint64 + UseTime uint64 +} + +// XfrmReplayState represents the sequence number states for +// "legacy" anti-replay mode. +type XfrmReplayState struct { + OSeq uint32 + Seq uint32 + BitMap uint32 +} + +func (r XfrmReplayState) String() string { + return fmt.Sprintf("{OSeq: 0x%x, Seq: 0x%x, BitMap: 0x%x}", + r.OSeq, r.Seq, r.BitMap) +} + +// XfrmState represents the state of an ipsec policy. It optionally +// contains an XfrmStateAlgo for encryption and one for authentication. +type XfrmState struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + ReplayWindow int + Limits XfrmStateLimits + Statistics XfrmStateStats + Mark *XfrmMark + OutputMark *XfrmMark + Ifid int + Auth *XfrmStateAlgo + Crypt *XfrmStateAlgo + Aead *XfrmStateAlgo + Encap *XfrmStateEncap + ESN bool + DontEncapDSCP bool + OSeqMayWrap bool + Replay *XfrmReplayState + Selector *XfrmPolicy +} + +func (sa XfrmState) String() string { + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t, DontEncapDSCP: %t, OSeqMayWrap: %t, Replay: %v", + sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN, sa.DontEncapDSCP, sa.OSeqMayWrap, sa.Replay) +} +func (sa XfrmState) Print(stats bool) string { + if !stats { + return sa.String() + } + at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) + ut := "-" + if sa.Statistics.UseTime > 0 { + ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) + } + return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ + "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", + sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), + sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, + sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) +} + +func printLimit(lmt uint64) string { + if lmt == ^uint64(0) { + return "(INF)" + } + return fmt.Sprintf("%d", lmt) +} func writeStateAlgo(a *XfrmStateAlgo) []byte { algo := nl.XfrmAlgo{ AlgKeyLen: uint32(len(a.Key) * 8), @@ -77,6 +219,14 @@ func writeReplayEsn(replayWindow int) []byte { return replayEsn.Serialize() } +func writeReplay(r *XfrmReplayState) []byte { + return (&nl.XfrmReplayState{ + OSeq: r.OSeq, + Seq: r.Seq, + BitMap: r.BitMap, + }).Serialize() +} + // XfrmStateAdd will add an xfrm state to the system. 
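// Illustrative usage (editor's sketch, not part of the vendored change; the
// addresses, key and mark value below are made-up example data):
//
//	key := make([]byte, 20) // 16-byte AES key + 4-byte salt for rfc4106
//	state := &netlink.XfrmState{
//		Src:   net.ParseIP("192.0.2.1"),
//		Dst:   net.ParseIP("192.0.2.2"),
//		Proto: netlink.XFRM_PROTO_ESP,
//		Mode:  netlink.XFRM_MODE_TUNNEL,
//		Spi:   0x100,
//		Reqid: 1,
//		Aead: &netlink.XfrmStateAlgo{
//			Name:   "rfc4106(gcm(aes))",
//			Key:    key,
//			ICVLen: 128,
//		},
//		// New in this revision: OutputMark is a *XfrmMark rather than an int,
//		// and the XFRMA_IF_ID attribute is only sent when Ifid is non-zero.
//		OutputMark: &netlink.XfrmMark{Value: 0x10},
//	}
//	if err := netlink.XfrmStateAdd(state); err != nil {
//		// handle error
//	}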
// Equivalent to: `ip xfrm state add $state` func XfrmStateAdd(state *XfrmState) error { @@ -111,7 +261,7 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { // A state with spi 0 can't be deleted so don't allow it to be set if state.Spi == 0 { - return fmt.Errorf("Spi must be set when adding xfrm state.") + return fmt.Errorf("Spi must be set when adding xfrm state") } req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) @@ -158,13 +308,34 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { out := nl.NewRtAttr(nl.XFRMA_REPLAY_ESN_VAL, writeReplayEsn(state.ReplayWindow)) req.AddData(out) } - if state.OutputMark != 0 { - out := nl.NewRtAttr(nl.XFRMA_OUTPUT_MARK, nl.Uint32Attr(uint32(state.OutputMark))) + if state.OutputMark != nil { + out := nl.NewRtAttr(nl.XFRMA_SET_MARK, nl.Uint32Attr(state.OutputMark.Value)) + req.AddData(out) + if state.OutputMark.Mask != 0 { + out = nl.NewRtAttr(nl.XFRMA_SET_MARK_MASK, nl.Uint32Attr(state.OutputMark.Mask)) + req.AddData(out) + } + } + if state.OSeqMayWrap || state.DontEncapDSCP { + var flags uint32 + if state.DontEncapDSCP { + flags |= nl.XFRM_SA_XFLAG_DONT_ENCAP_DSCP + } + if state.OSeqMayWrap { + flags |= nl.XFRM_SA_XFLAG_OSEQ_MAY_WRAP + } + out := nl.NewRtAttr(nl.XFRMA_SA_EXTRA_FLAGS, nl.Uint32Attr(flags)) + req.AddData(out) + } + if state.Replay != nil { + out := nl.NewRtAttr(nl.XFRMA_REPLAY_VAL, writeReplay(state.Replay)) req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) - req.AddData(ifId) + if state.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + } _, err := req.Execute(unix.NETLINK_XFRM, 0) return err @@ -180,7 +351,6 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { msg.Min = 0x100 msg.Max = 0xffffffff req.AddData(msg) - if state.Mark != nil { out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark)) req.AddData(out) @@ -277,8 +447,10 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) - req.AddData(ifId) + if state.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + } resType := nl.XFRM_MSG_NEWSA if nlProto == nl.XFRM_MSG_DELSA { @@ -306,7 +478,6 @@ var familyError = fmt.Errorf("family error") func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { var state XfrmState - state.Dst = msg.Id.Daddr.ToIP() state.Src = msg.Saddr.ToIP() state.Proto = Proto(msg.Id.Proto) @@ -316,20 +487,25 @@ func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { state.ReplayWindow = int(msg.ReplayWindow) lftToLimits(&msg.Lft, &state.Limits) curToStats(&msg.Curlft, &msg.Stats, &state.Statistics) + state.Selector = &XfrmPolicy{ + Dst: msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, msg.Sel.Family), + Src: msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, msg.Sel.Family), + Proto: Proto(msg.Sel.Proto), + DstPort: int(nl.Swap16(msg.Sel.Dport)), + SrcPort: int(nl.Swap16(msg.Sel.Sport)), + Ifindex: int(msg.Sel.Ifindex), + } return &state } func parseXfrmState(m []byte, family int) (*XfrmState, error) { msg := nl.DeserializeXfrmUsersaInfo(m) - // This is mainly for the state dump if family != FAMILY_ALL && family != int(msg.Family) { return nil, familyError } - state := xfrmStateFromXfrmUsersaInfo(msg) - attrs, err := nl.ParseRouteAttr(m[nl.SizeofXfrmUsersaInfo:]) 
if err != nil { return nil, err @@ -377,10 +553,37 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) { state.Mark = new(XfrmMark) state.Mark.Value = mark.Value state.Mark.Mask = mark.Mask - case nl.XFRMA_OUTPUT_MARK: - state.OutputMark = int(native.Uint32(attr.Value)) + case nl.XFRMA_SA_EXTRA_FLAGS: + flags := native.Uint32(attr.Value) + if (flags & nl.XFRM_SA_XFLAG_DONT_ENCAP_DSCP) != 0 { + state.DontEncapDSCP = true + } + if (flags & nl.XFRM_SA_XFLAG_OSEQ_MAY_WRAP) != 0 { + state.OSeqMayWrap = true + } + case nl.XFRMA_SET_MARK: + if state.OutputMark == nil { + state.OutputMark = new(XfrmMark) + } + state.OutputMark.Value = native.Uint32(attr.Value) + case nl.XFRMA_SET_MARK_MASK: + if state.OutputMark == nil { + state.OutputMark = new(XfrmMark) + } + state.OutputMark.Mask = native.Uint32(attr.Value) + if state.OutputMark.Mask == 0xffffffff { + state.OutputMark.Mask = 0 + } case nl.XFRMA_IF_ID: state.Ifid = int(native.Uint32(attr.Value)) + case nl.XFRMA_REPLAY_VAL: + if state.Replay == nil { + state.Replay = new(XfrmReplayState) + } + replay := nl.DeserializeXfrmReplayState(attr.Value[:]) + state.Replay.OSeq = replay.OSeq + state.Replay.Seq = replay.Seq + state.Replay.BitMap = replay.BitMap } } @@ -457,6 +660,9 @@ func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { msg.Id.Spi = nl.Swap32(uint32(state.Spi)) msg.Reqid = uint32(state.Reqid) msg.ReplayWindow = uint8(state.ReplayWindow) - + msg.Sel = nl.XfrmSelector{} + if state.Selector != nil { + selFromPolicy(&msg.Sel, state.Selector) + } return msg } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go b/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go new file mode 100644 index 000000000..12fdd26d7 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go @@ -0,0 +1,7 @@ +//go:build !linux +// +build !linux + +package netlink + +type XfrmPolicy struct{} +type XfrmState struct{} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 5e8158bba..46ceac343 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { return s } +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check - return append(s[:i], s[j:]...) + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) 
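// (Editor's note, not part of the vendored diff: the append above shifts the
// tail of the slice left, leaving stale copies of the removed elements between
// the new length and oldlen. The clearSlice call that follows zeroes that
// region so that, for element types containing pointers such as []*T, the
// dropped objects become eligible for garbage collection -- this is the
// behavioural change the updated doc comments "Delete zeroes the elements ..."
// describe.)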
+ clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. -// When DeleteFunc removes m elements, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage -// collected. +// DeleteFunc zeroes the elements between the new length and the original length. func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i := IndexFunc(s, del) if i == -1 { @@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice @@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { if i+len(v) != j { copy(r[i+len(v):], s[j:]) } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC return r } @@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S { // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { if len(s) < 2 { return s @@ -361,11 +374,13 @@ func Compact[S ~[]E, E comparable](s S) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s @@ -379,6 +394,7 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go index 74f88738c..bd635cb81 100644 --- a/vendor/golang.org/x/exp/slog/handler.go +++ b/vendor/golang.org/x/exp/slog/handler.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "io" + "reflect" "strconv" "sync" "time" @@ -504,6 +505,23 @@ func (s *handleState) appendString(str string) { } func (s *handleState) appendValue(v Value) { + defer func() { + if r := recover(); r != nil { + // If it panics with a nil pointer, the most likely cases are + // an encoding.TextMarshaler or error fails to guard against nil, + // in which case "" seems to be the feasible choice. + // + // Adapted from the code in fmt/print.go. + if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() { + s.appendString("") + return + } + + // Otherwise just print the original panic message. 
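// (Editor's note, not part of the vendored diff: the net effect of this
// deferred recover is that logging a typed-nil value whose Error or
// MarshalText method dereferences its receiver -- e.g.
// `var e *myErr; logger.Info("request failed", "err", e)` -- renders a
// placeholder instead of crashing the handler, while any other panic is
// reported inline with a "!PANIC:" prefix.)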
+ s.appendString(fmt.Sprintf("!PANIC: %v", r)) + } + }() + var err error if s.h.json { err = appendJSONValue(s, v) diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index ec07aab05..02609d5b2 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -201,6 +201,25 @@ var S390X struct { _ CacheLinePad } +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. +var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + _ CacheLinePad +} + func init() { archInit() initOptions() diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index cd63e7335..7d902b684 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 000000000..cb4a0c572 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. 
+// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. 
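// (Editor's note, not part of the vendored diff: after the call, pairs[0]
// holds the riscv_HWPROBE_KEY_IMA_EXT_0 extension bitmask and pairs[1] the
// riscv_HWPROBE_KEY_CPUPERF_0 performance word; a key of -1 means the running
// kernel does not recognise that key. Callers of this package would then test
// the exported flags, e.g. `if cpu.RISCV64.HasV { ... }`, rather than issuing
// the syscall themselves.)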
+ if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c00..aca3199c9 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -8,4 +8,13 @@ package cpu const cacheLineSize = 64 -func initOptions() {} +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d07dd09eb..e14b766a3 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -552,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 2d15200ad..099867dee 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
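//
// Illustrative usage from a client package (editor's sketch, not part of the
// vendored change; the destination address is an example value):
//
//	fd, _ := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
//	dst := &unix.SockaddrInet4{Port: 443, Addr: [4]byte{192, 0, 2, 10}}
//	n, err := unix.Connectx(fd, 0, nil, dst,
//		unix.SAE_ASSOCID_ANY, unix.CONNECT_RESUME_ON_READ_WRITE, nil, nil)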
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f8..a6a2d2fc2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 4308ac177..d73c4652e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index c8068a7a1..4a55a4005 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3..1ec2b1407 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index b622533ef..24b346e1a 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646ba..ebd213100 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f69..824b9c2d5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), 
uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75d..4f178a229 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3..d003c3d43 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74..0d45a941a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3..51e13eb05 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee7..d002d8ef3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8..3f863d898 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acf..61c729310 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 
POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c790..b5d17414f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7f1961b90..9f2550dc3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2486,7 +2486,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc0414..ad05b51a6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd17..5cee9a314 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = 
kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 3f03b3d57..7b97a154c 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3e..4c2e1bdc0 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -247,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := 
syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff --git a/vendor/gopkg.in/gcfg.v1/LICENSE b/vendor/gopkg.in/gcfg.v1/LICENSE new file mode 100644 index 000000000..87a5cede3 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/gcfg.v1/README b/vendor/gopkg.in/gcfg.v1/README new file mode 100644 index 000000000..1ff233a52 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/README @@ -0,0 +1,4 @@ +Gcfg reads INI-style configuration files into Go structs; +supports user-defined types and subsections. + +Package docs: https://godoc.org/gopkg.in/gcfg.v1 diff --git a/vendor/gopkg.in/gcfg.v1/doc.go b/vendor/gopkg.in/gcfg.v1/doc.go new file mode 100644 index 000000000..32f3e9d69 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/doc.go @@ -0,0 +1,145 @@ +// Package gcfg reads "INI-style" text-based configuration files with +// "name=value" pairs grouped into sections (gcfg files). 
+// +// This package is still a work in progress; see the sections below for planned +// changes. +// +// Syntax +// +// The syntax is based on that used by git config: +// http://git-scm.com/docs/git-config#_syntax . +// There are some (planned) differences compared to the git config format: +// - improve data portability: +// - must be encoded in UTF-8 (for now) and must not contain the 0 byte +// - include and "path" type is not supported +// (path type may be implementable as a user-defined type) +// - internationalization +// - section and variable names can contain unicode letters, unicode digits +// (as defined in http://golang.org/ref/spec#Characters ) and hyphens +// (U+002D), starting with a unicode letter +// - disallow potentially ambiguous or misleading definitions: +// - `[sec.sub]` format is not allowed (deprecated in gitconfig) +// - `[sec ""]` is not allowed +// - use `[sec]` for section name "sec" and empty subsection name +// - (planned) within a single file, definitions must be contiguous for each: +// - section: '[secA]' -> '[secB]' -> '[secA]' is an error +// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error +// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error +// +// Data structure +// +// The functions in this package read values into a user-defined struct. +// Each section corresponds to a struct field in the config struct, and each +// variable in a section corresponds to a data field in the section struct. +// The mapping of each section or variable name to fields is done either based +// on the "gcfg" struct tag or by matching the name of the section or variable, +// ignoring case. In the latter case, hyphens '-' in section and variable names +// correspond to underscores '_' in field names. +// Fields must be exported; to use a section or variable name starting with a +// letter that is neither upper- or lower-case, prefix the field name with 'X'. +// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .) +// +// For sections with subsections, the corresponding field in config must be a +// map, rather than a struct, with string keys and pointer-to-struct values. +// Values for subsection variables are stored in the map with the subsection +// name used as the map key. +// (Note that unlike section and variable names, subsection names are case +// sensitive.) +// When using a map, and there is a section with the same section name but +// without a subsection name, its values are stored with the empty string used +// as the key. +// It is possible to provide default values for subsections in the section +// "default-" (or by setting values in the corresponding struct +// field "Default_"). +// +// The functions in this package panic if config is not a pointer to a struct, +// or when a field is not of a suitable type (either a struct or a map with +// string keys and pointer-to-struct values). +// +// Parsing of values +// +// The section structs in the config struct may contain single-valued or +// multi-valued variables. Variables of unnamed slice type (that is, a type +// starting with `[]`) are treated as multi-value; all others (including named +// slice types) are treated as single-valued variables. +// +// Single-valued variables are handled based on the type as follows. +// Unnamed pointer types (that is, types starting with `*`) are dereferenced, +// and if necessary, a new instance is allocated. 
+// +// For types implementing the encoding.TextUnmarshaler interface, the +// UnmarshalText method is used to set the value. Implementing this method is +// the recommended way for parsing user-defined types. +// +// For fields of string kind, the value string is assigned to the field, after +// unquoting and unescaping as needed. +// For fields of bool kind, the field is set to true if the value is "true", +// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or +// "0", ignoring case. In addition, single-valued bool fields can be specified +// with a "blank" value (variable name without equals sign and value); in such +// case the value is set to true. +// +// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as +// decimal or hexadecimal (if having '0x' prefix). (This is to prevent +// unintuitively handling zero-padded numbers as octal.) Other types having +// [u]int* as the underlying type, such as os.FileMode and uintptr allow +// decimal, hexadecimal, or octal values. +// Parsing mode for integer types can be overridden using the struct tag option +// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters +// (each standing for decimal, hexadecimal, and octal, respectively.) +// +// All other types are parsed using fmt.Sscanf with the "%v" verb. +// +// For multi-valued variables, each individual value is parsed as above and +// appended to the slice. If the first value is specified as a "blank" value +// (variable name without equals sign and value), a new slice is allocated; +// that is any values previously set in the slice will be ignored. +// +// The types subpackage for provides helpers for parsing "enum-like" and integer +// types. +// +// Error handling +// +// There are 3 types of errors: +// +// - programmer errors / panics: +// - invalid configuration structure +// - data errors: +// - fatal errors: +// - invalid configuration syntax +// - warnings: +// - data that doesn't belong to any part of the config structure +// +// Programmer errors trigger panics. These are should be fixed by the programmer +// before releasing code that uses gcfg. +// +// Data errors cause gcfg to return a non-nil error value. This includes the +// case when there are extra unknown key-value definitions in the configuration +// data (extra data). +// However, in some occasions it is desirable to be able to proceed in +// situations when the only data error is that of extra data. +// These errors are handled at a different (warning) priority and can be +// filtered out programmatically. To ignore extra data warnings, wrap the +// gcfg.Read*Into invocation into a call to gcfg.FatalOnly. +// +// TODO +// +// The following is a list of changes under consideration: +// - documentation +// - self-contained syntax documentation +// - more practical examples +// - move TODOs to issue tracker (eventually) +// - syntax +// - reconsider valid escape sequences +// (gitconfig doesn't support \r in value, \t in subsection name, etc.) +// - reading / parsing gcfg files +// - define internal representation structure +// - support multiple inputs (readers, strings, files) +// - support declaring encoding (?) +// - support varying fields sets for subsections (?) +// - writing gcfg files +// - error handling +// - make error context accessible programmatically? +// - limit input size? 
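//
// Example
//
// Illustrative usage (editor's sketch, not part of the vendored file):
//
//	type Config struct {
//		Section struct {
//			Name    string
//			Flag    bool
//			Numbers []int
//		}
//	}
//
//	var cfg Config
//	err := gcfg.ReadStringInto(&cfg,
//		"[section]\nname=value\nflag\nnumbers=1\nnumbers=2\n")
//	// cfg.Section.Name == "value", cfg.Section.Flag == true (blank value),
//	// cfg.Section.Numbers == []int{1, 2} (multi-valued variable).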
+// +package gcfg // import "gopkg.in/gcfg.v1" diff --git a/vendor/gopkg.in/gcfg.v1/errors.go b/vendor/gopkg.in/gcfg.v1/errors.go new file mode 100644 index 000000000..83a591dac --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/errors.go @@ -0,0 +1,57 @@ +package gcfg + +import warnings "gopkg.in/warnings.v0" + +// FatalOnly filters the results of a Read*Into invocation and returns only +// fatal errors. That is, errors (warnings) indicating data for unknown +// sections / variables is ignored. Example invocation: +// +// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile)) +// if err != nil { +// ... +// +func FatalOnly(err error) error { + return warnings.FatalOnly(err) +} + +func isFatal(err error) bool { + _, ok := err.(extraData) + return !ok +} + +type loc struct { + section string + subsection *string + variable *string +} + +type extraData struct { + loc +} + +type locErr struct { + msg string + loc +} + +func (l loc) String() string { + s := "section \"" + l.section + "\"" + if l.subsection != nil { + s += ", subsection \"" + *l.subsection + "\"" + } + if l.variable != nil { + s += ", variable \"" + *l.variable + "\"" + } + return s +} + +func (e extraData) Error() string { + return "can't store data at " + e.loc.String() +} + +func (e locErr) Error() string { + return e.msg + " at " + e.loc.String() +} + +var _ error = extraData{} +var _ error = locErr{} diff --git a/vendor/gopkg.in/gcfg.v1/read.go b/vendor/gopkg.in/gcfg.v1/read.go new file mode 100644 index 000000000..06796653c --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/read.go @@ -0,0 +1,257 @@ +package gcfg + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "gopkg.in/gcfg.v1/scanner" + "gopkg.in/gcfg.v1/token" + "gopkg.in/warnings.v0" +) + +var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'} +var utf8Bom = []byte("\ufeff") + +// no error: invalid literals should be caught by scanner +func unquote(s string) string { + u, q, esc := make([]rune, 0, len(s)), false, false + for _, c := range s { + if esc { + uc, ok := unescape[c] + switch { + case ok: + u = append(u, uc) + fallthrough + case !q && c == '\n': + esc = false + continue + } + panic("invalid escape sequence") + } + switch c { + case '"': + q = !q + case '\\': + esc = true + default: + u = append(u, c) + } + } + if q { + panic("missing end quote") + } + if esc { + panic("invalid escape sequence") + } + return string(u) +} + +func readIntoPass(c *warnings.Collector, config interface{}, fset *token.FileSet, + file *token.File, src []byte, subsectPass bool) error { + // + var s scanner.Scanner + var errs scanner.ErrorList + s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) + sect, sectsub := "", "" + pos, tok, lit := s.Scan() + errfn := func(msg string) error { + return fmt.Errorf("%s: %s", fset.Position(pos), msg) + } + for { + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + switch tok { + case token.EOF: + return nil + case token.EOL, token.COMMENT: + pos, tok, lit = s.Scan() + case token.LBRACK: + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.IDENT { + if err := c.Collect(errfn("expected section name")); err != nil { + return err + } + } + sect, sectsub = lit, "" + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok == token.STRING { + sectsub = unquote(lit) + if sectsub == "" { + if err := 
c.Collect(errfn("empty subsection name")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + } + if tok != token.RBRACK { + if sectsub == "" { + if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected right bracket")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } + } + // If a section/subsection header was found, ensure a + // container object is created, even if there are no + // variables further down. + err := c.Collect(set(c, config, sect, sectsub, "", true, "", subsectPass)) + if err != nil { + return err + } + case token.IDENT: + if sect == "" { + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + n := lit + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + return errs.Err() + } + blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, "" + if !blank { + if tok != token.ASSIGN { + if err := c.Collect(errfn("expected '='")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.STRING { + if err := c.Collect(errfn("expected value")); err != nil { + return err + } + } + v = unquote(lit) + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } + } + } + err := set(c, config, sect, sectsub, n, blank, v, subsectPass) + if err != nil { + return err + } + default: + if sect == "" { + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected section header or variable declaration")); err != nil { + return err + } + } + } +} + +func readInto(config interface{}, fset *token.FileSet, file *token.File, + src []byte) error { + // + c := warnings.NewCollector(isFatal) + err := readIntoPass(c, config, fset, file, src, false) + if err != nil { + return err + } + err = readIntoPass(c, config, fset, file, src, true) + if err != nil { + return err + } + return c.Done() +} + +// ReadInto reads gcfg formatted data from reader and sets the values into the +// corresponding fields in config. +func ReadInto(config interface{}, reader io.Reader) error { + src, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + return readInto(config, fset, file, src) +} + +// ReadStringInto reads gcfg formatted data from str and sets the values into +// the corresponding fields in config. +func ReadStringInto(config interface{}, str string) error { + r := strings.NewReader(str) + return ReadInto(config, r) +} + +// ReadFileInto reads gcfg formatted data from the file filename and sets the +// values into the corresponding fields in config. +// +// For compatibility with files created on Windows, the ReadFileInto skips a +// single leading UTF8 BOM sequence if it exists. 
+func ReadFileInto(config interface{}, filename string) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + src, err := ioutil.ReadAll(f) + if err != nil { + return err + } + + // Skips a single leading UTF8 BOM sequence if it exists. + src = skipLeadingUtf8Bom(src) + + fset := token.NewFileSet() + file := fset.AddFile(filename, fset.Base(), len(src)) + return readInto(config, fset, file, src) +} + +func skipLeadingUtf8Bom(src []byte) []byte { + lengthUtf8Bom := len(utf8Bom) + + if len(src) >= lengthUtf8Bom { + if bytes.Equal(src[:lengthUtf8Bom], utf8Bom) { + return src[lengthUtf8Bom:] + } + } + return src +} diff --git a/vendor/gopkg.in/gcfg.v1/scanner/errors.go b/vendor/gopkg.in/gcfg.v1/scanner/errors.go new file mode 100644 index 000000000..1a3c0f656 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/scanner/errors.go @@ -0,0 +1,121 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scanner + +import ( + "fmt" + "io" + "sort" +) + +import ( + "gopkg.in/gcfg.v1/token" +) + +// In an ErrorList, an error is represented by an *Error. +// The position Pos, if valid, points to the beginning of +// the offending token, and the error condition is described +// by Msg. +// +type Error struct { + Pos token.Position + Msg string +} + +// Error implements the error interface. +func (e Error) Error() string { + if e.Pos.Filename != "" || e.Pos.IsValid() { + // don't print "" + // TODO(gri) reconsider the semantics of Position.IsValid + return e.Pos.String() + ": " + e.Msg + } + return e.Msg +} + +// ErrorList is a list of *Errors. +// The zero value for an ErrorList is an empty ErrorList ready to use. +// +type ErrorList []*Error + +// Add adds an Error with given position and error message to an ErrorList. +func (p *ErrorList) Add(pos token.Position, msg string) { + *p = append(*p, &Error{pos, msg}) +} + +// Reset resets an ErrorList to no errors. +func (p *ErrorList) Reset() { *p = (*p)[0:0] } + +// ErrorList implements the sort Interface. +func (p ErrorList) Len() int { return len(p) } +func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p ErrorList) Less(i, j int) bool { + e := &p[i].Pos + f := &p[j].Pos + if e.Filename < f.Filename { + return true + } + if e.Filename == f.Filename { + return e.Offset < f.Offset + } + return false +} + +// Sort sorts an ErrorList. *Error entries are sorted by position, +// other errors are sorted by error message, and before any *Error +// entry. +// +func (p ErrorList) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an ErrorList and removes all but the first error per line. +func (p *ErrorList) RemoveMultiples() { + sort.Sort(p) + var last token.Position // initial last.Line is != any legal error line + i := 0 + for _, e := range *p { + if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { + last = e.Pos + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +// An ErrorList implements the error interface. +func (p ErrorList) Error() string { + switch len(p) { + case 0: + return "no errors" + case 1: + return p[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. 
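+//
+// A small illustration (editorial sketch, not part of the upstream docs):
+//
+//	var errs ErrorList
+//	errs.Add(token.Position{Line: 1, Column: 2}, "expected value")
+//	if err := errs.Err(); err != nil {
+//		fmt.Println(err) // prints "1:2: expected value"
+//	}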
+func (p ErrorList) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// PrintError is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an ErrorList. Otherwise +// it prints the err string. +// +func PrintError(w io.Writer, err error) { + if list, ok := err.(ErrorList); ok { + for _, e := range list { + fmt.Fprintf(w, "%s\n", e) + } + } else if err != nil { + fmt.Fprintf(w, "%s\n", err) + } +} diff --git a/vendor/gopkg.in/gcfg.v1/scanner/scanner.go b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go new file mode 100644 index 000000000..6d0eee916 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go @@ -0,0 +1,342 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scanner implements a scanner for gcfg configuration text. +// It takes a []byte as source which can then be tokenized +// through repeated calls to the Scan method. +// +// Note that the API for the scanner package may change to accommodate new +// features or implementation changes in gcfg. +// +package scanner + +import ( + "fmt" + "path/filepath" + "unicode" + "unicode/utf8" +) + +import ( + "gopkg.in/gcfg.v1/token" +) + +// An ErrorHandler may be provided to Scanner.Init. If a syntax error is +// encountered and a handler was installed, the handler is called with a +// position and an error message. The position points to the beginning of +// the offending token. +// +type ErrorHandler func(pos token.Position, msg string) + +// A Scanner holds the scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. +// +type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + err ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + nextVal bool // next token is expected to be a value + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. +// +func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.error(s.offset, "illegal character NUL") + case r >= 0x80: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.error(s.offset, "illegal UTF-8 encoding") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// A mode value is a set of flags (or 0). +// They control scanner behavior. +// +type Mode uint + +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. 
+// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +// +func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. + if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.err = err + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.ErrorCount = 0 + s.nextVal = false + + s.next() +} + +func (s *Scanner) error(offs int, msg string) { + if s.err != nil { + s.err(s.file.Position(s.file.Pos(offs)), msg) + } + s.ErrorCount++ +} + +func (s *Scanner) scanComment() string { + // initial [;#] already consumed + offs := s.offset - 1 // position of initial [;#] + + for s.ch != '\n' && s.ch >= 0 { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanEscape(val bool) { + offs := s.offset + ch := s.ch + s.next() // always make progress + switch ch { + case '\\', '"': + // ok + case 'n', 't': + if val { + break // ok + } + fallthrough + default: + s.error(offs, "unknown escape sequence") + } +} + +func (s *Scanner) scanString() string { + // '"' opening already consumed + offs := s.offset - 1 + + for s.ch != '"' { + ch := s.ch + s.next() + if ch == '\n' || ch < 0 { + s.error(offs, "string not terminated") + break + } + if ch == '\\' { + s.scanEscape(false) + } + } + + s.next() + + return string(s.src[offs:s.offset]) +} + +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} + +func (s *Scanner) scanValString() string { + offs := s.offset + + hasCR := false + end := offs + inQuote := false +loop: + for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { + ch := s.ch + s.next() + switch { + case inQuote && ch == '\\': + s.scanEscape(true) + case !inQuote && ch == '\\': + if s.ch == '\r' { + hasCR = true + s.next() + } + if s.ch != '\n' && s.ch != '"' { + s.error(offs, "unquoted '\\' must be followed by new line or double quote") + break loop + } + s.next() + case ch == '"': + inQuote = !inQuote + case ch == '\r': + hasCR = true + case ch < 0 || inQuote && ch == '\n': + s.error(offs, "string not terminated") + break loop + } + if inQuote || !isWhiteSpace(ch) { + end = s.offset + } + } + + lit := s.src[offs:end] + if hasCR { + lit = stripCR(lit) + } + + return string(lit) +} + +func isWhiteSpace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\r' +} + +func (s *Scanner) 
skipWhitespace() { + for isWhiteSpace(s.ch) { + s.next() + } +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// token.EOF. +// +// If the returned token is a literal (token.IDENT, token.STRING) or +// token.COMMENT, the literal string has the corresponding value. +// +// If the returned token is token.ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered. Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. +// +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace() + + // current token start + pos = s.file.Pos(s.offset) + + // determine token value + switch ch := s.ch; { + case s.nextVal: + lit = s.scanValString() + tok = token.STRING + s.nextVal = false + case isLetter(ch): + lit = s.scanIdentifier() + tok = token.IDENT + default: + s.next() // always make progress + switch ch { + case -1: + tok = token.EOF + case '\n': + tok = token.EOL + case '"': + tok = token.STRING + lit = s.scanString() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case ';', '#': + // comment + lit = s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + goto scanAgain + } + tok = token.COMMENT + case '=': + tok = token.ASSIGN + s.nextVal = true + default: + s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) + tok = token.ILLEGAL + lit = string(ch) + } + } + + return +} diff --git a/vendor/gopkg.in/gcfg.v1/set.go b/vendor/gopkg.in/gcfg.v1/set.go new file mode 100644 index 000000000..73aee5003 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/set.go @@ -0,0 +1,329 @@ +package gcfg + +import ( + "bytes" + "encoding" + "encoding/gob" + "fmt" + "math/big" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "gopkg.in/gcfg.v1/types" + "gopkg.in/warnings.v0" +) + +type tag struct { + ident string + intMode string +} + +func newTag(ts string) tag { + t := tag{} + s := strings.Split(ts, ",") + t.ident = s[0] + for _, tse := range s[1:] { + if strings.HasPrefix(tse, "int=") { + t.intMode = tse[len("int="):] + } + } + return t +} + +func fieldFold(v reflect.Value, name string) (reflect.Value, tag) { + var n string + r0, _ := utf8.DecodeRuneInString(name) + if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) { + n = "X" + } + n += strings.Replace(name, "-", "_", -1) + f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool { + if !v.FieldByName(fieldName).CanSet() { + return false + } + f, _ := v.Type().FieldByName(fieldName) + t := newTag(f.Tag.Get("gcfg")) + if t.ident != "" { + return strings.EqualFold(t.ident, name) + } + return strings.EqualFold(n, fieldName) + }) + if !ok { + return reflect.Value{}, tag{} + } + return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg")) +} + +type setter func(destp interface{}, blank bool, val string, t tag) error + +var errUnsupportedType = fmt.Errorf("unsupported type") +var errBlankUnsupported = 
fmt.Errorf("blank value not supported for type") + +var setters = []setter{ + typeSetter, textUnmarshalerSetter, kindSetter, scanSetter, +} + +func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { + dtu, ok := d.(encoding.TextUnmarshaler) + if !ok { + return errUnsupportedType + } + if blank { + return errBlankUnsupported + } + return dtu.UnmarshalText([]byte(val)) +} + +func boolSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true)) + return nil + } + b, err := types.ParseBool(val) + if err == nil { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b)) + } + return err +} + +func intMode(mode string) types.IntMode { + var m types.IntMode + if strings.ContainsAny(mode, "dD") { + m |= types.Dec + } + if strings.ContainsAny(mode, "hH") { + m |= types.Hex + } + if strings.ContainsAny(mode, "oO") { + m |= types.Oct + } + return m +} + +var typeModes = map[reflect.Type]types.IntMode{ + reflect.TypeOf(int(0)): types.Dec | types.Hex, + reflect.TypeOf(int8(0)): types.Dec | types.Hex, + reflect.TypeOf(int16(0)): types.Dec | types.Hex, + reflect.TypeOf(int32(0)): types.Dec | types.Hex, + reflect.TypeOf(int64(0)): types.Dec | types.Hex, + reflect.TypeOf(uint(0)): types.Dec | types.Hex, + reflect.TypeOf(uint8(0)): types.Dec | types.Hex, + reflect.TypeOf(uint16(0)): types.Dec | types.Hex, + reflect.TypeOf(uint32(0)): types.Dec | types.Hex, + reflect.TypeOf(uint64(0)): types.Dec | types.Hex, + // use default mode (allow dec/hex/oct) for uintptr type + reflect.TypeOf(big.Int{}): types.Dec | types.Hex, +} + +func intModeDefault(t reflect.Type) types.IntMode { + m, ok := typeModes[t] + if !ok { + m = types.Dec | types.Hex | types.Oct + } + return m +} + +func intSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + mode := intMode(t.intMode) + if mode == 0 { + mode = intModeDefault(reflect.TypeOf(d).Elem()) + } + return types.ParseInt(d, val, mode) +} + +func stringSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + dsp, ok := d.(*string) + if !ok { + return errUnsupportedType + } + *dsp = val + return nil +} + +var kindSetters = map[reflect.Kind]setter{ + reflect.String: stringSetter, + reflect.Bool: boolSetter, + reflect.Int: intSetter, + reflect.Int8: intSetter, + reflect.Int16: intSetter, + reflect.Int32: intSetter, + reflect.Int64: intSetter, + reflect.Uint: intSetter, + reflect.Uint8: intSetter, + reflect.Uint16: intSetter, + reflect.Uint32: intSetter, + reflect.Uint64: intSetter, + reflect.Uintptr: intSetter, +} + +var typeSetters = map[reflect.Type]setter{ + reflect.TypeOf(big.Int{}): intSetter, +} + +func typeSetter(d interface{}, blank bool, val string, tt tag) error { + t := reflect.ValueOf(d).Type().Elem() + setter, ok := typeSetters[t] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func kindSetter(d interface{}, blank bool, val string, tt tag) error { + k := reflect.ValueOf(d).Type().Elem().Kind() + setter, ok := kindSetters[k] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func scanSetter(d interface{}, blank bool, val string, tt tag) error { + if blank { + return errBlankUnsupported + } + return types.ScanFully(d, val, 'v') +} + +func newValue(c *warnings.Collector, sect string, vCfg reflect.Value, + vType reflect.Type) (reflect.Value, error) { + // + pv := reflect.New(vType) + dfltName := "default-" + sect + 
dfltField, _ := fieldFold(vCfg, dfltName) + var err error + if dfltField.IsValid() { + b := bytes.NewBuffer(nil) + ge := gob.NewEncoder(b) + if err = c.Collect(ge.EncodeValue(dfltField)); err != nil { + return pv, err + } + gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) + if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil { + return pv, err + } + } + return pv, nil +} + +func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, + blank bool, value string, subsectPass bool) error { + // + vPCfg := reflect.ValueOf(cfg) + if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("config must be a pointer to a struct")) + } + vCfg := vPCfg.Elem() + vSect, _ := fieldFold(vCfg, sect) + l := loc{section: sect} + if !vSect.IsValid() { + err := extraData{loc: l} + return c.Collect(err) + } + isSubsect := vSect.Kind() == reflect.Map + if subsectPass != isSubsect { + return nil + } + if isSubsect { + l.subsection = &sub + vst := vSect.Type() + if vst.Key().Kind() != reflect.String || + vst.Elem().Kind() != reflect.Ptr || + vst.Elem().Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("map field for section must have string keys and "+ + " pointer-to-struct values: section %q", sect)) + } + if vSect.IsNil() { + vSect.Set(reflect.MakeMap(vst)) + } + k := reflect.ValueOf(sub) + pv := vSect.MapIndex(k) + if !pv.IsValid() { + vType := vSect.Type().Elem().Elem() + var err error + if pv, err = newValue(c, sect, vCfg, vType); err != nil { + return err + } + vSect.SetMapIndex(k, pv) + } + vSect = pv.Elem() + } else if vSect.Kind() != reflect.Struct { + panic(fmt.Errorf("field for section must be a map or a struct: "+ + "section %q", sect)) + } else if sub != "" { + return c.Collect(extraData{loc: l}) + } + // Empty name is a special value, meaning that only the + // section/subsection object is to be created, with no values set. 
+ if name == "" { + return nil + } + vVar, t := fieldFold(vSect, name) + l.variable = &name + if !vVar.IsValid() { + return c.Collect(extraData{loc: l}) + } + // vVal is either single-valued var, or newly allocated value within multi-valued var + var vVal reflect.Value + // multi-value if unnamed slice type + isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice || + vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice + if isMulti && vVar.Kind() == reflect.Ptr { + if vVar.IsNil() { + vVar.Set(reflect.New(vVar.Type().Elem())) + } + vVar = vVar.Elem() + } + if isMulti && blank { + vVar.Set(reflect.Zero(vVar.Type())) + return nil + } + if isMulti { + vVal = reflect.New(vVar.Type().Elem()).Elem() + } else { + vVal = vVar + } + isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr + isNew := isDeref && vVal.IsNil() + // vAddr is address of value to set (dereferenced & allocated as needed) + var vAddr reflect.Value + switch { + case isNew: + vAddr = reflect.New(vVal.Type().Elem()) + case isDeref && !isNew: + vAddr = vVal + default: + vAddr = vVal.Addr() + } + vAddrI := vAddr.Interface() + err, ok := error(nil), false + for _, s := range setters { + err = s(vAddrI, blank, value, t) + if err == nil { + ok = true + break + } + if err != errUnsupportedType { + return locErr{msg: err.Error(), loc: l} + } + } + if !ok { + // in case all setters returned errUnsupportedType + return locErr{msg: err.Error(), loc: l} + } + if isNew { // set reference if it was dereferenced and newly allocated + vVal.Set(vAddr) + } + if isMulti { // append if multi-valued + vVar.Set(reflect.Append(vVar, vVal)) + } + return nil +} diff --git a/vendor/gopkg.in/gcfg.v1/token/position.go b/vendor/gopkg.in/gcfg.v1/token/position.go new file mode 100644 index 000000000..fc45c1e76 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/position.go @@ -0,0 +1,435 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri) consider making this a separate package outside the go directory. + +package token + +import ( + "fmt" + "sort" + "sync" +) + +// ----------------------------------------------------------------------------- +// Positions + +// Position describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +// +type Position struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (pos *Position) IsValid() bool { return pos.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +// +func (pos Position) String() string { + s := pos.Filename + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Pos is a compact encoding of a source position within a file set. +// It can be converted into a Position for a more convenient, but much +// larger, representation. 
+// +// The Pos value for a given file is a number in the range [base, base+size], +// where base and size are specified when adding the file to the file set via +// AddFile. +// +// To create the Pos value for a specific source offset, first add +// the respective file to the current file set (via FileSet.AddFile) +// and then call File.Pos(offset) for that file. Given a Pos value p +// for a specific file set fset, the corresponding Position value is +// obtained by calling fset.Position(p). +// +// Pos values can be compared directly with the usual comparison operators: +// If two Pos values p and q are in the same file, comparing p and q is +// equivalent to comparing the respective source file offsets. If p and q +// are in different files, p < q is true if the file implied by p was added +// to the respective file set before the file implied by q. +// +type Pos int + +// The zero value for Pos is NoPos; there is no file and line information +// associated with it, and NoPos().IsValid() is false. NoPos is always +// smaller than any other Pos value. The corresponding Position value +// for NoPos is the zero value for Position. +// +const NoPos Pos = 0 + +// IsValid returns true if the position is valid. +func (p Pos) IsValid() bool { + return p != NoPos +} + +// ----------------------------------------------------------------------------- +// File + +// A File is a handle for a file belonging to a FileSet. +// A File has a name, size, and line offset table. +// +type File struct { + set *FileSet + name string // file name as provided to AddFile + base int // Pos value range for this file is [base...base+size] + size int // file size as provided to AddFile + + // lines and infos are protected by set.mutex + lines []int + infos []lineInfo +} + +// Name returns the file name of file f as registered with AddFile. +func (f *File) Name() string { + return f.name +} + +// Base returns the base offset of file f as registered with AddFile. +func (f *File) Base() int { + return f.base +} + +// Size returns the size of file f as registered with AddFile. +func (f *File) Size() int { + return f.size +} + +// LineCount returns the number of lines in file f. +func (f *File) LineCount() int { + f.set.mutex.RLock() + n := len(f.lines) + f.set.mutex.RUnlock() + return n +} + +// AddLine adds the line offset for a new line. +// The line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise the line offset is ignored. +// +func (f *File) AddLine(offset int) { + f.set.mutex.Lock() + if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { + f.lines = append(f.lines, offset) + } + f.set.mutex.Unlock() +} + +// SetLines sets the line offsets for a file and returns true if successful. +// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. +// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= offset { + return false + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. 
+func (f *File) SetLinesForContent(content []byte) { + var lines []int + line := 0 + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = offset + 1 + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() +} + +// A lineInfo object describes alternative file and line number +// information (such as provided via a //line comment in a .go +// file) for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line int +} + +// AddLineInfo adds alternative file and line number information for +// a given file offset. The offset must be larger than the offset for +// the previously added alternative line info and smaller than the +// file size; otherwise the information is ignored. +// +// AddLineInfo is typically used to register alternative position +// information for //line filename:line comments in source files. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + f.set.mutex.Lock() + if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line}) + } + f.set.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. +// +func (f *File) Pos(offset int) Pos { + if offset > f.size { + panic("illegal file offset") + } + return Pos(f.base + offset) +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + return int(p) - f.base +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Line(p Pos) int { + // TODO(gri) this can be implemented much more efficiently + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// info returns the file name, line, and column number for a file offset. +func (f *File) info(offset int) (filename string, line, column int) { + filename = f.name + if i := searchInts(f.lines, offset); i >= 0 { + line, column = i+1, offset-f.lines[i]+1 + } + if len(f.infos) > 0 { + // almost no files have extra line infos + if i := searchLineInfos(f.infos, offset); i >= 0 { + alt := &f.infos[i] + filename = alt.Filename + if i := searchInts(f.lines, alt.Offset); i >= 0 { + line += alt.Line - i - 1 + } + } + } + return +} + +func (f *File) position(p Pos) (pos Position) { + offset := int(p) - f.base + pos.Offset = offset + pos.Filename, pos.Line, pos.Column = f.info(offset) + return +} + +// Position returns the Position value for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Position(p Pos) (pos Position) { + if p != NoPos { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + pos = f.position(p) + } + return +} + +// ----------------------------------------------------------------------------- +// FileSet + +// A FileSet represents a set of source files. +// Methods of file sets are synchronized; multiple goroutines +// may invoke them concurrently. 
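+//
+// Typical use, sketched for illustration (the file name and offsets are
+// arbitrary examples):
+//
+//	fset := NewFileSet()
+//	f := fset.AddFile("config.gcfg", fset.Base(), 100)
+//	p := f.Pos(42)          // Pos for byte offset 42 within the file
+//	pos := fset.Position(p) // back to a file/line/column Position
+//	_ = pos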
+// +type FileSet struct { + mutex sync.RWMutex // protects the file set + base int // base offset for the next file + files []*File // list of files in the order added to the set + last *File // cache of last file looked up +} + +// NewFileSet creates a new file set. +func NewFileSet() *FileSet { + s := new(FileSet) + s.base = 1 // 0 == NoPos + return s +} + +// Base returns the minimum base offset that must be provided to +// AddFile when adding the next file. +// +func (s *FileSet) Base() int { + s.mutex.RLock() + b := s.base + s.mutex.RUnlock() + return b + +} + +// AddFile adds a new file with a given filename, base offset, and file size +// to the file set s and returns the file. Multiple files may have the same +// name. The base offset must not be smaller than the FileSet's Base(), and +// size must not be negative. +// +// Adding the file will set the file set's Base() value to base + size + 1 +// as the minimum base value for the next file. The following relationship +// exists between a Pos value p for a given file offset offs: +// +// int(p) = base + offs +// +// with offs in the range [0, size] and thus p in the range [base, base+size]. +// For convenience, File.Pos may be used to create file-specific position +// values from a file offset. +// +func (s *FileSet) AddFile(filename string, base, size int) *File { + s.mutex.Lock() + defer s.mutex.Unlock() + if base < s.base || size < 0 { + panic("illegal base or size") + } + // base >= s.base && size >= 0 + f := &File{s, filename, base, size, []int{0}, nil} + base += size + 1 // +1 because EOF also has a position + if base < 0 { + panic("token.Pos offset overflow (> 2G of source code in file set)") + } + // add the file to the file set + s.base = base + s.files = append(s.files, f) + s.last = f + return f +} + +// Iterate calls f for the files in the file set in the order they were added +// until f returns false. +// +func (s *FileSet) Iterate(f func(*File) bool) { + for i := 0; ; i++ { + var file *File + s.mutex.RLock() + if i < len(s.files) { + file = s.files[i] + } + s.mutex.RUnlock() + if file == nil || !f(file) { + break + } + } +} + +func searchFiles(a []*File, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 +} + +func (s *FileSet) file(p Pos) *File { + // common case: p is in last file + if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { + return f + } + // p is not in last file - search all files + if i := searchFiles(s.files, int(p)); i >= 0 { + f := s.files[i] + // f.base <= int(p) by definition of searchFiles + if int(p) <= f.base+f.size { + s.last = f + return f + } + } + return nil +} + +// File returns the file that contains the position p. +// If no such file is found (for instance for p == NoPos), +// the result is nil. +// +func (s *FileSet) File(p Pos) (f *File) { + if p != NoPos { + s.mutex.RLock() + f = s.file(p) + s.mutex.RUnlock() + } + return +} + +// Position converts a Pos in the fileset into a general Position. 
+func (s *FileSet) Position(p Pos) (pos Position) { + if p != NoPos { + s.mutex.RLock() + if f := s.file(p); f != nil { + pos = f.position(p) + } + s.mutex.RUnlock() + } + return +} + +// ----------------------------------------------------------------------------- +// Helper functions + +func searchInts(a []int, x int) int { + // This function body is a manually inlined version of: + // + // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 + // + // With better compiler optimizations, this may not be needed in the + // future, but at the moment this change improves the go/printer + // benchmark performance by ~30%. This has a direct impact on the + // speed of gofmt and thus seems worthwhile (2011-04-29). + // TODO(gri): Remove this when compilers have caught up. + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 // avoid overflow when computing h + // i ≤ h < j + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} diff --git a/vendor/gopkg.in/gcfg.v1/token/serialize.go b/vendor/gopkg.in/gcfg.v1/token/serialize.go new file mode 100644 index 000000000..4adc8f9e3 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/serialize.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package token + +type serializedFile struct { + // fields correspond 1:1 to fields with same (lower-case) name in File + Name string + Base int + Size int + Lines []int + Infos []lineInfo +} + +type serializedFileSet struct { + Base int + Files []serializedFile +} + +// Read calls decode to deserialize a file set into s; s must not be nil. +func (s *FileSet) Read(decode func(interface{}) error) error { + var ss serializedFileSet + if err := decode(&ss); err != nil { + return err + } + + s.mutex.Lock() + s.base = ss.Base + files := make([]*File, len(ss.Files)) + for i := 0; i < len(ss.Files); i++ { + f := &ss.Files[i] + files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} + } + s.files = files + s.last = nil + s.mutex.Unlock() + + return nil +} + +// Write calls encode to serialize the file set s. +func (s *FileSet) Write(encode func(interface{}) error) error { + var ss serializedFileSet + + s.mutex.Lock() + ss.Base = s.base + files := make([]serializedFile, len(s.files)) + for i, f := range s.files { + files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} + } + ss.Files = files + s.mutex.Unlock() + + return encode(ss) +} diff --git a/vendor/gopkg.in/gcfg.v1/token/token.go b/vendor/gopkg.in/gcfg.v1/token/token.go new file mode 100644 index 000000000..b3c7c83fa --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/token.go @@ -0,0 +1,83 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package token defines constants representing the lexical tokens of the gcfg +// configuration syntax and basic operations on tokens (printing, predicates). +// +// Note that the API for the token package may change to accommodate new +// features or implementation changes in gcfg. +// +package token + +import "strconv" + +// Token is the set of lexical tokens of the gcfg configuration syntax. +type Token int + +// The list of tokens. 
+const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + + literal_beg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // section-name, variable-name + STRING // "subsection-name", variable value + literal_end + + operator_beg + // Operators and delimiters + ASSIGN // = + LBRACK // [ + RBRACK // ] + EOL // \n + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + STRING: "STRING", + + ASSIGN: "=", + LBRACK: "[", + RBRACK: "]", + EOL: "\n", +} + +// String returns the string corresponding to the token tok. +// For operators and delimiters, the string is the actual token character +// sequence (e.g., for the token ASSIGN, the string is "="). For all other +// tokens the string corresponds to the token constant name (e.g. for the +// token IDENT, the string is "IDENT"). +// +func (tok Token) String() string { + s := "" + if 0 <= tok && tok < Token(len(tokens)) { + s = tokens[tok] + } + if s == "" { + s = "token(" + strconv.Itoa(int(tok)) + ")" + } + return s +} + +// Predicates + +// IsLiteral returns true for tokens corresponding to identifiers +// and basic type literals; it returns false otherwise. +// +func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +// +func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } diff --git a/vendor/gopkg.in/gcfg.v1/types/bool.go b/vendor/gopkg.in/gcfg.v1/types/bool.go new file mode 100644 index 000000000..8dcae0d8c --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/bool.go @@ -0,0 +1,23 @@ +package types + +// BoolValues defines the name and value mappings for ParseBool. +var BoolValues = map[string]interface{}{ + "true": true, "yes": true, "on": true, "1": true, + "false": false, "no": false, "off": false, "0": false, +} + +var boolParser = func() *EnumParser { + ep := &EnumParser{} + ep.AddVals(BoolValues) + return ep +}() + +// ParseBool parses bool values according to the definitions in BoolValues. +// Parsing is case-insensitive. +func ParseBool(s string) (bool, error) { + v, err := boolParser.Parse(s) + if err != nil { + return false, err + } + return v.(bool), nil +} diff --git a/vendor/gopkg.in/gcfg.v1/types/doc.go b/vendor/gopkg.in/gcfg.v1/types/doc.go new file mode 100644 index 000000000..9f9c345f6 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/doc.go @@ -0,0 +1,4 @@ +// Package types defines helpers for type conversions. +// +// The API for this package is not finalized yet. +package types diff --git a/vendor/gopkg.in/gcfg.v1/types/enum.go b/vendor/gopkg.in/gcfg.v1/types/enum.go new file mode 100644 index 000000000..1a0c7ef45 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/enum.go @@ -0,0 +1,44 @@ +package types + +import ( + "fmt" + "reflect" + "strings" +) + +// EnumParser parses "enum" values; i.e. a predefined set of strings to +// predefined values. +type EnumParser struct { + Type string // type name; if not set, use type of first value added + CaseMatch bool // if true, matching of strings is case-sensitive + // PrefixMatch bool + vals map[string]interface{} +} + +// AddVals adds strings and values to an EnumParser. 
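+//
+// For instance (an editorial sketch; the "color" names are made up for the
+// example, mirroring how the package builds its own boolParser):
+//
+//	ep := &EnumParser{Type: "color"}
+//	ep.AddVals(map[string]interface{}{"red": 0, "green": 1, "blue": 2})
+//	v, err := ep.Parse("GREEN") // matching is case-insensitive by default
+//	// on success v holds the value 1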
+func (ep *EnumParser) AddVals(vals map[string]interface{}) { + if ep.vals == nil { + ep.vals = make(map[string]interface{}) + } + for k, v := range vals { + if ep.Type == "" { + ep.Type = reflect.TypeOf(v).Name() + } + if !ep.CaseMatch { + k = strings.ToLower(k) + } + ep.vals[k] = v + } +} + +// Parse parses the string and returns the value or an error. +func (ep EnumParser) Parse(s string) (interface{}, error) { + if !ep.CaseMatch { + s = strings.ToLower(s) + } + v, ok := ep.vals[s] + if !ok { + return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s) + } + return v, nil +} diff --git a/vendor/gopkg.in/gcfg.v1/types/int.go b/vendor/gopkg.in/gcfg.v1/types/int.go new file mode 100644 index 000000000..af7e75c12 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/int.go @@ -0,0 +1,86 @@ +package types + +import ( + "fmt" + "strings" +) + +// An IntMode is a mode for parsing integer values, representing a set of +// accepted bases. +type IntMode uint8 + +// IntMode values for ParseInt; can be combined using binary or. +const ( + Dec IntMode = 1 << iota + Hex + Oct +) + +// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`. +func (m IntMode) String() string { + var modes []string + if m&Dec != 0 { + modes = append(modes, "Dec") + } + if m&Hex != 0 { + modes = append(modes, "Hex") + } + if m&Oct != 0 { + modes = append(modes, "Oct") + } + return "IntMode(" + strings.Join(modes, "|") + ")" +} + +var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix") + +func prefix0(val string) bool { + return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0") +} + +func prefix0x(val string) bool { + return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x") +} + +// ParseInt parses val using mode into intptr, which must be a pointer to an +// integer kind type. Non-decimal value require prefix `0` or `0x` in the cases +// when mode permits ambiguity of base; otherwise the prefix can be omitted. +func ParseInt(intptr interface{}, val string, mode IntMode) error { + val = strings.TrimSpace(val) + verb := byte(0) + switch mode { + case Dec: + verb = 'd' + case Dec + Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Oct: + if prefix0(val) && !prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Hex + Oct: + verb = 'v' + case Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'x' + } + case Oct: + verb = 'o' + case Hex + Oct: + if prefix0(val) { + verb = 'v' + } else { + return errIntAmbig + } + } + if verb == 0 { + panic("unsupported mode") + } + return ScanFully(intptr, val, verb) +} diff --git a/vendor/gopkg.in/gcfg.v1/types/scan.go b/vendor/gopkg.in/gcfg.v1/types/scan.go new file mode 100644 index 000000000..db2f6ed3c --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/scan.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + "io" + "reflect" +) + +// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. 
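+//
+// For example (editorial sketch):
+//
+//	var n int
+//	err := ScanFully(&n, "0x1f", 'v') // n == 31; the %v verb accepts the 0x prefix
+//	err = ScanFully(&n, "12abc", 'd') // error: extra characters "abc"
+//	_ = err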
+func ScanFully(ptr interface{}, val string, verb byte) error { + t := reflect.ValueOf(ptr).Elem().Type() + // attempt to read extra bytes to make sure the value is consumed + var b []byte + n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) + switch { + case n < 1 || n == 1 && err != io.EOF: + return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) + case n > 1: + return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) + } + // n == 1 && err == io.EOF + return nil +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml new file mode 100644 index 000000000..21166f5c7 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - tip + - 1.15.x + - 1.14.x + - 1.13.x + - 1.12.x + +env: + - GO111MODULE=on diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE new file mode 100644 index 000000000..c3d4cc307 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Nate Finch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md new file mode 100644 index 000000000..060eae52a --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md @@ -0,0 +1,179 @@ +# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0) + +### Lumberjack is a Go package for writing logs to rolling files. + +Package lumberjack provides a rolling logger. + +Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +thusly: + + import "gopkg.in/natefinch/lumberjack.v2" + +The package name remains simply lumberjack, and the code resides at +https://github.com/natefinch/lumberjack under the v2.0 branch. + +Lumberjack is intended to be one part of a logging infrastructure. +It is not an all-in-one solution, but instead is a pluggable +component at the bottom of the logging stack that simply controls the files +to which logs are written. + +Lumberjack plays well with any logging package that can write to an +io.Writer, including the standard library's log package. + +Lumberjack assumes that only one process is writing to the output files. +Using the same lumberjack configuration from multiple processes on the same +machine will result in improper behavior. + + +**Example** + +To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts. + +Code: + +```go +log.SetOutput(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, //days + Compress: true, // disabled by default +}) +``` + + + +## type Logger +``` go +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. 
+ Compress bool `json:"compress" yaml:"compress"` + // contains filtered or unexported fields +} +``` +Logger is an io.WriteCloser that writes to the specified filename. + +Logger opens or creates the logfile on first Write. If the file exists and +is less than MaxSize megabytes, lumberjack will open and append to that file. +If the file exists and its size is >= MaxSize megabytes, the file is renamed +by putting the current time in a timestamp in the name immediately before the +file's extension (or the end of the filename if there's no extension). A new +log file is then created using original filename. + +Whenever a write would cause the current log file exceed MaxSize megabytes, +the current file is closed, renamed, and a new log file created with the +original name. Thus, the filename you give Logger is always the "current" log +file. + +Backups use the log file name given to Logger, in the form `name-timestamp.ext` +where name is the filename without the extension, timestamp is the time at which +the log was rotated formatted with the time.Time format of +`2006-01-02T15-04-05.000` and the extension is the original extension. For +example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created +at 6:30pm on Nov 11 2016 would use the filename +`/var/log/foo/server-2016-11-04T18-30-00.000.log` + +### Cleaning Up Old Log Files +Whenever a new logfile gets created, old log files may be deleted. The most +recent files according to the encoded timestamp will be retained, up to a +number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +with an encoded timestamp older than MaxAge days are deleted, regardless of +MaxBackups. Note that the time encoded in the timestamp is the rotation +time, which may differ from the last time that file was written to. + +If MaxBackups and MaxAge are both 0, no old log files will be deleted. + + + + + + + + + + + +### func (\*Logger) Close +``` go +func (l *Logger) Close() error +``` +Close implements io.Closer, and closes the current logfile. + + + +### func (\*Logger) Rotate +``` go +func (l *Logger) Rotate() error +``` +Rotate causes Logger to close the existing log file and immediately create a +new one. This is a helper function for applications that want to initiate +rotations outside of the normal rotation rules, such as in response to +SIGHUP. After rotating, this initiates a cleanup of old log files according +to the normal rules. + +**Example** + +Example of how to rotate in response to SIGHUP. + +Code: + +```go +l := &lumberjack.Logger{} +log.SetOutput(l) +c := make(chan os.Signal, 1) +signal.Notify(c, syscall.SIGHUP) + +go func() { + for { + <-c + l.Rotate() + } +}() +``` + +### func (\*Logger) Write +``` go +func (l *Logger) Write(p []byte) (n int, err error) +``` +Write implements io.Writer. If a write would cause the log file to be larger +than MaxSize, the file is closed, renamed to include a timestamp of the +current time, and a new log file is created using the original log file name. +If the length of the write is greater than MaxSize, an error is returned. 
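+
+**Example**
+
+A sketch of handling that error case (the filename and sizes below are
+illustrative, not defaults):
+
+```go
+l := &lumberjack.Logger{Filename: "/tmp/app.log", MaxSize: 1} // 1 megabyte
+defer l.Close()
+// A single write larger than MaxSize is rejected rather than rotated.
+if _, err := l.Write(make([]byte, 2*1024*1024)); err != nil {
+	log.Printf("log write failed: %v", err)
+}
+```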
+ + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go new file mode 100644 index 000000000..11d066972 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go @@ -0,0 +1,11 @@ +// +build !linux + +package lumberjack + +import ( + "os" +) + +func chown(_ string, _ os.FileInfo) error { + return nil +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go new file mode 100644 index 000000000..465f56927 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go @@ -0,0 +1,19 @@ +package lumberjack + +import ( + "os" + "syscall" +) + +// osChown is a var so we can mock it out during tests. +var osChown = os.Chown + +func chown(name string, info os.FileInfo) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode()) + if err != nil { + return err + } + f.Close() + stat := info.Sys().(*syscall.Stat_t) + return osChown(name, int(stat.Uid), int(stat.Gid)) +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go new file mode 100644 index 000000000..3447cdc05 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go @@ -0,0 +1,541 @@ +// Package lumberjack provides a rolling logger. +// +// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +// thusly: +// +// import "gopkg.in/natefinch/lumberjack.v2" +// +// The package name remains simply lumberjack, and the code resides at +// https://github.com/natefinch/lumberjack under the v2.0 branch. +// +// Lumberjack is intended to be one part of a logging infrastructure. +// It is not an all-in-one solution, but instead is a pluggable +// component at the bottom of the logging stack that simply controls the files +// to which logs are written. +// +// Lumberjack plays well with any logging package that can write to an +// io.Writer, including the standard library's log package. +// +// Lumberjack assumes that only one process is writing to the output files. +// Using the same lumberjack configuration from multiple processes on the same +// machine will result in improper behavior. +package lumberjack + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" +) + +const ( + backupTimeFormat = "2006-01-02T15-04-05.000" + compressSuffix = ".gz" + defaultMaxSize = 100 +) + +// ensure we always implement io.WriteCloser +var _ io.WriteCloser = (*Logger)(nil) + +// Logger is an io.WriteCloser that writes to the specified filename. +// +// Logger opens or creates the logfile on first Write. If the file exists and +// is less than MaxSize megabytes, lumberjack will open and append to that file. +// If the file exists and its size is >= MaxSize megabytes, the file is renamed +// by putting the current time in a timestamp in the name immediately before the +// file's extension (or the end of the filename if there's no extension). A new +// log file is then created using original filename. +// +// Whenever a write would cause the current log file exceed MaxSize megabytes, +// the current file is closed, renamed, and a new log file created with the +// original name. Thus, the filename you give Logger is always the "current" log +// file. 
+// +// Backups use the log file name given to Logger, in the form +// `name-timestamp.ext` where name is the filename without the extension, +// timestamp is the time at which the log was rotated formatted with the +// time.Time format of `2006-01-02T15-04-05.000` and the extension is the +// original extension. For example, if your Logger.Filename is +// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would +// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log` +// +// Cleaning Up Old Log Files +// +// Whenever a new logfile gets created, old log files may be deleted. The most +// recent files according to the encoded timestamp will be retained, up to a +// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +// with an encoded timestamp older than MaxAge days are deleted, regardless of +// MaxBackups. Note that the time encoded in the timestamp is the rotation +// time, which may differ from the last time that file was written to. +// +// If MaxBackups and MaxAge are both 0, no old log files will be deleted. +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. + Compress bool `json:"compress" yaml:"compress"` + + size int64 + file *os.File + mu sync.Mutex + + millCh chan bool + startMill sync.Once +} + +var ( + // currentTime exists so it can be mocked out by tests. + currentTime = time.Now + + // os_Stat exists so it can be mocked out by tests. + osStat = os.Stat + + // megabyte is the conversion factor between MaxSize and bytes. It is a + // variable so tests can mock it out and not need to write megabytes of data + // to disk. + megabyte = 1024 * 1024 +) + +// Write implements io.Writer. If a write would cause the log file to be larger +// than MaxSize, the file is closed, renamed to include a timestamp of the +// current time, and a new log file is created using the original log file name. +// If the length of the write is greater than MaxSize, an error is returned. 
+func (l *Logger) Write(p []byte) (n int, err error) { + l.mu.Lock() + defer l.mu.Unlock() + + writeLen := int64(len(p)) + if writeLen > l.max() { + return 0, fmt.Errorf( + "write length %d exceeds maximum file size %d", writeLen, l.max(), + ) + } + + if l.file == nil { + if err = l.openExistingOrNew(len(p)); err != nil { + return 0, err + } + } + + if l.size+writeLen > l.max() { + if err := l.rotate(); err != nil { + return 0, err + } + } + + n, err = l.file.Write(p) + l.size += int64(n) + + return n, err +} + +// Close implements io.Closer, and closes the current logfile. +func (l *Logger) Close() error { + l.mu.Lock() + defer l.mu.Unlock() + return l.close() +} + +// close closes the file if it is open. +func (l *Logger) close() error { + if l.file == nil { + return nil + } + err := l.file.Close() + l.file = nil + return err +} + +// Rotate causes Logger to close the existing log file and immediately create a +// new one. This is a helper function for applications that want to initiate +// rotations outside of the normal rotation rules, such as in response to +// SIGHUP. After rotating, this initiates compression and removal of old log +// files according to the configuration. +func (l *Logger) Rotate() error { + l.mu.Lock() + defer l.mu.Unlock() + return l.rotate() +} + +// rotate closes the current file, moves it aside with a timestamp in the name, +// (if it exists), opens a new file with the original filename, and then runs +// post-rotation processing and removal. +func (l *Logger) rotate() error { + if err := l.close(); err != nil { + return err + } + if err := l.openNew(); err != nil { + return err + } + l.mill() + return nil +} + +// openNew opens a new log file for writing, moving any old log file out of the +// way. This methods assumes the file has already been closed. +func (l *Logger) openNew() error { + err := os.MkdirAll(l.dir(), 0755) + if err != nil { + return fmt.Errorf("can't make directories for new logfile: %s", err) + } + + name := l.filename() + mode := os.FileMode(0600) + info, err := osStat(name) + if err == nil { + // Copy the mode off the old logfile. + mode = info.Mode() + // move the existing file + newname := backupName(name, l.LocalTime) + if err := os.Rename(name, newname); err != nil { + return fmt.Errorf("can't rename log file: %s", err) + } + + // this is a no-op anywhere but linux + if err := chown(name, info); err != nil { + return err + } + } + + // we use truncate here because this should only get called when we've moved + // the file ourselves. if someone else creates the file in the meantime, + // just wipe out the contents. + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode) + if err != nil { + return fmt.Errorf("can't open new logfile: %s", err) + } + l.file = f + l.size = 0 + return nil +} + +// backupName creates a new filename from the given name, inserting a timestamp +// between the filename and the extension, using the local time if requested +// (otherwise UTC). +func backupName(name string, local bool) string { + dir := filepath.Dir(name) + filename := filepath.Base(name) + ext := filepath.Ext(filename) + prefix := filename[:len(filename)-len(ext)] + t := currentTime() + if !local { + t = t.UTC() + } + + timestamp := t.Format(backupTimeFormat) + return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext)) +} + +// openExistingOrNew opens the logfile if it exists and if the current write +// would not put it over MaxSize. 
If there is no such file or the write would +// put it over the MaxSize, a new file is created. +func (l *Logger) openExistingOrNew(writeLen int) error { + l.mill() + + filename := l.filename() + info, err := osStat(filename) + if os.IsNotExist(err) { + return l.openNew() + } + if err != nil { + return fmt.Errorf("error getting log file info: %s", err) + } + + if info.Size()+int64(writeLen) >= l.max() { + return l.rotate() + } + + file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + // if we fail to open the old log file for some reason, just ignore + // it and open a new log file. + return l.openNew() + } + l.file = file + l.size = info.Size() + return nil +} + +// filename generates the name of the logfile from the current time. +func (l *Logger) filename() string { + if l.Filename != "" { + return l.Filename + } + name := filepath.Base(os.Args[0]) + "-lumberjack.log" + return filepath.Join(os.TempDir(), name) +} + +// millRunOnce performs compression and removal of stale log files. +// Log files are compressed if enabled via configuration and old log +// files are removed, keeping at most l.MaxBackups files, as long as +// none of them are older than MaxAge. +func (l *Logger) millRunOnce() error { + if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress { + return nil + } + + files, err := l.oldLogFiles() + if err != nil { + return err + } + + var compress, remove []logInfo + + if l.MaxBackups > 0 && l.MaxBackups < len(files) { + preserved := make(map[string]bool) + var remaining []logInfo + for _, f := range files { + // Only count the uncompressed log file or the + // compressed log file, not both. + fn := f.Name() + if strings.HasSuffix(fn, compressSuffix) { + fn = fn[:len(fn)-len(compressSuffix)] + } + preserved[fn] = true + + if len(preserved) > l.MaxBackups { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining + } + if l.MaxAge > 0 { + diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge)) + cutoff := currentTime().Add(-1 * diff) + + var remaining []logInfo + for _, f := range files { + if f.timestamp.Before(cutoff) { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining + } + + if l.Compress { + for _, f := range files { + if !strings.HasSuffix(f.Name(), compressSuffix) { + compress = append(compress, f) + } + } + } + + for _, f := range remove { + errRemove := os.Remove(filepath.Join(l.dir(), f.Name())) + if err == nil && errRemove != nil { + err = errRemove + } + } + for _, f := range compress { + fn := filepath.Join(l.dir(), f.Name()) + errCompress := compressLogFile(fn, fn+compressSuffix) + if err == nil && errCompress != nil { + err = errCompress + } + } + + return err +} + +// millRun runs in a goroutine to manage post-rotation compression and removal +// of old log files. +func (l *Logger) millRun() { + for range l.millCh { + // what am I going to do, log this? + _ = l.millRunOnce() + } +} + +// mill performs post-rotation compression and removal of stale log files, +// starting the mill goroutine if necessary. 
+func (l *Logger) mill() { + l.startMill.Do(func() { + l.millCh = make(chan bool, 1) + go l.millRun() + }) + select { + case l.millCh <- true: + default: + } +} + +// oldLogFiles returns the list of backup log files stored in the same +// directory as the current log file, sorted by ModTime +func (l *Logger) oldLogFiles() ([]logInfo, error) { + files, err := ioutil.ReadDir(l.dir()) + if err != nil { + return nil, fmt.Errorf("can't read log file directory: %s", err) + } + logFiles := []logInfo{} + + prefix, ext := l.prefixAndExt() + + for _, f := range files { + if f.IsDir() { + continue + } + if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil { + logFiles = append(logFiles, logInfo{t, f}) + continue + } + if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil { + logFiles = append(logFiles, logInfo{t, f}) + continue + } + // error parsing means that the suffix at the end was not generated + // by lumberjack, and therefore it's not a backup file. + } + + sort.Sort(byFormatTime(logFiles)) + + return logFiles, nil +} + +// timeFromName extracts the formatted time from the filename by stripping off +// the filename's prefix and extension. This prevents someone's filename from +// confusing time.parse. +func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) { + if !strings.HasPrefix(filename, prefix) { + return time.Time{}, errors.New("mismatched prefix") + } + if !strings.HasSuffix(filename, ext) { + return time.Time{}, errors.New("mismatched extension") + } + ts := filename[len(prefix) : len(filename)-len(ext)] + return time.Parse(backupTimeFormat, ts) +} + +// max returns the maximum size in bytes of log files before rolling. +func (l *Logger) max() int64 { + if l.MaxSize == 0 { + return int64(defaultMaxSize * megabyte) + } + return int64(l.MaxSize) * int64(megabyte) +} + +// dir returns the directory for the current filename. +func (l *Logger) dir() string { + return filepath.Dir(l.filename()) +} + +// prefixAndExt returns the filename part and extension part from the Logger's +// filename. +func (l *Logger) prefixAndExt() (prefix, ext string) { + filename := filepath.Base(l.filename()) + ext = filepath.Ext(filename) + prefix = filename[:len(filename)-len(ext)] + "-" + return prefix, ext +} + +// compressLogFile compresses the given log file, removing the +// uncompressed log file if successful. +func compressLogFile(src, dst string) (err error) { + f, err := os.Open(src) + if err != nil { + return fmt.Errorf("failed to open log file: %v", err) + } + defer f.Close() + + fi, err := osStat(src) + if err != nil { + return fmt.Errorf("failed to stat log file: %v", err) + } + + if err := chown(dst, fi); err != nil { + return fmt.Errorf("failed to chown compressed log file: %v", err) + } + + // If this file already exists, we presume it was created by + // a previous attempt to compress the log file. 
+ gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) + if err != nil { + return fmt.Errorf("failed to open compressed log file: %v", err) + } + defer gzf.Close() + + gz := gzip.NewWriter(gzf) + + defer func() { + if err != nil { + os.Remove(dst) + err = fmt.Errorf("failed to compress log file: %v", err) + } + }() + + if _, err := io.Copy(gz, f); err != nil { + return err + } + if err := gz.Close(); err != nil { + return err + } + if err := gzf.Close(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + if err := os.Remove(src); err != nil { + return err + } + + return nil +} + +// logInfo is a convenience struct to return the filename and its embedded +// timestamp. +type logInfo struct { + timestamp time.Time + os.FileInfo +} + +// byFormatTime sorts by newest time formatted in the name. +type byFormatTime []logInfo + +func (b byFormatTime) Less(i, j int) bool { + return b[i].timestamp.After(b[j].timestamp) +} + +func (b byFormatTime) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byFormatTime) Len() int { + return len(b) +} diff --git a/vendor/gopkg.in/warnings.v0/LICENSE b/vendor/gopkg.in/warnings.v0/LICENSE new file mode 100644 index 000000000..d65f7e9d8 --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2016 Péter Surányi. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/warnings.v0/README b/vendor/gopkg.in/warnings.v0/README new file mode 100644 index 000000000..974212ba1 --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/README @@ -0,0 +1,77 @@ +Package warnings implements error handling with non-fatal errors (warnings). + +import path: "gopkg.in/warnings.v0" +package docs: https://godoc.org/gopkg.in/warnings.v0 +issues: https://github.com/go-warnings/warnings/issues +pull requests: https://github.com/go-warnings/warnings/pulls + +A recurring pattern in Go programming is the following: + + func myfunc(params) error { + if err := doSomething(...); err != nil { + return err + } + if err := doSomethingElse(...); err != nil { + return err + } + if ok := doAnotherThing(...); !ok { + return errors.New("my error") + } + ... + return nil + } + +This pattern allows interrupting the flow on any received error. 
But what if +there are errors that should be noted but still not fatal, for which the flow +should not be interrupted? Implementing such logic at each if statement would +make the code complex and the flow much harder to follow. + +Package warnings provides the Collector type and a clean and simple pattern +for achieving such logic. The Collector takes care of deciding when to break +the flow and when to continue, collecting any non-fatal errors (warnings) +along the way. The only requirement is that fatal and non-fatal errors can be +distinguished programmatically; that is a function such as + + IsFatal(error) bool + +must be implemented. The following is an example of what the above snippet +could look like using the warnings package: + + import "gopkg.in/warnings.v0" + + func isFatal(err error) bool { + _, ok := err.(WarningType) + return !ok + } + + func myfunc(params) error { + c := warnings.NewCollector(isFatal) + c.FatalWithWarnings = true + if err := c.Collect(doSomething()); err != nil { + return err + } + if err := c.Collect(doSomethingElse(...)); err != nil { + return err + } + if ok := doAnotherThing(...); !ok { + if err := c.Collect(errors.New("my error")); err != nil { + return err + } + } + ... + return c.Done() + } + +For an example of a non-trivial code base using this library, see +gopkg.in/gcfg.v1 + +Rules for using warnings + + - ensure that warnings are programmatically distinguishable from fatal + errors (i.e. implement an isFatal function and any necessary error types) + - ensure that there is a single Collector instance for a call of each + exported function + - ensure that all errors (fatal or warning) are fed through Collect + - ensure that every time an error is returned, it is one returned by a + Collector (from Collect or Done) + - ensure that Collect is never called after Done diff --git a/vendor/gopkg.in/warnings.v0/warnings.go b/vendor/gopkg.in/warnings.v0/warnings.go new file mode 100644 index 000000000..b849d1e3d --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/warnings.go @@ -0,0 +1,194 @@ +// Package warnings implements error handling with non-fatal errors (warnings). +// +// A recurring pattern in Go programming is the following: +// +// func myfunc(params) error { +// if err := doSomething(...); err != nil { +// return err +// } +// if err := doSomethingElse(...); err != nil { +// return err +// } +// if ok := doAnotherThing(...); !ok { +// return errors.New("my error") +// } +// ... +// return nil +// } +// +// This pattern allows interrupting the flow on any received error. But what if +// there are errors that should be noted but still not fatal, for which the flow +// should not be interrupted? Implementing such logic at each if statement would +// make the code complex and the flow much harder to follow. +// +// Package warnings provides the Collector type and a clean and simple pattern +// for achieving such logic. The Collector takes care of deciding when to break +// the flow and when to continue, collecting any non-fatal errors (warnings) +// along the way. The only requirement is that fatal and non-fatal errors can be +// distinguished programmatically; that is a function such as +// +// IsFatal(error) bool +// +// must be implemented. 
The following is an example of what the above snippet +// could look like using the warnings package: +// +// import "gopkg.in/warnings.v0" +// +// func isFatal(err error) bool { +// _, ok := err.(WarningType) +// return !ok +// } +// +// func myfunc(params) error { +// c := warnings.NewCollector(isFatal) +// c.FatalWithWarnings = true +// if err := c.Collect(doSomething()); err != nil { +// return err +// } +// if err := c.Collect(doSomethingElse(...)); err != nil { +// return err +// } +// if ok := doAnotherThing(...); !ok { +// if err := c.Collect(errors.New("my error")); err != nil { +// return err +// } +// } +// ... +// return c.Done() +// } +// +// For an example of a non-trivial code base using this library, see +// gopkg.in/gcfg.v1 +// +// Rules for using warnings +// +// - ensure that warnings are programmatically distinguishable from fatal +// errors (i.e. implement an isFatal function and any necessary error types) +// - ensure that there is a single Collector instance for a call of each +// exported function +// - ensure that all errors (fatal or warning) are fed through Collect +// - ensure that every time an error is returned, it is one returned by a +// Collector (from Collect or Done) +// - ensure that Collect is never called after Done +// +// TODO +// +// - optionally limit the number of warnings (e.g. stop after 20 warnings) (?) +// - consider interaction with contexts +// - go vet-style invocations verifier +// - semi-automatic code converter +// +package warnings // import "gopkg.in/warnings.v0" + +import ( + "bytes" + "fmt" +) + +// List holds a collection of warnings and optionally one fatal error. +type List struct { + Warnings []error + Fatal error +} + +// Error implements the error interface. +func (l List) Error() string { + b := bytes.NewBuffer(nil) + if l.Fatal != nil { + fmt.Fprintln(b, "fatal:") + fmt.Fprintln(b, l.Fatal) + } + switch len(l.Warnings) { + case 0: + // nop + case 1: + fmt.Fprintln(b, "warning:") + default: + fmt.Fprintln(b, "warnings:") + } + for _, err := range l.Warnings { + fmt.Fprintln(b, err) + } + return b.String() +} + +// A Collector collects errors up to the first fatal error. +type Collector struct { + // IsFatal distinguishes between warnings and fatal errors. + IsFatal func(error) bool + // FatalWithWarnings set to true means that a fatal error is returned as + // a List together with all warnings so far. The default behavior is to + // only return the fatal error and discard any warnings that have been + // collected. + FatalWithWarnings bool + + l List + done bool +} + +// NewCollector returns a new Collector; it uses isFatal to distinguish between +// warnings and fatal errors. +func NewCollector(isFatal func(error) bool) *Collector { + return &Collector{IsFatal: isFatal} +} + +// Collect collects a single error (warning or fatal). It returns nil if +// collection can continue (only warnings so far), or otherwise the errors +// collected. Collect mustn't be called after the first fatal error or after +// Done has been called. +func (c *Collector) Collect(err error) error { + if c.done { + panic("warnings.Collector already done") + } + if err == nil { + return nil + } + if c.IsFatal(err) { + c.done = true + c.l.Fatal = err + } else { + c.l.Warnings = append(c.l.Warnings, err) + } + if c.l.Fatal != nil { + return c.erorr() + } + return nil +} + +// Done ends collection and returns the collected error(s). 
+func (c *Collector) Done() error { + c.done = true + return c.erorr() +} + +func (c *Collector) erorr() error { + if !c.FatalWithWarnings && c.l.Fatal != nil { + return c.l.Fatal + } + if c.l.Fatal == nil && len(c.l.Warnings) == 0 { + return nil + } + // Note that a single warning is also returned as a List. This is to make it + // easier to determine fatal-ness of the returned error. + return c.l +} + +// FatalOnly returns the fatal error, if any, **in an error returned by a +// Collector**. It returns nil if and only if err is nil or err is a List +// with err.Fatal == nil. +func FatalOnly(err error) error { + l, ok := err.(List) + if !ok { + return err + } + return l.Fatal +} + +// WarningsOnly returns the warnings **in an error returned by a Collector**. +func WarningsOnly(err error) []error { + l, ok := err.(List) + if !ok { + return nil + } + return l.Warnings +} diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go index 68eca0624..4ac01cc6f 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -1102,138 +1102,138 @@ func init() { } var fileDescriptor_66649ee9bbcd89d2 = []byte{ - // 2095 bytes of a gzipped FileDescriptorProto + // 2085 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57, - 0xdd, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, - 0x74, 0xb7, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, + 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, + 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12, 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24, - 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xc9, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, - 0xcf, 0x9d, 0xd9, 0xcc, 0x46, 0x91, 0x45, 0x6f, 0x9e, 0xdf, 0xf7, 0xfb, 0x7d, 0xbf, 0xb7, 0x86, - 0x8b, 0x87, 0x97, 0xbd, 0x86, 0x61, 0x37, 0x89, 0x63, 0x34, 0x5d, 0xea, 0xd9, 0xbe, 0xab, 0xd3, - 0xe6, 0xd1, 0x25, 0x62, 0x3a, 0x07, 0xe4, 0xcd, 0xe6, 0x3e, 0xb5, 0xa8, 0x4b, 0x18, 0x6d, 0x37, - 0x1c, 0xd7, 0x66, 0x36, 0x7a, 0x39, 0xa0, 0x6e, 0x10, 0xc7, 0x68, 0x84, 0xd4, 0x8d, 0x90, 0x7a, - 0xe5, 0xf5, 0x7d, 0x83, 0x1d, 0xf8, 0xbb, 0x0d, 0xdd, 0xee, 0x34, 0xf7, 0xed, 0x7d, 0xbb, 0x29, - 0x98, 0x76, 0xfd, 0x3d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x81, 0xb0, 0x15, 0x35, 0xa1, 0x5a, 0xb7, - 0x5d, 0xae, 0x36, 0xab, 0x70, 0xe5, 0xad, 0x98, 0xa6, 0x43, 0xf4, 0x03, 0xc3, 0xa2, 0xee, 0x71, - 0xd3, 0x39, 0xdc, 0x4f, 0xdb, 0x3b, 0x0c, 0x97, 0xd7, 0xec, 0x50, 0x46, 0xf2, 0x74, 0x35, 0x8b, - 0xb8, 0x5c, 0xdf, 0x62, 0x46, 0xa7, 0x5f, 0xcd, 0xdb, 0xcf, 0x62, 0xf0, 0xf4, 0x03, 0xda, 0x21, - 0x59, 0x3e, 0xf5, 0xbf, 0x0a, 0x2c, 0xac, 0x9b, 0xa6, 0xad, 0x13, 0x66, 0xd8, 0x16, 0xa6, 0x9e, - 0x6f, 0x32, 0xf4, 0x23, 0x98, 0x68, 0xd3, 0x23, 0x43, 0xa7, 0xde, 0xb2, 0x72, 0x4e, 0x59, 0x9d, - 0x5e, 0x7b, 0xab, 0x31, 0xc8, 0xd9, 0x8d, 0x4d, 0x41, 0x9c, 0x15, 0xa3, 0xcd, 0x3f, 0xec, 0xd6, - 0x47, 0x7a, 0xdd, 0xfa, 0x44, 0x80, 0xf7, 0x70, 0x28, 0x15, 0xdd, 0x81, 0x19, 0xcb, 0x6e, 0xd3, - 0x16, 0x35, 0xa9, 0xce, 0x6c, 0x77, 0xb9, 0x2a, 0xb4, 0x9c, 0x4b, 0x6a, 0xe1, 0x51, 0x68, 0x1c, - 0x5d, 0x6a, 0xdc, 0x48, 0xd0, 0x69, 0x0b, 0xbd, 0x6e, 0x7d, 
0x26, 0x09, 0xc1, 0x29, 0x39, 0x68, - 0x0d, 0x40, 0xb7, 0x2d, 0xe6, 0xda, 0xa6, 0x49, 0xdd, 0xe5, 0xd1, 0x73, 0xca, 0xea, 0x94, 0x86, - 0xa4, 0x15, 0xb0, 0x11, 0x61, 0x70, 0x82, 0x4a, 0x7d, 0x5c, 0x85, 0x69, 0x8d, 0x78, 0x86, 0x1e, - 0x58, 0x89, 0x7e, 0x06, 0x40, 0x18, 0x73, 0x8d, 0x5d, 0x9f, 0x89, 0xf3, 0x57, 0x57, 0xa7, 0xd7, - 0xbe, 0x35, 0xf8, 0xfc, 0x09, 0xf6, 0xc6, 0x7a, 0xc4, 0xbb, 0x65, 0x31, 0xf7, 0x58, 0x7b, 0x25, - 0x54, 0x1f, 0x23, 0x7e, 0xfe, 0xaf, 0xfa, 0xec, 0x2d, 0x9f, 0x98, 0xc6, 0x9e, 0x41, 0xdb, 0x37, - 0x48, 0x87, 0xe2, 0x84, 0x46, 0x74, 0x04, 0x93, 0x3a, 0x71, 0x88, 0x6e, 0xb0, 0xe3, 0xe5, 0x8a, - 0xd0, 0xfe, 0x4e, 0x79, 0xed, 0x1b, 0x92, 0x33, 0xd0, 0x7d, 0x5e, 0xea, 0x9e, 0x0c, 0xc1, 0xfd, - 0x9a, 0x23, 0x5d, 0x2b, 0x26, 0xcc, 0x67, 0x6c, 0x47, 0x0b, 0x50, 0x3d, 0xa4, 0xc7, 0x22, 0x07, - 0xa6, 0x30, 0xff, 0x13, 0x6d, 0xc0, 0xd8, 0x11, 0x31, 0x7d, 0xba, 0x5c, 0x11, 0x11, 0x7b, 0xbd, - 0x54, 0x5e, 0x84, 0x52, 0x71, 0xc0, 0xfb, 0x6e, 0xe5, 0xb2, 0xb2, 0x72, 0x08, 0xb3, 0x29, 0x5b, - 0x73, 0x74, 0x6d, 0xa6, 0x75, 0x35, 0x12, 0xba, 0xa2, 0x14, 0x6f, 0x38, 0x87, 0xfb, 0x69, 0xe5, - 0xb7, 0x7c, 0x62, 0x31, 0x83, 0x1d, 0x27, 0x94, 0xa9, 0x57, 0x60, 0x71, 0x63, 0xeb, 0x5a, 0x60, - 0x4d, 0x32, 0x57, 0xe8, 0x3d, 0xc7, 0xa5, 0x9e, 0x67, 0xd8, 0x56, 0xa0, 0x37, 0xce, 0x95, 0xad, - 0x08, 0x83, 0x13, 0x54, 0xea, 0x11, 0x8c, 0xcb, 0x2c, 0x39, 0x07, 0xa3, 0x16, 0xe9, 0x50, 0xc9, - 0x37, 0x23, 0xf9, 0x46, 0x85, 0x4f, 0x05, 0x06, 0x5d, 0x85, 0xb1, 0x5d, 0x1e, 0x19, 0x69, 0xfe, - 0x85, 0xd2, 0x41, 0xd4, 0xa6, 0x7a, 0xdd, 0xfa, 0x98, 0x00, 0xe0, 0x40, 0x84, 0xfa, 0xa0, 0x02, - 0x67, 0xb3, 0x45, 0xb6, 0x61, 0x5b, 0x7b, 0xc6, 0xbe, 0xef, 0x8a, 0x0f, 0xf4, 0x5d, 0x18, 0x0f, - 0x44, 0x4a, 0x8b, 0x56, 0xa5, 0x45, 0xe3, 0x2d, 0x01, 0x7d, 0xda, 0xad, 0x9f, 0xc9, 0xb2, 0x06, - 0x18, 0x2c, 0xf9, 0xd0, 0x2a, 0x4c, 0xba, 0xf4, 0x53, 0x9f, 0x7a, 0xcc, 0x13, 0x79, 0x37, 0xa5, - 0xcd, 0xf0, 0xd4, 0xc1, 0x12, 0x86, 0x23, 0x2c, 0xba, 0xaf, 0xc0, 0x52, 0x50, 0xc9, 0x29, 0x1b, - 0x64, 0x15, 0x5f, 0x2a, 0x93, 0x13, 0x29, 0x46, 0xed, 0xab, 0xd2, 0xd8, 0xa5, 0x1c, 0x24, 0xce, - 0x53, 0xa5, 0xfe, 0x47, 0x81, 0x33, 0xf9, 0x5d, 0x07, 0xed, 0xc1, 0x84, 0x2b, 0xfe, 0x0a, 0x8b, - 0xf7, 0xbd, 0x32, 0x06, 0xc9, 0x63, 0x16, 0xf7, 0xb0, 0xe0, 0xdb, 0xc3, 0xa1, 0x70, 0xa4, 0xc3, - 0xb8, 0x2e, 0x6c, 0x92, 0x55, 0xfa, 0xde, 0x70, 0x3d, 0x32, 0xed, 0x81, 0xb9, 0x30, 0x5c, 0x01, - 0x18, 0x4b, 0xd1, 0xea, 0x6f, 0x15, 0x98, 0xcf, 0x54, 0x11, 0xaa, 0x41, 0xd5, 0xb0, 0x98, 0x48, - 0xab, 0x6a, 0x10, 0xa3, 0x6d, 0x8b, 0xdd, 0xe1, 0xc9, 0x8e, 0x39, 0x02, 0x9d, 0x87, 0xd1, 0x5d, - 0xdb, 0x36, 0x45, 0x38, 0x26, 0xb5, 0xd9, 0x5e, 0xb7, 0x3e, 0xa5, 0xd9, 0xb6, 0x19, 0x50, 0x08, - 0x14, 0xfa, 0x06, 0x8c, 0x7b, 0xcc, 0x35, 0xac, 0x7d, 0xd9, 0x23, 0xe7, 0x7b, 0xdd, 0xfa, 0x74, - 0x4b, 0x40, 0x02, 0x32, 0x89, 0x46, 0xaf, 0xc2, 0xc4, 0x11, 0x75, 0x45, 0x85, 0x8c, 0x09, 0x4a, - 0xd1, 0x81, 0xef, 0x04, 0xa0, 0x80, 0x34, 0x24, 0x50, 0x7f, 0x5f, 0x81, 0x69, 0x19, 0x40, 0x93, - 0x18, 0x1d, 0x74, 0x37, 0x91, 0x50, 0x41, 0x24, 0x5e, 0x1b, 0x22, 0x12, 0xda, 0x42, 0xd8, 0xbc, - 0x72, 0x32, 0x90, 0xc2, 0xb4, 0x6e, 0x5b, 0x1e, 0x73, 0x89, 0x61, 0xc9, 0x74, 0x4d, 0x37, 0x88, - 0x41, 0x89, 0x27, 0xd9, 0xb4, 0x25, 0xa9, 0x60, 0x3a, 0x86, 0x79, 0x38, 0x29, 0x17, 0x7d, 0x1c, - 0x85, 0xb8, 0x2a, 0x34, 0xbc, 0x5d, 0x4a, 0x03, 0x3f, 0x7c, 0xb9, 0xe8, 0xfe, 0x4d, 0x81, 0xe5, - 0x22, 0xa6, 0x54, 0x3d, 0x2a, 0xcf, 0x55, 0x8f, 0x95, 0x93, 0xab, 0xc7, 0x3f, 0x2b, 0x89, 0xd8, - 0x7b, 0x1e, 0xfa, 0x04, 0x26, 0xf9, 0x6a, 0xd3, 0x26, 0x8c, 0xc8, 0x15, 0xe2, 0x8d, 
0x41, 0xed, - 0xdb, 0x6b, 0x70, 0x6a, 0x3e, 0xee, 0x6f, 0xee, 0xfe, 0x98, 0xea, 0xec, 0x3a, 0x65, 0x24, 0x6e, - 0xc6, 0x31, 0x0c, 0x47, 0x52, 0xd1, 0x4d, 0x18, 0xf5, 0x1c, 0xaa, 0x0f, 0x33, 0x88, 0x84, 0x69, - 0x2d, 0x87, 0xea, 0x71, 0xbf, 0xe6, 0x5f, 0x58, 0x08, 0x52, 0x7f, 0x95, 0x0c, 0x86, 0xe7, 0xa5, - 0x83, 0x51, 0xe4, 0x62, 0xe5, 0xe4, 0x5c, 0xfc, 0x79, 0xd4, 0x0a, 0x84, 0x7d, 0xd7, 0x0c, 0x8f, - 0xa1, 0x8f, 0xfa, 0xdc, 0xdc, 0x28, 0xe7, 0x66, 0xce, 0x2d, 0x9c, 0x1c, 0x55, 0x59, 0x08, 0x49, - 0xb8, 0xf8, 0x06, 0x8c, 0x19, 0x8c, 0x76, 0xc2, 0xfa, 0xba, 0x50, 0xda, 0xc7, 0xda, 0xac, 0x94, - 0x3a, 0xb6, 0xcd, 0xf9, 0x71, 0x20, 0x46, 0xfd, 0x5d, 0x25, 0x75, 0x02, 0xee, 0x7b, 0xf4, 0x43, - 0x98, 0xf2, 0xe4, 0x44, 0x0e, 0xbb, 0xc4, 0xc5, 0x32, 0x7a, 0xa2, 0x95, 0x70, 0x51, 0xaa, 0x9a, - 0x0a, 0x21, 0x1e, 0x8e, 0x25, 0x26, 0x2a, 0xb8, 0x32, 0x54, 0x05, 0x67, 0xe2, 0x5f, 0x54, 0xc1, - 0xe8, 0x2e, 0xcc, 0x7a, 0xbe, 0xc1, 0xc8, 0xae, 0x49, 0xf9, 0x5a, 0xea, 0x95, 0xde, 0x64, 0x17, - 0x7b, 0xdd, 0xfa, 0x6c, 0x2b, 0xc9, 0x8a, 0xd3, 0x92, 0x54, 0x17, 0xf2, 0x72, 0x03, 0xfd, 0x00, - 0xc6, 0x6d, 0x87, 0x7c, 0xea, 0x53, 0x19, 0xf0, 0x67, 0x2c, 0x87, 0x37, 0x05, 0x6d, 0x5e, 0x06, - 0x02, 0x3f, 0x4e, 0x80, 0xc6, 0x52, 0xa4, 0xfa, 0x40, 0x81, 0x85, 0x6c, 0x9f, 0x1c, 0xa2, 0x11, - 0xed, 0xc0, 0x5c, 0x87, 0x30, 0xfd, 0x20, 0x9a, 0x55, 0xa2, 0x3a, 0xa7, 0xb4, 0xd5, 0x5e, 0xb7, - 0x3e, 0x77, 0x3d, 0x85, 0x79, 0xda, 0xad, 0xa3, 0xf7, 0x7d, 0xd3, 0x3c, 0x4e, 0xaf, 0xa3, 0x19, - 0x7e, 0xf5, 0x17, 0x55, 0x98, 0x4d, 0x8d, 0x85, 0x12, 0x8b, 0xd7, 0x3a, 0xcc, 0xb7, 0xe3, 0x38, - 0x72, 0x84, 0x34, 0xe3, 0x2b, 0x92, 0x38, 0x99, 0x84, 0x82, 0x2f, 0x4b, 0x9f, 0xce, 0xca, 0xea, - 0x0b, 0xcf, 0xca, 0x3b, 0x30, 0x47, 0xa2, 0x45, 0xe0, 0xba, 0xdd, 0xa6, 0x72, 0x0c, 0x37, 0x24, - 0xd7, 0xdc, 0x7a, 0x0a, 0xfb, 0xb4, 0x5b, 0x3f, 0x95, 0x5d, 0x1f, 0x38, 0x1c, 0x67, 0xa4, 0xa0, - 0x57, 0x60, 0x4c, 0xb7, 0x7d, 0x8b, 0x89, 0x59, 0x5d, 0x8d, 0xab, 0x70, 0x83, 0x03, 0x71, 0x80, - 0x43, 0xdf, 0x84, 0x69, 0xd2, 0xee, 0x18, 0xd6, 0xba, 0xae, 0x53, 0xcf, 0x5b, 0x1e, 0x17, 0x5b, - 0x42, 0x34, 0x0b, 0xd7, 0x63, 0x14, 0x4e, 0xd2, 0xa9, 0x7f, 0x52, 0xc2, 0x15, 0xb4, 0x60, 0x55, - 0x42, 0x17, 0xf8, 0xe2, 0x25, 0x50, 0x32, 0x38, 0x89, 0xdd, 0x49, 0x80, 0x71, 0x88, 0x47, 0x5f, - 0x87, 0xf1, 0xb6, 0x6b, 0x1c, 0x51, 0x57, 0x46, 0x26, 0x2a, 0xaf, 0x4d, 0x01, 0xc5, 0x12, 0xcb, - 0x83, 0xed, 0x84, 0xab, 0x4c, 0x22, 0xd8, 0x3b, 0xb6, 0x6d, 0x62, 0x81, 0x11, 0x92, 0x84, 0x55, - 0xd2, 0x85, 0xb1, 0xa4, 0xc0, 0x56, 0x89, 0x55, 0x3f, 0x82, 0xb9, 0xcc, 0xfe, 0x7f, 0x15, 0xaa, - 0x3a, 0x35, 0x65, 0x15, 0x35, 0x07, 0x47, 0xb7, 0xef, 0xf6, 0xa0, 0x4d, 0xf4, 0xba, 0xf5, 0xea, - 0xc6, 0xd6, 0x35, 0xcc, 0x85, 0xa8, 0xbf, 0x51, 0xe0, 0xa5, 0xc2, 0x4a, 0x4b, 0x9c, 0x56, 0x19, - 0x78, 0x5a, 0x02, 0xe0, 0x10, 0x97, 0x74, 0x28, 0xa3, 0xae, 0x97, 0x33, 0xd8, 0xd2, 0xfd, 0x5c, - 0x5e, 0xec, 0x1b, 0x98, 0xfc, 0x64, 0xeb, 0x1e, 0xa3, 0x16, 0xdf, 0xc1, 0xe2, 0x99, 0xb9, 0x13, - 0x09, 0xc2, 0x09, 0xa1, 0xea, 0x1f, 0x2b, 0x70, 0x6a, 0xc7, 0x6e, 0xb7, 0xf4, 0x03, 0xda, 0xf6, - 0x4d, 0xc3, 0xda, 0xe7, 0x97, 0x62, 0x7a, 0x8f, 0x9d, 0xc0, 0xc0, 0xfe, 0x30, 0x35, 0xb0, 0x9f, - 0xd1, 0x88, 0xf3, 0x6c, 0x2c, 0x9a, 0xdc, 0xe8, 0x13, 0xbe, 0xcd, 0x12, 0xe6, 0x87, 0xdd, 0xf7, - 0xf2, 0x73, 0xc8, 0x16, 0xfc, 0x71, 0x64, 0x82, 0x6f, 0x2c, 0xe5, 0xaa, 0x7f, 0x57, 0x60, 0x39, - 0x8f, 0xed, 0x04, 0x86, 0xf0, 0xf7, 0xd2, 0x43, 0x78, 0x6d, 0xf8, 0xb3, 0x15, 0x4c, 0xe3, 0xcf, - 0x0a, 0xce, 0x24, 0xc6, 0xf2, 0x65, 0x98, 0x09, 0xda, 0x15, 0x6d, 0xf3, 0x69, 0x24, 0x13, 0xf7, - 0x94, 0x14, 
0x34, 0xd3, 0x4a, 0xe0, 0x70, 0x8a, 0x12, 0xbd, 0x0b, 0x73, 0x8e, 0xcd, 0xa8, 0xc5, - 0x0c, 0x62, 0x06, 0x23, 0x31, 0xb8, 0x4c, 0x22, 0xde, 0xd7, 0x76, 0x52, 0x18, 0x9c, 0xa1, 0x54, - 0x7f, 0xa9, 0xc0, 0x4a, 0x71, 0x74, 0xd0, 0x4f, 0x61, 0x2e, 0x3c, 0xb1, 0xd8, 0x97, 0x4b, 0x5e, - 0xf0, 0x70, 0x92, 0x27, 0x96, 0x2d, 0x43, 0x7e, 0x26, 0xec, 0xb9, 0x29, 0x32, 0x0f, 0x67, 0x54, - 0xa9, 0xbf, 0xae, 0xc0, 0x6c, 0x8a, 0xe4, 0x04, 0x4a, 0xe6, 0x56, 0xaa, 0x64, 0x9a, 0xc3, 0x1c, - 0xb3, 0xa8, 0x56, 0xee, 0x66, 0x6a, 0xe5, 0xd2, 0x30, 0x42, 0x07, 0x17, 0x49, 0x4f, 0x81, 0x5a, - 0x8a, 0x9e, 0xef, 0x10, 0x7e, 0x87, 0xba, 0x98, 0xee, 0x51, 0x97, 0x5a, 0x3a, 0x45, 0x17, 0x61, - 0x92, 0x38, 0xc6, 0x15, 0xd7, 0xf6, 0x1d, 0x99, 0x52, 0x51, 0xea, 0xaf, 0xef, 0x6c, 0x0b, 0x38, - 0x8e, 0x28, 0x38, 0x75, 0x68, 0x91, 0x9c, 0x00, 0x89, 0x3b, 0x61, 0x00, 0xc7, 0x11, 0x45, 0xb4, - 0x18, 0x8c, 0x16, 0x2e, 0x06, 0x1a, 0x54, 0x7d, 0xa3, 0x2d, 0x2f, 0xb2, 0x6f, 0x48, 0x82, 0xea, - 0xed, 0xed, 0xcd, 0xa7, 0xdd, 0xfa, 0xf9, 0xa2, 0xf7, 0x53, 0x76, 0xec, 0x50, 0xaf, 0x71, 0x7b, - 0x7b, 0x13, 0x73, 0x66, 0xf5, 0x2f, 0x0a, 0x2c, 0xa6, 0x0e, 0x79, 0x02, 0x2d, 0x60, 0x27, 0xdd, - 0x02, 0x5e, 0x1b, 0x22, 0x64, 0x05, 0xb5, 0x7f, 0x5f, 0x81, 0xb3, 0x03, 0xcb, 0xa2, 0xc4, 0x9a, - 0xf5, 0x1d, 0x98, 0xf7, 0xad, 0xf4, 0xf2, 0x1b, 0x54, 0xfa, 0x12, 0x5f, 0xb1, 0x6e, 0xa7, 0x51, - 0x38, 0x4b, 0xcb, 0xaf, 0x5b, 0x8b, 0x7d, 0x29, 0x8b, 0x3e, 0xc8, 0xbe, 0x3c, 0x5f, 0x28, 0x7d, - 0xe5, 0x1e, 0xf0, 0xdc, 0x9c, 0x7e, 0x16, 0xae, 0x94, 0x7a, 0x16, 0xfe, 0xbc, 0x02, 0x4b, 0x39, - 0xd9, 0x8f, 0x3e, 0x06, 0x88, 0xb7, 0xae, 0x9c, 0x60, 0xe7, 0x18, 0xd9, 0xf7, 0xa8, 0x34, 0x27, - 0xde, 0x83, 0x63, 0x68, 0x42, 0x22, 0xf2, 0x60, 0xda, 0xa5, 0x1e, 0x75, 0x8f, 0x68, 0xfb, 0x7d, - 0xdb, 0x95, 0x21, 0xff, 0xf6, 0x10, 0x21, 0xef, 0xab, 0xba, 0x78, 0xb9, 0xc3, 0xb1, 0x60, 0x9c, - 0xd4, 0x82, 0x5a, 0x70, 0xba, 0x4d, 0x49, 0xc2, 0x4c, 0xb1, 0xa6, 0xd1, 0xb6, 0x7c, 0x43, 0x3a, - 0x2b, 0x05, 0x9c, 0xde, 0xcc, 0x23, 0xc2, 0xf9, 0xbc, 0xea, 0x3f, 0x15, 0x38, 0x9d, 0xb2, 0xec, - 0x03, 0xda, 0x71, 0x4c, 0xc2, 0xe8, 0x09, 0x74, 0xce, 0xbb, 0xa9, 0xce, 0xf9, 0xce, 0x10, 0xee, - 0x0b, 0x8d, 0x2c, 0x7c, 0x27, 0xf8, 0x87, 0x02, 0x2f, 0xe5, 0x72, 0x9c, 0x40, 0x27, 0xf8, 0x30, - 0xdd, 0x09, 0xde, 0x7c, 0x8e, 0x73, 0x15, 0x74, 0x84, 0x47, 0x45, 0xa7, 0x6a, 0x05, 0x1b, 0xd6, - 0x97, 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x26, 0xa4, 0xe4, 0x37, 0x86, 0x12, 0x3d, 0x6d, 0x0d, - 0x40, 0xfe, 0x40, 0x16, 0xbe, 0x9f, 0x55, 0x63, 0xbb, 0xaf, 0x44, 0x18, 0x9c, 0xa0, 0x42, 0x57, - 0x01, 0x85, 0x16, 0xb6, 0x4c, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x55, 0xf0, 0xae, 0x48, 0x5e, 0x84, - 0xfb, 0x28, 0x70, 0x0e, 0x97, 0xfa, 0x57, 0x25, 0x5e, 0x32, 0x04, 0xf8, 0xff, 0xd5, 0xf3, 0xc2, - 0xb8, 0x42, 0xcf, 0x27, 0x87, 0xa4, 0xa0, 0x0c, 0x4b, 0xc3, 0x94, 0x29, 0xfd, 0xe2, 0x4a, 0x23, - 0x94, 0xf8, 0x9c, 0x43, 0x52, 0x58, 0x57, 0x50, 0x12, 0x0f, 0xaa, 0x99, 0x53, 0x88, 0x52, 0x28, - 0x7b, 0x99, 0xbb, 0x26, 0xaf, 0xae, 0x81, 0x5b, 0x5f, 0x2d, 0x67, 0x0e, 0x4f, 0xd3, 0xdc, 0x6b, - 0xee, 0x45, 0x98, 0xb4, 0xec, 0x36, 0x15, 0x8f, 0x19, 0x99, 0x55, 0xe8, 0x86, 0x84, 0xe3, 0x88, - 0xa2, 0xef, 0xe7, 0xd5, 0xd1, 0x17, 0xf4, 0xf3, 0x2a, 0x5f, 0xdf, 0x4c, 0xb9, 0xd5, 0x8f, 0x89, - 0xc9, 0x10, 0xaf, 0x6f, 0x12, 0x8e, 0x23, 0x0a, 0x74, 0x33, 0x9e, 0xe5, 0xe3, 0x22, 0x26, 0x5f, - 0x2b, 0x33, 0xcb, 0x8b, 0xc7, 0xb8, 0xa6, 0x3d, 0x7c, 0x52, 0x1b, 0x79, 0xf4, 0xa4, 0x36, 0xf2, - 0xc5, 0x93, 0xda, 0xc8, 0xfd, 0x5e, 0x4d, 0x79, 0xd8, 0xab, 0x29, 0x8f, 0x7a, 0x35, 0xe5, 0x8b, - 0x5e, 0x4d, 0x79, 0xdc, 0xab, 0x29, 
0x9f, 0xfd, 0xbb, 0x36, 0xf2, 0xfd, 0x97, 0x07, 0xfd, 0x17, - 0xc1, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x8f, 0x9b, 0x77, 0x64, 0x20, 0x00, 0x00, + 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, + 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3, + 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4, + 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a, + 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5, + 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30, + 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78, + 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96, + 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71, + 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4, + 0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81, + 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a, + 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f, + 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d, + 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e, + 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a, + 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2, + 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a, + 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf, + 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9, + 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4, + 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68, + 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a, + 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26, + 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52, + 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b, + 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed, + 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93, + 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6, + 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d, + 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1, + 0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9, + 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91, + 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06, + 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 
0x45, 0xde, 0x4d, 0xea, 0xd3, + 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59, + 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54, + 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d, + 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66, + 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6, + 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa, + 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71, + 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85, + 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16, + 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74, + 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66, + 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82, + 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50, + 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21, + 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c, + 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe, + 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6, + 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38, + 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed, + 0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30, + 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4, + 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17, + 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47, + 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93, + 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11, + 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd, + 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf, + 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98, + 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0, + 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d, + 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67, + 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf, + 0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e, + 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2, + 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a, + 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 
0x12, + 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8, + 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88, + 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8, + 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30, + 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0, + 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba, + 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0, + 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe, + 0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e, + 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17, + 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41, + 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65, + 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a, + 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79, + 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1, + 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9, + 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96, + 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49, + 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f, + 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e, + 0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5, + 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96, + 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0, + 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10, + 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31, + 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83, + 0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3, + 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36, + 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0, + 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf, + 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3, + 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc, + 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb, + 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7, + 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1, + 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b, + 0x41, 0x6d, 0x38, 
0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93, + 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e, + 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45, + 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed, + 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff, + 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40, + 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00, + 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f, + 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8, + 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac, + 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a, + 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56, + 0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5, + 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92, + 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d, + 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37, + 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71, + 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6, + 0x20, 0x64, 0x20, 0x00, 0x00, } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto index 35a7fbafa..b4428ad45 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -843,7 +843,7 @@ message ResourceSlice { message ResourceSliceList { // Standard list metadata // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta listMeta = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of resource ResourceSlices. repeated ResourceSlice items = 2; diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go index 298d8d107..4efd2491d 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -290,7 +290,7 @@ type ResourceSliceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // +optional - metav1.ListMeta `json:"listMeta" protobuf:"bytes,1,opt,name=listMeta"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of resource ResourceSlices. 
Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go index 8154c99ce..1a44a971d 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -379,7 +379,7 @@ func (ResourceSlice) SwaggerDoc() map[string]string { var map_ResourceSliceList = map[string]string{ "": "ResourceSliceList is a collection of ResourceSlices.", - "listMeta": "Standard list metadata", + "metadata": "Standard list metadata", "items": "Items is the list of resource ResourceSlices.", } diff --git a/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go new file mode 100644 index 000000000..40ec27d87 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go @@ -0,0 +1,303 @@ +/* +Copyright 2013 Google Inc. All Rights Reserved. +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package verbosity + +import ( + "bytes" + "errors" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" +) + +// New returns a struct that implements -v and -vmodule support. Changing and +// checking these settings is thread-safe, with all concurrency issues handled +// internally. +func New() *VState { + vs := new(VState) + + // The two fields must have a pointer to the overal struct for their + // implementation of Set. + vs.vmodule.vs = vs + vs.verbosity.vs = vs + + return vs +} + +// Value is an extension that makes it possible to use the values in pflag. +type Value interface { + flag.Value + Type() string +} + +func (vs *VState) V() Value { + return &vs.verbosity +} + +func (vs *VState) VModule() Value { + return &vs.vmodule +} + +// VState contains settings and state. Some of its fields can be accessed +// through atomic read/writes, in other cases a mutex must be held. +type VState struct { + mu sync.Mutex + + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity levelSpec // V logging level, the value of the -v flag/ + + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 +} + +// Level must be an int32 to support atomic read/writes. +type Level int32 + +type levelSpec struct { + vs *VState + l Level +} + +// get returns the value of the level. 
+func (l *levelSpec) get() Level { + return Level(atomic.LoadInt32((*int32)(&l.l))) +} + +// set sets the value of the level. +func (l *levelSpec) set(val Level) { + atomic.StoreInt32((*int32)(&l.l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *levelSpec) String() string { + return strconv.FormatInt(int64(l.l), 10) +} + +// Get is part of the flag.Getter interface. It returns the +// verbosity level as int32. +func (l *levelSpec) Get() interface{} { + return int32(l.l) +} + +// Type is part of pflag.Value. +func (l *levelSpec) Type() string { + return "Level" +} + +// Set is part of the flag.Value interface. +func (l *levelSpec) Set(value string) error { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + l.vs.mu.Lock() + defer l.vs.mu.Unlock() + l.vs.set(Level(v), l.vs.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + vs *VState + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + // Empty instances don't have and don't need a lock (can + // happen when flag uses introspection). + if m.vs != nil { + m.vs.mu.Lock() + defer m.vs.mu.Unlock() + } + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +// Type is part of pflag.Value +func (m *moduleSpec) Type() string { + return "pattern=N,..." +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Set will sets module value +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.ParseInt(patLev[1], 10, 32) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + m.vs.mu.Lock() + defer m.vs.mu.Unlock() + m.vs.set(m.vs.verbosity.l, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. 
+func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// set sets a consistent state for V logging. +// The mutex must be held. +func (vs *VState) set(l Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + vs.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&vs.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + vs.vmodule.filter = filter + vs.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&vs.filterLength, int32(len(filter))) + vs.verbosity.set(l) +} + +// Enabled checks whether logging is enabled at the given level. This must be +// called with depth=0 when the caller of enabled will do the logging and +// higher values when more stack levels need to be skipped. +// +// The mutex will be locked only if needed. +func (vs *VState) Enabled(level Level, depth int) bool { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if vs.verbosity.get() >= level { + return true + } + + // It's off globally but vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&vs.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + vs.mu.Lock() + defer vs.mu.Unlock() + if runtime.Callers(depth+2, vs.pcs[:]) == 0 { + return false + } + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := vs.pcs[0] - 1 + v, ok := vs.vmap[pc] + if !ok { + v = vs.setV(pc) + } + return v >= level + } + return false +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// Mutex is held. +func (vs *VState) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + file = strings.TrimSuffix(file, ".go") + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range vs.vmodule.filter { + if filter.match(file) { + vs.vmap[pc] = filter.level + return filter.level + } + } + vs.vmap[pc] = 0 + return 0 +} diff --git a/vendor/k8s.io/klog/v2/textlogger/options.go b/vendor/k8s.io/klog/v2/textlogger/options.go new file mode 100644 index 000000000..b1c4eefb3 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/options.go @@ -0,0 +1,154 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textlogger + +import ( + "flag" + "io" + "os" + "strconv" + "time" + + "k8s.io/klog/v2/internal/verbosity" +) + +// Config influences logging in a text logger. To make this configurable via +// command line flags, instantiate this once per program and use AddFlags to +// bind command line flags to the instance before passing it to NewTestContext. +// +// Must be constructed with NewConfig. +type Config struct { + vstate *verbosity.VState + co configOptions +} + +// Verbosity returns a value instance that can be used to query (via String) or +// modify (via Set) the verbosity threshold. This is thread-safe and can be +// done at runtime. +func (c *Config) Verbosity() flag.Value { + return c.vstate.V() +} + +// VModule returns a value instance that can be used to query (via String) or +// modify (via Set) the vmodule settings. This is thread-safe and can be done +// at runtime. +func (c *Config) VModule() flag.Value { + return c.vstate.VModule() +} + +// ConfigOption implements functional parameters for NewConfig. +type ConfigOption func(co *configOptions) + +type configOptions struct { + verbosityFlagName string + vmoduleFlagName string + verbosityDefault int + fixedTime *time.Time + unwind func(int) (string, int) + output io.Writer +} + +// VerbosityFlagName overrides the default -v for the verbosity level. +func VerbosityFlagName(name string) ConfigOption { + return func(co *configOptions) { + + co.verbosityFlagName = name + } +} + +// VModulFlagName overrides the default -vmodule for the per-module +// verbosity levels. +func VModuleFlagName(name string) ConfigOption { + return func(co *configOptions) { + co.vmoduleFlagName = name + } +} + +// Verbosity overrides the default verbosity level of 0. +// See https://github.com/kubernetes/community/blob/9406b4352fe2d5810cb21cc3cb059ce5886de157/contributors/devel/sig-instrumentation/logging.md#logging-conventions +// for log level conventions in Kubernetes. +func Verbosity(level int) ConfigOption { + return func(co *configOptions) { + co.verbosityDefault = level + } +} + +// Output overrides stderr as the output stream. +func Output(output io.Writer) ConfigOption { + return func(co *configOptions) { + co.output = output + } +} + +// FixedTime overrides the actual time with a fixed time. Useful only for testing. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func FixedTime(ts time.Time) ConfigOption { + return func(co *configOptions) { + co.fixedTime = &ts + } +} + +// Backtrace overrides the default mechanism for determining the call site. +// The callback is invoked with the number of function calls between itself +// and the call site. It must return the file name and line number. An empty +// file name indicates that the information is unknown. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func Backtrace(unwind func(skip int) (filename string, line int)) ConfigOption { + return func(co *configOptions) { + co.unwind = unwind + } +} + +// NewConfig returns a configuration with recommended defaults and optional +// modifications. Command line flags are not bound to any FlagSet yet. +func NewConfig(opts ...ConfigOption) *Config { + c := &Config{ + vstate: verbosity.New(), + co: configOptions{ + verbosityFlagName: "v", + vmoduleFlagName: "vmodule", + verbosityDefault: 0, + unwind: runtimeBacktrace, + output: os.Stderr, + }, + } + for _, opt := range opts { + opt(&c.co) + } + + // Cannot fail for this input. + _ = c.Verbosity().Set(strconv.FormatInt(int64(c.co.verbosityDefault), 10)) + return c +} + +// AddFlags registers the command line flags that control the configuration. +// +// The default flag names are the same as in klog, so unless those defaults +// are changed, either klog.InitFlags or Config.AddFlags can be used for the +// same flag set, but not both. +func (c *Config) AddFlags(fs *flag.FlagSet) { + fs.Var(c.Verbosity(), c.co.verbosityFlagName, "number for the log level verbosity of the testing logger") + fs.Var(c.VModule(), c.co.vmoduleFlagName, "comma-separated list of pattern=N log level settings for files matching the patterns") +} diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger.go b/vendor/k8s.io/klog/v2/textlogger/textlogger.go new file mode 100644 index 000000000..0b55a2994 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/textlogger.go @@ -0,0 +1,187 @@ +/* +Copyright 2019 The Kubernetes Authors. +Copyright 2020 Intel Corporation. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package textlogger contains an implementation of the logr interface which is +// producing the exact same output as klog. It does not route output through +// klog (i.e. ignores [k8s.io/klog/v2.InitFlags]). Instead, all settings must be +// configured through its own [NewConfig] and [Config.AddFlags]. +package textlogger + +import ( + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/verbosity" +) + +var ( + // TimeNow is used to retrieve the current time. May be changed for testing. + TimeNow = time.Now +) + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + +// NewLogger constructs a new logger. +// +// Verbosity can be modified at any time through the Config.V and +// Config.VModule API. +func NewLogger(c *Config) logr.Logger { + return logr.New(&tlogger{ + values: nil, + config: c, + }) +} + +type tlogger struct { + callDepth int + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. 
+ hasPrefix bool + + values []interface{} + groups string + config *Config +} + +func (l *tlogger) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *tlogger) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *tlogger) Enabled(level int) bool { + return l.config.vstate.Enabled(verbosity.Level(level), 1+l.callDepth) +} + +func (l *tlogger) Info(_ int, msg string, kvList ...interface{}) { + l.print(nil, severity.InfoLog, msg, kvList) +} + +func (l *tlogger) Error(err error, msg string, kvList ...interface{}) { + l.print(err, severity.ErrorLog, msg, kvList) +} + +func (l *tlogger) print(err error, s severity.Severity, msg string, kvList []interface{}) { + // Determine caller. + // +1 for this frame, +1 for Info/Error. + skip := l.callDepth + 2 + file, line := l.config.co.unwind(skip) + if file == "" { + file = "???" + line = 1 + } else if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + l.printWithInfos(file, line, time.Now(), err, s, msg, kvList) +} + +func runtimeBacktrace(skip int) (string, int) { + _, file, line, ok := runtime.Caller(skip + 1) + if !ok { + return "", 0 + } + return file, line +} + +func (l *tlogger) printWithInfos(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // Only create a new buffer if we don't have one cached. + b := buffer.GetBuffer() + defer buffer.PutBuffer(b) + + // Format header. + if l.config.co.fixedTime != nil { + now = *l.config.co.fixedTime + } + b.FormatHeader(s, file, line, now) + + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVFormat(&b.Buffer, "err", err) + } + serialize.MergeAndFormatKVs(&b.Buffer, l.values, kvList) + if b.Len() == 0 || b.Bytes()[b.Len()-1] != '\n' { + b.WriteByte('\n') + } + _, _ = l.config.co.output.Write(b.Bytes()) +} + +func (l *tlogger) WriteKlogBuffer(data []byte) { + _, _ = l.config.co.output.Write(data) +} + +// WithName returns a new logr.Logger with the specified name appended. klogr +// uses '/' characters to separate name elements. Callers should not pass '/' +// in the provided name string, but this library does not actually enforce that. +func (l *tlogger) WithName(name string) logr.LogSink { + clone := *l + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. + v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + clone.values = v + } else { + // Preprend new key/value pair. + v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + clone.values = v + clone.hasPrefix = true + } + return &clone +} + +func (l *tlogger) WithValues(kvList ...interface{}) logr.LogSink { + clone := *l + clone.values = serialize.WithValues(l.values, kvList) + return &clone +} + +// KlogBufferWriter is implemented by the textlogger LogSink. +type KlogBufferWriter interface { + // WriteKlogBuffer takes a pre-formatted buffer prepared by klog and + // writes it unchanged to the output stream. 
Can be used with + // klog.WriteKlogBuffer when setting a logger through + // klog.SetLoggerWithOptions. + WriteKlogBuffer([]byte) +} + +var _ logr.LogSink = &tlogger{} +var _ logr.CallDepthLogSink = &tlogger{} +var _ KlogBufferWriter = &tlogger{} diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go new file mode 100644 index 000000000..c888ef8a6 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go @@ -0,0 +1,52 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textlogger + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *tlogger) Handle(ctx context.Context, record slog.Record) error { + return sloghandler.Handle(ctx, record, l.groups, l.printWithInfos) +} + +func (l *tlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *tlogger) WithGroup(name string) logr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." + name + } else { + clone.groups = name + } + return &clone +} + +var _ logr.SlogSink = &tlogger{} diff --git a/vendor/k8s.io/utils/exec/README.md b/vendor/k8s.io/utils/exec/README.md new file mode 100644 index 000000000..7944e8dd3 --- /dev/null +++ b/vendor/k8s.io/utils/exec/README.md @@ -0,0 +1,5 @@ +# Exec + +This package provides an interface for `os/exec`. It makes it easier to mock +and replace in tests, especially with the [FakeExec](testing/fake_exec.go) +struct. diff --git a/vendor/k8s.io/utils/exec/doc.go b/vendor/k8s.io/utils/exec/doc.go new file mode 100644 index 000000000..cbb44bdb5 --- /dev/null +++ b/vendor/k8s.io/utils/exec/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package exec provides an injectable interface and implementations for running commands. +package exec // import "k8s.io/utils/exec" diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go new file mode 100644 index 000000000..d9c91e3ca --- /dev/null +++ b/vendor/k8s.io/utils/exec/exec.go @@ -0,0 +1,256 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "context" + "io" + "io/fs" + osexec "os/exec" + "syscall" + "time" +) + +// ErrExecutableNotFound is returned if the executable is not found. +var ErrExecutableNotFound = osexec.ErrNotFound + +// Interface is an interface that presents a subset of the os/exec API. Use this +// when you want to inject fakeable/mockable exec behavior. +type Interface interface { + // Command returns a Cmd instance which can be used to run a single command. + // This follows the pattern of package os/exec. + Command(cmd string, args ...string) Cmd + + // CommandContext returns a Cmd instance which can be used to run a single command. + // + // The provided context is used to kill the process if the context becomes done + // before the command completes on its own. For example, a timeout can be set in + // the context. + CommandContext(ctx context.Context, cmd string, args ...string) Cmd + + // LookPath wraps os/exec.LookPath + LookPath(file string) (string, error) +} + +// Cmd is an interface that presents an API that is very similar to Cmd from os/exec. +// As more functionality is needed, this can grow. Since Cmd is a struct, we will have +// to replace fields with get/set method pairs. +type Cmd interface { + // Run runs the command to the completion. + Run() error + // CombinedOutput runs the command and returns its combined standard output + // and standard error. This follows the pattern of package os/exec. + CombinedOutput() ([]byte, error) + // Output runs the command and returns standard output, but not standard err + Output() ([]byte, error) + SetDir(dir string) + SetStdin(in io.Reader) + SetStdout(out io.Writer) + SetStderr(out io.Writer) + SetEnv(env []string) + + // StdoutPipe and StderrPipe for getting the process' Stdout and Stderr as + // Readers + StdoutPipe() (io.ReadCloser, error) + StderrPipe() (io.ReadCloser, error) + + // Start and Wait are for running a process non-blocking + Start() error + Wait() error + + // Stops the command by sending SIGTERM. It is not guaranteed the + // process will stop before this function returns. If the process is not + // responding, an internal timer function will send a SIGKILL to force + // terminate after 10 seconds. + Stop() +} + +// ExitError is an interface that presents an API similar to os.ProcessState, which is +// what ExitError from os/exec is. This is designed to make testing a bit easier and +// probably loses some of the cross-platform properties of the underlying library. +type ExitError interface { + String() string + Error() string + Exited() bool + ExitStatus() int +} + +// Implements Interface in terms of really exec()ing. +type executor struct{} + +// New returns a new Interface which will os/exec to run commands. +func New() Interface { + return &executor{} +} + +// Command is part of the Interface interface. +func (executor *executor) Command(cmd string, args ...string) Cmd { + return (*cmdWrapper)(maskErrDotCmd(osexec.Command(cmd, args...))) +} + +// CommandContext is part of the Interface interface. 
+func (executor *executor) CommandContext(ctx context.Context, cmd string, args ...string) Cmd { + return (*cmdWrapper)(maskErrDotCmd(osexec.CommandContext(ctx, cmd, args...))) +} + +// LookPath is part of the Interface interface +func (executor *executor) LookPath(file string) (string, error) { + path, err := osexec.LookPath(file) + return path, handleError(maskErrDot(err)) +} + +// Wraps exec.Cmd so we can capture errors. +type cmdWrapper osexec.Cmd + +var _ Cmd = &cmdWrapper{} + +func (cmd *cmdWrapper) SetDir(dir string) { + cmd.Dir = dir +} + +func (cmd *cmdWrapper) SetStdin(in io.Reader) { + cmd.Stdin = in +} + +func (cmd *cmdWrapper) SetStdout(out io.Writer) { + cmd.Stdout = out +} + +func (cmd *cmdWrapper) SetStderr(out io.Writer) { + cmd.Stderr = out +} + +func (cmd *cmdWrapper) SetEnv(env []string) { + cmd.Env = env +} + +func (cmd *cmdWrapper) StdoutPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StdoutPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) StderrPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StderrPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) Start() error { + err := (*osexec.Cmd)(cmd).Start() + return handleError(err) +} + +func (cmd *cmdWrapper) Wait() error { + err := (*osexec.Cmd)(cmd).Wait() + return handleError(err) +} + +// Run is part of the Cmd interface. +func (cmd *cmdWrapper) Run() error { + err := (*osexec.Cmd)(cmd).Run() + return handleError(err) +} + +// CombinedOutput is part of the Cmd interface. +func (cmd *cmdWrapper) CombinedOutput() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).CombinedOutput() + return out, handleError(err) +} + +func (cmd *cmdWrapper) Output() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).Output() + return out, handleError(err) +} + +// Stop is part of the Cmd interface. +func (cmd *cmdWrapper) Stop() { + c := (*osexec.Cmd)(cmd) + + if c.Process == nil { + return + } + + c.Process.Signal(syscall.SIGTERM) + + time.AfterFunc(10*time.Second, func() { + if !c.ProcessState.Exited() { + c.Process.Signal(syscall.SIGKILL) + } + }) +} + +func handleError(err error) error { + if err == nil { + return nil + } + + switch e := err.(type) { + case *osexec.ExitError: + return &ExitErrorWrapper{e} + case *fs.PathError: + return ErrExecutableNotFound + case *osexec.Error: + if e.Err == osexec.ErrNotFound { + return ErrExecutableNotFound + } + } + + return err +} + +// ExitErrorWrapper is an implementation of ExitError in terms of os/exec ExitError. +// Note: standard exec.ExitError is type *os.ProcessState, which already implements Exited(). +type ExitErrorWrapper struct { + *osexec.ExitError +} + +var _ ExitError = &ExitErrorWrapper{} + +// ExitStatus is part of the ExitError interface. +func (eew ExitErrorWrapper) ExitStatus() int { + ws, ok := eew.Sys().(syscall.WaitStatus) + if !ok { + panic("can't call ExitStatus() on a non-WaitStatus exitErrorWrapper") + } + return ws.ExitStatus() +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). 
+type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +// Exited is to check if the process has finished +func (e CodeExitError) Exited() bool { + return true +} + +// ExitStatus is for checking the error code +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/k8s.io/utils/exec/fixup_go118.go b/vendor/k8s.io/utils/exec/fixup_go118.go new file mode 100644 index 000000000..acf45f1cd --- /dev/null +++ b/vendor/k8s.io/utils/exec/fixup_go118.go @@ -0,0 +1,32 @@ +//go:build !go1.19 +// +build !go1.19 + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + osexec "os/exec" +) + +func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd { + return cmd +} + +func maskErrDot(err error) error { + return err +} diff --git a/vendor/k8s.io/utils/exec/fixup_go119.go b/vendor/k8s.io/utils/exec/fixup_go119.go new file mode 100644 index 000000000..55874c929 --- /dev/null +++ b/vendor/k8s.io/utils/exec/fixup_go119.go @@ -0,0 +1,40 @@ +//go:build go1.19 +// +build go1.19 + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "errors" + osexec "os/exec" +) + +// maskErrDotCmd reverts the behavior of osexec.Cmd to what it was before go1.19 +// specifically set the Err field to nil (LookPath returns a new error when the file +// is resolved to the current directory. 
+func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd { + cmd.Err = maskErrDot(cmd.Err) + return cmd +} + +func maskErrDot(err error) error { + if err != nil && errors.Is(err, osexec.ErrDot) { + return nil + } + return err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6041e7d99..91e1ec75e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -29,6 +29,13 @@ github.com/blang/semver/v4 # github.com/cenkalti/backoff/v4 v4.3.0 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 +# github.com/cenkalti/hub v1.0.1 +## explicit +github.com/cenkalti/hub +# github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 +## explicit +github.com/cenkalti/rpc2 +github.com/cenkalti/rpc2/jsonrpc # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -49,6 +56,29 @@ github.com/cilium/ebpf/link github.com/cilium/ebpf/perf github.com/cilium/ebpf/ringbuf github.com/cilium/ebpf/rlimit +# github.com/containernetworking/cni v1.1.2 +## explicit; go 1.14 +github.com/containernetworking/cni/libcni +github.com/containernetworking/cni/pkg/invoke +github.com/containernetworking/cni/pkg/types +github.com/containernetworking/cni/pkg/types/020 +github.com/containernetworking/cni/pkg/types/040 +github.com/containernetworking/cni/pkg/types/100 +github.com/containernetworking/cni/pkg/types/create +github.com/containernetworking/cni/pkg/types/internal +github.com/containernetworking/cni/pkg/utils +github.com/containernetworking/cni/pkg/version +# github.com/containernetworking/plugins v1.2.0 +## explicit; go 1.17 +github.com/containernetworking/plugins/pkg/ip +github.com/containernetworking/plugins/pkg/ns +github.com/containernetworking/plugins/pkg/utils/sysctl +# github.com/coreos/go-iptables v0.6.0 +## explicit; go 1.16 +github.com/coreos/go-iptables/iptables +# github.com/cpuguy83/go-md2man/v2 v2.0.3 +## explicit; go 1.11 +github.com/cpuguy83/go-md2man/v2/md2man # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew @@ -280,7 +310,7 @@ github.com/netobserv/loki-client-go/pkg/labelutil github.com/netobserv/loki-client-go/pkg/logproto github.com/netobserv/loki-client-go/pkg/metric github.com/netobserv/loki-client-go/pkg/urlutil -# github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20240905095613-6adad95c9d84 +# github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20240913155426-6ac7c5ccbf59 ## explicit; go 1.22.3 github.com/netobserv/netobserv-ebpf-agent/pkg/decode github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf @@ -307,6 +337,28 @@ github.com/netsampler/goflow2/pb github.com/netsampler/goflow2/producer github.com/netsampler/goflow2/transport github.com/netsampler/goflow2/utils +# github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 +## explicit; go 1.18 +github.com/ovn-org/libovsdb/cache +github.com/ovn-org/libovsdb/client +github.com/ovn-org/libovsdb/database +github.com/ovn-org/libovsdb/mapper +github.com/ovn-org/libovsdb/model +github.com/ovn-org/libovsdb/ovsdb +github.com/ovn-org/libovsdb/ovsdb/serverdb +github.com/ovn-org/libovsdb/updates +# github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20240902083137-5d2310e77f87 +## explicit; go 1.22.0 +github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb +github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand 
+github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types # github.com/pelletier/go-toml/v2 v2.2.2 ## explicit; go 1.16 github.com/pelletier/go-toml/v2 @@ -397,6 +449,12 @@ github.com/prometheus/prometheus/util/strutil # github.com/rs/xid v1.5.0 ## explicit; go 1.12 github.com/rs/xid +# github.com/russross/blackfriday/v2 v2.1.0 +## explicit +github.com/russross/blackfriday/v2 +# github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 +## explicit; go 1.16 +github.com/safchain/ethtool # github.com/sagikazarmark/locafero v0.4.0 ## explicit; go 1.20 github.com/sagikazarmark/locafero @@ -502,7 +560,10 @@ github.com/stretchr/testify/require # github.com/subosito/gotenv v1.6.0 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/vishvananda/netlink v1.1.0 +# github.com/urfave/cli/v2 v2.2.0 +## explicit; go 1.11 +github.com/urfave/cli/v2 +# github.com/vishvananda/netlink v1.3.0 ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl @@ -634,7 +695,7 @@ golang.org/x/crypto/blake2b golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/curve25519 -# golang.org/x/exp v0.0.0-20230905200255-921286631fa9 +# golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices @@ -662,7 +723,7 @@ golang.org/x/net/websocket golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal -# golang.org/x/sys v0.24.0 +# golang.org/x/sys v0.25.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 @@ -801,19 +862,31 @@ google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb # gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 ## explicit +# gopkg.in/gcfg.v1 v1.2.3 +## explicit +gopkg.in/gcfg.v1 +gopkg.in/gcfg.v1/scanner +gopkg.in/gcfg.v1/token +gopkg.in/gcfg.v1/types # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 # gopkg.in/ini.v1 v1.67.0 ## explicit gopkg.in/ini.v1 +# gopkg.in/natefinch/lumberjack.v2 v2.2.1 +## explicit; go 1.13 +gopkg.in/natefinch/lumberjack.v2 +# gopkg.in/warnings.v0 v0.1.2 +## explicit +gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.31.0 +# k8s.io/api v0.31.1 ## explicit; go 1.22.0 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1alpha1 @@ -870,7 +943,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apimachinery v0.31.0 +# k8s.io/apimachinery v0.31.1 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -931,7 +1004,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.31.0 +# k8s.io/client-go v0.31.1 ## explicit; go 1.22.0 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1197,7 +1270,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base 
v0.30.1 +# k8s.io/component-base v0.30.2 ## explicit; go 1.22.0 k8s.io/component-base/featuregate k8s.io/component-base/metrics @@ -1214,6 +1287,8 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler +k8s.io/klog/v2/internal/verbosity +k8s.io/klog/v2/textlogger # k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 ## explicit; go 1.20 k8s.io/kube-openapi/pkg/cached @@ -1230,6 +1305,7 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing +k8s.io/utils/exec k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net k8s.io/utils/pointer @@ -1239,7 +1315,7 @@ k8s.io/utils/trace # lukechampine.com/uint128 v1.2.0 ## explicit; go 1.12 lukechampine.com/uint128 -# sigs.k8s.io/controller-runtime v0.18.2 +# sigs.k8s.io/controller-runtime v0.18.4 ## explicit; go 1.22.0 sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go index 05153f74c..176ce0db0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -52,12 +52,22 @@ func newAlreadyOwnedError(obj metav1.Object, owner metav1.OwnerReference) *Alrea } } +// OwnerReferenceOption is a function that can modify a `metav1.OwnerReference`. +type OwnerReferenceOption func(*metav1.OwnerReference) + +// WithBlockOwnerDeletion allows configuring the BlockOwnerDeletion field on the `metav1.OwnerReference`. +func WithBlockOwnerDeletion(blockOwnerDeletion bool) OwnerReferenceOption { + return func(ref *metav1.OwnerReference) { + ref.BlockOwnerDeletion = &blockOwnerDeletion + } +} + // SetControllerReference sets owner as a Controller OwnerReference on controlled. // This is used for garbage collection of the controlled object and for // reconciling the owner object on changes to controlled (with a Watch + EnqueueRequestForOwner). // Since only one OwnerReference can be a controller, it returns an error if // there is another OwnerReference with Controller flag set. -func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme) error { +func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme, opts ...OwnerReferenceOption) error { // Validate the owner. ro, ok := owner.(runtime.Object) if !ok { @@ -80,6 +90,9 @@ func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Sch BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(true), } + for _, opt := range opts { + opt(&ref) + } // Return early with an error if the object is already controlled. if existing := metav1.GetControllerOf(controlled); existing != nil && !referSameObject(*existing, ref) { @@ -94,7 +107,7 @@ func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Sch // SetOwnerReference is a helper method to make sure the given object contains an object reference to the object provided. // This allows you to declare that owner has a dependency on the object without specifying it as a controller. // If a reference to the same object already exists, it'll be overwritten with the newly provided version. 
-func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { +func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme, opts ...OwnerReferenceOption) error { // Validate the owner. ro, ok := owner.(runtime.Object) if !ok { @@ -115,6 +128,9 @@ func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) erro UID: owner.GetUID(), Name: owner.GetName(), } + for _, opt := range opts { + opt(&ref) + } // Update owner references and return. upsertOwnerRef(ref, object)
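For context, a minimal sketch of how a caller might use the new OwnerReferenceOption variadic added to SetControllerReference in the hunk above; the scheme wiring and the owner/child objects are illustrative assumptions, not part of this patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
	// Register core/v1 types so the scheme can resolve the owner's GroupVersionKind.
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Illustrative owner and dependent objects (hypothetical names).
	owner := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "agent", Namespace: "netobserv", UID: "1234"}}
	child := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "agent-config", Namespace: "netobserv"}}

	// Previously the controller reference was always created with
	// BlockOwnerDeletion=true; the new option lets callers override that.
	if err := controllerutil.SetControllerReference(owner, child, scheme,
		controllerutil.WithBlockOwnerDeletion(false)); err != nil {
		panic(err)
	}
	fmt.Printf("owner references: %+v\n", child.GetOwnerReferences())
}

Similarly, a hedged sketch of the newly vendored k8s.io/klog/v2/textlogger API pulled in above; the flag names and verbosity level are just the defaults described in options.go, and the log messages are placeholders:

package main

import (
	"flag"

	"k8s.io/klog/v2/textlogger"
)

func main() {
	// NewConfig holds the -v/-vmodule state; AddFlags binds those flags to a FlagSet.
	cfg := textlogger.NewConfig(textlogger.Verbosity(2))
	cfg.AddFlags(flag.CommandLine)
	flag.Parse()

	// The resulting logr.Logger writes klog-formatted text to stderr by default.
	log := textlogger.NewLogger(cfg)
	log.V(2).Info("verbosity check passed", "component", "sampler")
	log.Error(nil, "placeholder error message")
}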