From 2ea0ee0d9dee46263484fcc713b465468c781867 Mon Sep 17 00:00:00 2001
From: Dylan Asmar
Date: Wed, 24 Jan 2024 17:25:28 -0800
Subject: [PATCH] Added Grid World MDP Tutorial

---
 docs/Project.toml                         |   3 +
 docs/src/example_gridworld_mdp.md         | 594 +++++++++++++++++++++-
 docs/src/examples.md                      |   2 +-
 docs/src/examples/grid_world_overview.gif | Bin 0 -> 8958 bytes
 4 files changed, 597 insertions(+), 2 deletions(-)
 create mode 100644 docs/src/examples/grid_world_overview.gif

diff --git a/docs/Project.toml b/docs/Project.toml
index 7121be41..acbc35bf 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -1,9 +1,11 @@
 [deps]
 BasicPOMCP = "d721219e-3fc6-5570-a8ef-e5402f47c49e"
 DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+DiscreteValueIteration = "4b033969-44f6-5439-a48b-c11fa3648068"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
 LightGraphs = "093fc24a-ae57-5d10-9952-331d41423f4d"
+MCTS = "e12ccd36-dcad-5f33-8774-9175229e7b33"
 NamedTupleTools = "d9ec5142-1e00-5aa0-9d6a-321866360f50"
 NativeSARSOP = "a07c76ea-660d-4c9a-8028-2e6dbd212cb8"
 POMDPLinter = "f3bd98c0-eb40-45e2-9eb1-f2763262d755"
@@ -13,6 +15,7 @@ POMDPs = "a93abf59-7444-517b-a68a-c42f96afdd7d"
 QMDP = "3aa3ecc9-5a5d-57c8-8188-3e47bd8068d2"
 QuickPOMDPs = "8af83fb2-a731-493c-9049-9e19dbce6165"
 StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
+UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"

 [compat]
 Documenter = "1"
diff --git a/docs/src/example_gridworld_mdp.md b/docs/src/example_gridworld_mdp.md
index 34fa3e7b..b299268e 100644
--- a/docs/src/example_gridworld_mdp.md
+++ b/docs/src/example_gridworld_mdp.md
@@ -1,2 +1,594 @@
-# GridWorld MDP using Value Iteration and MCTS
+# GridWorld MDP Tutorial

In this tutorial, we provide a simple example of how to define a Markov decision process (MDP) using the POMDPs.jl interface. We will then solve the MDP using value iteration and Monte Carlo tree search (MCTS). We will walk through constructing the MDP using the explicit interface, which involves defining a new type for the MDP and then extending different components of the POMDPs.jl interface for that type.

## Dependencies

We need a few packages in order to run this example. All of them can be added by running the following commands in the Julia REPL:

```julia
using Pkg

Pkg.add("POMDPs")
Pkg.add("POMDPTools")
Pkg.add("DiscreteValueIteration")
Pkg.add("MCTS")
```

If you already have the packages installed, it is prudent to update them to the latest version:

```julia
Pkg.update()
```

Now that we have the packages installed, we can load them into our workspace:

```@example gridworld_mdp
using POMDPs
using POMDPTools
using DiscreteValueIteration
using MCTS
```

## Problem Overview

In Grid World, we are trying to control an agent who has trouble moving in the desired direction. In our problem, we have four reward states within the grid. Each position on the grid represents a state, and the positive reward states are terminal (the agent stops receiving reward after reaching them and performing an action from that state). The agent has four actions to choose from: up, down, left, right. The agent moves in the desired direction with a probability of $0.7$, and with a probability of $0.1$ in each of the remaining three directions. If the agent bumps into the outside wall, there is a penalty of $1$ (i.e. reward of $-1$).

The problem has the following form:

![Grid World](examples/grid_world_overview.gif)

## Defining the Grid World MDP Type

In POMDPs.jl, an MDP is defined by creating a subtype of the `MDP` abstract type. The types of the states and actions for the MDP are declared as [parameters](https://docs.julialang.org/en/v1/manual/types/#Parametric-Types-1) of the MDP type. For example, if our states and actions are both represented by integers, we can define our MDP type as follows:

```julia
struct MyMDP <: MDP{Int64, Int64} # MDP{StateType, ActionType}
    # fields go here
end
```

In our grid world problem, we will represent the states using a custom type that designates the `x` and `y` coordinate within the grid. The actions will be represented by a symbol.

### GridWorldState
There are numerous ways to represent the state of the agent in a grid world. We will use a custom type that designates the `x` and `y` coordinate within the grid.

```@example gridworld_mdp
struct GridWorldState
    x::Int64
    y::Int64
end
```

To help us later, let's extend `==` for our `GridWorldState`:

```@example gridworld_mdp
function Base.:(==)(s1::GridWorldState, s2::GridWorldState)
    return s1.x == s2.x && s1.y == s2.y
end
```

### GridWorld Actions
Since our action is the direction the agent chooses to go (i.e. up, down, left, right), we can use a Symbol to represent it. Note that in this case, we are not defining a custom type for our action; instead, we represent it directly with a symbol. Our actions will be `:up`, `:down`, `:left`, and `:right`.

### GridWorldMDP
Now that we have defined our types for states and actions, we can define our MDP type. We will call it `GridWorldMDP` and it will be a subtype of `MDP{GridWorldState, Symbol}`.

```@example gridworld_mdp
struct GridWorldMDP <: MDP{GridWorldState, Symbol}
    size_x::Int64 # x size of the grid
    size_y::Int64 # y size of the grid
    reward_states_values::Dict{GridWorldState, Float64} # Dictionary mapping reward states to their values
    hit_wall_reward::Float64 # reward for hitting a wall
    tprob::Float64 # probability of transitioning to the desired state
    discount_factor::Float64 # discount factor
end
```

We can define a constructor for our `GridWorldMDP` to make it easier to create instances of our MDP.

```@example gridworld_mdp
function GridWorldMDP(;
    size_x::Int64=10,
    size_y::Int64=10,
    reward_states_values::Dict{GridWorldState, Float64}=Dict(
        GridWorldState(4, 3) => -10.0,
        GridWorldState(4, 6) => -5.0,
        GridWorldState(9, 3) => 10.0,
        GridWorldState(8, 8) => 3.0),
    hit_wall_reward::Float64=-1.0,
    tprob::Float64=0.7,
    discount_factor::Float64=0.9)
    return GridWorldMDP(size_x, size_y, reward_states_values, hit_wall_reward, tprob, discount_factor)
end
```

To help us visualize our MDP, we can extend `show` for our `GridWorldMDP` type:

```@example gridworld_mdp
function Base.show(io::IO, mdp::GridWorldMDP)
    println(io, "Grid World MDP")
    println(io, "\tSize x: $(mdp.size_x)")
    println(io, "\tSize y: $(mdp.size_y)")
    println(io, "\tReward states:")
    for (key, value) in mdp.reward_states_values
        println(io, "\t\t$key => $value")
    end
    println(io, "\tHit wall reward: $(mdp.hit_wall_reward)")
    println(io, "\tTransition probability: $(mdp.tprob)")
    println(io, "\tDiscount: $(mdp.discount_factor)")
end
```

Now let's create an instance of our `GridWorldMDP`:

```@example gridworld_mdp
mdp = GridWorldMDP()

```

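Because the constructor uses keyword arguments, it is also easy to build variants of the problem. The sketch below is only for illustration (the particular values are arbitrary), and we will continue to use the default `mdp` for the rest of the tutorial:

```@example gridworld_mdp
# A smaller 5x5 grid with a single +1.0 reward state, a higher transition
# probability, and a larger discount factor (illustration only)
small_mdp = GridWorldMDP(
    size_x=5,
    size_y=5,
    reward_states_values=Dict(GridWorldState(5, 5) => 1.0),
    tprob=0.8,
    discount_factor=0.95)

```
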
!!! note
    In this definition of the problem, our coordinates start in the bottom left of the grid. That is, `GridWorldState(1, 1)` is the bottom left of the grid and `GridWorldState(10, 10)` is the top right of the grid for a grid size of 10 by 10.

## Grid World State Space
The state space in an MDP represents all the states in the problem. There are two primary functionalities that we want our spaces to support. We want to be able to iterate over the state space (for value iteration, for example), and sometimes we want to be able to sample from the state space (used in some POMDP solvers). In this tutorial, we will only look at iterable state spaces.

Since we can iterate over elements of an array, and our problem is small, we can store all of our states in an array. We also have a terminal state based on the definition of our problem. We can represent that as a location outside of the grid (i.e. `(-1, -1)`).

```@example gridworld_mdp
function POMDPs.states(mdp::GridWorldMDP)
    states_array = GridWorldState[]
    for x in 1:mdp.size_x
        for y in 1:mdp.size_y
            push!(states_array, GridWorldState(x, y))
        end
    end
    push!(states_array, GridWorldState(-1, -1)) # Adding the terminal state
    return states_array
end
```

Let's view some of the states in our state space:

```@example gridworld_mdp
@show states(mdp)[1:5]

```

We also need a few other functions related to the state space.

```@example gridworld_mdp
# Check if a state is the terminal state
POMDPs.isterminal(mdp::GridWorldMDP, s::GridWorldState) = s == GridWorldState(-1, -1)

# Define the initial state distribution (always start in the bottom left)
POMDPs.initialstate(mdp::GridWorldMDP) = Deterministic(GridWorldState(1, 1))

# Function that returns the index of a state in the state space
function POMDPs.stateindex(mdp::GridWorldMDP, s::GridWorldState)
    if isterminal(mdp, s)
        return length(states(mdp))
    end

    @assert 1 <= s.x <= mdp.size_x "Invalid state"
    @assert 1 <= s.y <= mdp.size_y "Invalid state"

    si = (s.x - 1) * mdp.size_y + s.y
    return si
end

```

### Large State Spaces
If your problem is very large, we probably do not want to store all of our states in an array. We can create an iterator using indexing functions to help us out. One way of doing this is to define a function that returns a state from an index and then construct an iterator. This is an example of how we can do that for the Grid World problem.

!!! note
    If you run this section, you will redefine the `states(::GridWorldMDP)` that we just defined in the previous section.

```@example gridworld_mdp
# Define the length of the state space, number of grid locations plus the terminal state
Base.length(mdp::GridWorldMDP) = mdp.size_x * mdp.size_y + 1

# `states` now returns the mdp, from which we will construct our iterator
POMDPs.states(mdp::GridWorldMDP) = mdp

function Base.getindex(mdp::GridWorldMDP, si::Int) # Enables mdp[si]
    @assert si <= length(mdp) "Index out of bounds"
    @assert si > 0 "Index out of bounds"

    # First check if we are in the terminal state (which we define as the last state)
    if si == length(mdp)
        return GridWorldState(-1, -1)
    end

    # Otherwise, we need to calculate the x and y coordinates
    y = (si - 1) % mdp.size_y + 1
    x = div((si - 1), mdp.size_y) + 1
    return GridWorldState(x, y)
end

function Base.getindex(mdp::GridWorldMDP, si_range::UnitRange{Int}) # Enables mdp[1:5]
    return [getindex(mdp, si) for si in si_range]
end

Base.firstindex(mdp::GridWorldMDP) = 1 # Enables mdp[begin]
Base.lastindex(mdp::GridWorldMDP) = length(mdp) # Enables mdp[end]

# We can now construct an iterator
function Base.iterate(mdp::GridWorldMDP, ii::Int=1)
    if ii > length(mdp)
        return nothing
    end
    s = getindex(mdp, ii)
    return (s, ii + 1)
end

```

Similar to above, let's iterate over a few of the states in our state space:

```@example gridworld_mdp
@show states(mdp)[1:5]
@show mdp[begin]
@show mdp[end]

```

## Grid World Action Space
The action space is the set of all actions available to the agent. In the grid world problem, the action space consists of up, down, left, and right. We can define the action space by implementing a new method of the `actions` function.

```@example gridworld_mdp
POMDPs.actions(mdp::GridWorldMDP) = [:up, :down, :left, :right]
```

Similar to the state space, we need a function that returns an index given an action.

```@example gridworld_mdp
function POMDPs.actionindex(mdp::GridWorldMDP, a::Symbol)
    @assert in(a, actions(mdp)) "Invalid action"
    return findfirst(x -> x == a, actions(mdp))
end

```

## Grid World Transition Function
MDPs often define the transition function as $T(s^{\prime} \mid s, a)$, which is the probability of transitioning to state $s^{\prime}$ given that we are in state $s$ and take action $a$. For the POMDPs.jl interface, we define the transition function as a distribution over the next states. That is, we want $T(\cdot \mid s, a)$, which is a function that takes in a state and an action and returns a distribution over the next states.

For our grid world example, there are only a few states to which the agent can transition and thus only a few states with nonzero probability in $T(\cdot \mid s, a)$. We can use the `SparseCat` distribution to represent this. The `SparseCat` distribution is a categorical distribution that only stores the nonzero probabilities.

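Before using `SparseCat` in our transition function, here is a small standalone sketch of how one can be constructed and queried. It assumes the distribution utilities that POMDPTools provides for `SparseCat` (such as `pdf` and `rand`) are available in our session; the states and probabilities below are arbitrary:

```@example gridworld_mdp
# A toy distribution: 80% chance of ending up in (2, 1) and 20% chance of staying in (1, 1)
d = SparseCat([GridWorldState(2, 1), GridWorldState(1, 1)], [0.8, 0.2])

# We can iterate over the (state, probability) pairs stored in the distribution
for (s, p) in d
    println("$s has probability $p")
end

# We can also query the probability of a particular state or sample a state from the distribution
@show pdf(d, GridWorldState(2, 1))
@show rand(d)

```

Only the outcomes with nonzero probability are stored, which is exactly what we need for the grid world transition model.
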
We can define our transition function as follows:

```@example gridworld_mdp
function POMDPs.transition(mdp::GridWorldMDP, s::GridWorldState, a::Symbol)
    # If we are in the terminal state, we stay in the terminal state
    if isterminal(mdp, s)
        return SparseCat([s], [1.0])
    end

    # If we are in a positive reward state, we transition to the terminal state
    if s in keys(mdp.reward_states_values) && mdp.reward_states_values[s] > 0
        return SparseCat([GridWorldState(-1, -1)], [1.0])
    end

    # Probability of going in a direction other than the desired direction
    tprob_other = (1 - mdp.tprob) / 3

    new_state_up = GridWorldState(s.x, min(s.y + 1, mdp.size_y))
    new_state_down = GridWorldState(s.x, max(s.y - 1, 1))
    new_state_left = GridWorldState(max(s.x - 1, 1), s.y)
    new_state_right = GridWorldState(min(s.x + 1, mdp.size_x), s.y)

    new_state_vector = [new_state_up, new_state_down, new_state_left, new_state_right]
    t_prob_vector = fill(tprob_other, 4)

    if a == :up
        t_prob_vector[1] = mdp.tprob
    elseif a == :down
        t_prob_vector[2] = mdp.tprob
    elseif a == :left
        t_prob_vector[3] = mdp.tprob
    elseif a == :right
        t_prob_vector[4] = mdp.tprob
    else
        error("Invalid action")
    end

    # Combine probabilities for states that are the same
    for i in 1:4
        for j in (i + 1):4
            if new_state_vector[i] == new_state_vector[j]
                t_prob_vector[i] += t_prob_vector[j]
                t_prob_vector[j] = 0.0
            end
        end
    end

    # Remove states with zero probability
    new_state_vector = new_state_vector[t_prob_vector .> 0]
    t_prob_vector = t_prob_vector[t_prob_vector .> 0]

    return SparseCat(new_state_vector, t_prob_vector)
end

```

Let's examine a few transitions:

```@example gridworld_mdp
@show transition(mdp, GridWorldState(1, 1), :up)

```

```@example gridworld_mdp
@show transition(mdp, GridWorldState(1, 1), :left)

```

```@example gridworld_mdp
@show transition(mdp, GridWorldState(9, 3), :right)

```

```@example gridworld_mdp
@show transition(mdp, GridWorldState(-1, -1), :down)

```

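As an optional sanity check (a sketch that is not required by the POMDPs.jl interface), we can loop over every state and action and confirm that the probabilities in each transition distribution sum to one:

```@example gridworld_mdp
for s in states(mdp)
    for a in actions(mdp)
        td = transition(mdp, s, a)
        # Iterate over the (next state, probability) pairs in the distribution
        total_prob = sum(p for (sp, p) in td)
        @assert isapprox(total_prob, 1.0; atol=1e-10) "Probabilities for state $s and action $a sum to $total_prob"
    end
end
println("All transition distributions sum to 1.")
```
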
## Grid World Reward Function

In our problem, we have a reward function that depends on the next state as well (i.e. if we hit a wall, we stay in the same state and get a reward of $-1$). We can still construct a reward function that only depends on the current state and action by taking an expectation over the next state. That is, we can define our reward function as $R(s, a) = \mathbb{E}_{s^{\prime} \sim T(\cdot \mid s, a)}[R(s, a, s^{\prime})]$.

```@example gridworld_mdp
# First, let's define the reward function given the state, action, and next state
function POMDPs.reward(mdp::GridWorldMDP, s::GridWorldState, a::Symbol, sp::GridWorldState)
    # If we are in the terminal state, we get a reward of 0
    if isterminal(mdp, s)
        return 0.0
    end

    # If we are in a positive reward state, we get the reward of that state
    # For a positive reward, we transition to the terminal state, so we don't have
    # to worry about the next state (e.g. hitting a wall)
    if s in keys(mdp.reward_states_values) && mdp.reward_states_values[s] > 0
        return mdp.reward_states_values[s]
    end

    # If we are in a negative reward state, we get the reward of that state
    # If the negative reward state is on the edge of the grid, we can also be in this state
    # and hit a wall, so we need to check for that
    r = 0.0
    if s in keys(mdp.reward_states_values) && mdp.reward_states_values[s] < 0
        r += mdp.reward_states_values[s]
    end

    # If we hit a wall, we get a reward of -1
    if s == sp
        r += mdp.hit_wall_reward
    end

    return r
end

# Now we can define the reward function given the state and action
function POMDPs.reward(mdp::GridWorldMDP, s::GridWorldState, a::Symbol)
    r = 0.0
    for (sp, p) in transition(mdp, s, a)
        r += p * reward(mdp, s, a, sp)
    end
    return r
end

```

Let's examine a few rewards:

```@example gridworld_mdp
@show reward(mdp, GridWorldState(1, 1), :up)

```

```@example gridworld_mdp
@show reward(mdp, GridWorldState(1, 1), :left)

```

```@example gridworld_mdp
@show reward(mdp, GridWorldState(9, 3), :right)

```

```@example gridworld_mdp
@show reward(mdp, GridWorldState(-1, -1), :down)

```

```@example gridworld_mdp
@show reward(mdp, GridWorldState(2, 3), :up)

```

## Grid World Remaining Functions
We are almost done! We still need to define `discount`. Let's first use `POMDPLinter` to check if we have defined all the functions we need for DiscreteValueIteration:

```@example gridworld_mdp
using POMDPLinter

@show_requirements POMDPs.solve(ValueIterationSolver(), mdp)

```
As we expected, we need to define `discount`.

```@example gridworld_mdp
function POMDPs.discount(mdp::GridWorldMDP)
    return mdp.discount_factor
end

```

Let's check again:

```@example gridworld_mdp
@show_requirements POMDPs.solve(ValueIterationSolver(), mdp)

```

## Solving the Grid World MDP (Value Iteration)
Now that we have defined our MDP, we can solve it using value iteration. We will use the `ValueIterationSolver` from the [DiscreteValueIteration](https://github.com/JuliaPOMDP/DiscreteValueIteration.jl) package. First, we construct a Solver type which contains the solver parameters. Then we call `POMDPs.solve` to solve the MDP and return a policy.

```@example gridworld_mdp
# Initialize the problem (we have already done this, but just calling it again for completeness in the example)
mdp = GridWorldMDP()

# Initialize the solver with desired parameters
solver = ValueIterationSolver(; max_iterations=100, belres=1e-3, verbose=true)

# Solve for an optimal policy
vi_policy = POMDPs.solve(solver, mdp)
nothing # hide

```

We can now use the policy to compute the optimal action for a given state:

```@example gridworld_mdp
s = GridWorldState(9, 2)
@show action(vi_policy, s)

```

```@example gridworld_mdp
s = GridWorldState(8, 3)
@show action(vi_policy, s)

```

## Solving the Grid World MDP (MCTS)
Similar to the process with value iteration, we can solve the MDP using MCTS. We will use the `MCTSSolver` from the [MCTS](https://github.com/JuliaPOMDP/MCTS.jl) package.

```@example gridworld_mdp
# Initialize the problem (we have already done this, but just calling it again for completeness in the example)
mdp = GridWorldMDP()

# Initialize the solver with desired parameters
solver = MCTSSolver(n_iterations=1000, depth=20, exploration_constant=10.0)

# Now we construct a planner by calling POMDPs.solve. 
For online planners, the computation for the +# optimal action occurs in the call to `action`. +mcts_planner = POMDPs.solve(solver, mdp) +nothing # hide + +``` + +Similar to the value iteration policy, we can use the policy to compute the action for a given state: + +```@example gridworld_mdp +s = GridWorldState(9, 2) +@show action(mcts_planner, s) + +``` + +```@example gridworld_mdp +s = GridWorldState(8, 3) +@show action(mcts_planner, s) + +``` + +## Visualizing the Value Iteration Policy +We can visualize the value iteration policy by plotting the value function and the policy. We can use numerous plotting packages to do this, but we will use [UnicodePlots](https://github.com/JuliaPlots/UnicodePlots.jl) for this example. + +```@example gridworld_mdp +using UnicodePlots +using Printf +``` + +### Value Function as a Heatmap +We can plot the value function as a heatmap. The value function is a function over the state space, so we need to iterate over the state space and store the value at each state. We can use the `value` function to evaluate the value function at a given state. + +```@example gridworld_mdp +# Initialize the value function array +value_function = zeros(mdp.size_y, mdp.size_x) + +# Iterate over the state space and store the value at each state +for s in states(mdp) + if isterminal(mdp, s) + continue + end + value_function[s.y, s.x] = value(vi_policy, s) +end + +# Plot the value function +heatmap(value_function; + title="GridWorld VI Value Function", + xlabel="x position", + ylabel="y position", + colormap=:inferno +) + +``` + +!!! note + Rendering of unicode plots in the documentation is not optimal. For a better image, run this locally in a REPL. + +### Visualizing the Value Iteration Policy +One way to visualize the policy is to plot the action that the policy takes at each state. + +```@example gridworld_mdp +# Initialize the policy array +policy_array = fill(:up, mdp.size_x, mdp.size_y) + +# Iterate over the state space and store the action at each state +for s in states(mdp) + if isterminal(mdp, s) + continue + end + policy_array[s.x, s.y] = action(vi_policy, s) +end + +# Let's define a mapping from symbols to unicode arrows +arrow_map = Dict( + :up => " ↑ ", + :down => " ↓ ", + :left => " ← ", + :right => " → " +) + +# Plot the policy to the terminal, with the origin in the bottom left +@printf(" GridWorld VI Policy \n") +for y in mdp.size_y+1:-1:0 + if y == mdp.size_y+1 || y == 0 + for xi in 0:10 + if xi == 0 + print(" ") + elseif y == mdp.size_y+1 + print("___") + else + print("---") + end + end + else + for x in 0:mdp.size_x+1 + if x == 0 + @printf("%2d |", y) + elseif x == mdp.size_x + 1 + print("|") + else + print(arrow_map[policy_array[x, y]]) + end + end + end + println() + if y == 0 + for xi in 0:10 + if xi == 0 + print(" ") + else + print(" $xi ") + end + end + end +end +``` + +## Seeing a Policy In Action +Another useful tool is to view the policy in action by creating a gif of a simulation. To accomplish this, we could use [POMDPGifs](https://github.com/JuliaPOMDP/POMDPGifs.jl). To use POMDPGifs, we need to extend the [`POMDPTools.render`](@ref) function to `GridWorldMDP`. Please reference [Gallery of POMDPs.jl Problems](@ref) for examples of this process. \ No newline at end of file diff --git a/docs/src/examples.md b/docs/src/examples.md index b9cbf1f1..8da1bbf1 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -4,7 +4,7 @@ This section contains examples of how to use POMDPs.jl. For specific informaiton The examples are organized by topic. 
The exmaples are designed to build through each step. First, we have to define a POMDP. Then we need to solve the POMDP to get a policy. Finally, we can simulate the policy to see how it performs. The examples are designed to be exeucted in order. For example, the examples in [Simulations Examples](@ref) assume that the POMDPs defined in the [Defining a POMDP](@ref) section have been defined and we have a policy we would like to simulate that we computed in the [Using Different Solvers](@ref) section.

-The [GridWorld MDP using Value Iteration and MCTS](@ref) section is a standalone example that does not require any of the other examples.
+The [GridWorld MDP Tutorial](@ref) section is a standalone example that does not require any of the other examples.

 ## Outline

 ```@contents
diff --git a/docs/src/examples/grid_world_overview.gif b/docs/src/examples/grid_world_overview.gif
new file mode 100644
index 0000000000000000000000000000000000000000..bf94bba1045e640fa48f6e4a1ca316704ebf774d
GIT binary patch
literal 8958
zaXj4e>_A=C5lDbZrjD-1W9aoHUY)!u)%x{v4#MI7*d#G5`YZmRtaW>13RZ(ilwpF? zh3EY=u+c|^iT3SP8bO~+6h9lTsrLFY>M#+o>DN(@cKbhhnR4sHqi-Djy#eZJ&u=Ug zH9VA*bAQi1a~4e0G;mbpru-dxt+e$O#2H&0Y<_#DH@xwq_pTzA%qK)t~nIub+eZLj?%FFkGf-eZa^~?(dlP;1x^T^XDbufutI@>94no z%XEECwS-Nusz_dEAc-u$KhFlXYk%E*BSPZv{%zH(of^9RzAb9n+yZvu!ne9&P~Yd- z&1j@m)`MaP%H}JRW2E64;Zf*a7$_fJC1LOp_CzWZGSt>>UA>d=uyMq6N}|VpbSLq! zleuM1;49yZT)0-5HuXkU@=fa9WFqOT=TtepD-CQ$!>1X19W-+O&%IKpyB43*=SV9Y zg(qJk%N7-jZQ0h}O{1NEbfs#9@S}feEVhYsfg9=7AB!k+0PN=Gc$Ci?;A6%uzHeY_vqBlQft#8v=rh&gAlT}}jd111k6$kZtG7;FPo*r=Im=#j zr=8nQEGJroy$CpBonArkHeWX8_HlUX`KS=ue*G3#;E3T|I^3?~=UxeIy(5e}q^gDN z$yN*vM296yfXt|(Br&&BkL!h1H{Tj(@JW{T<;;l84Iy(K0KGnlWV&0w)gr0*wyq@d zs50=ienM*bN3vVh9zWH$!?7!oOP&Z?;!t6aw6ixbMWxX$7f|`Lw`ZR3f_2Cv4dN5yF6Y&`ghnyhns?V9AQF+FFYa&@=nWAyHpkBls+(ZA}W$oI21pg?DkH+ z<}Qy2Tr;}l5Hf+tE_>mGP(~grjmSJh35~!XEbmMlb|dQ!=-(nHB`=?nSJo5Cm^$w4 zM5dB+R8oQvhT)hI3y$qLhcko?nzkE8Zu2EY&4jz)>9mA~Z7ZFyi9e=l*6;MN{BE#O z5Hfz6?tr8HS@yP%2kqSKExYa{4l{3*d3zJ+3`@jkBdv zkOLAk7H=wex@MK+FierOSL}1X(?oO%Cb@IacjtUC9qqJ+x{Yps>$$d2XE*_#JQ{B~ z-}TGeN4`DT4)t-_%9K9g3UoQBYjHW+nRMIE;MY*Ms4?u+8xU2{;<_e^w8sZmghE~Cx@>dSlOM9 z$m71a2cN#j0<#s_BK)boqjZbM6<>EranIHkPufb?_qLvJelPh}vkzG>)eoKZ9(h9- zJ*=WVSNR@YM*27o`#3Lh6C$Nu_3d=NeFGMK z-Nd{F=AGrCeiXibdYOJcl(y3R42g$sTDI=iw&o^_W@h|unXP_VL^f+SjtN=Ulc`-S zhOP3W&9Q~P{zu6GVoo5+*C$6mP~27{EXyF=*K0%`NH)buEedR^^t90rYRL+$6}LcF zn(vtc0M~*UMT4@V1JiAT7FzuU_>ES4y^WRmb^!oFSKyE6;GfDtUk=sQ#C=8<1$IRN zH{|fhWPy9iq4QZ@Cs~N`L({!$cocxp2cp4Mi*B%p5R!dA5jWt&BK+&3frLlFxwavt z$Ux*^pwckg`erDBC}1{=oVW=8(LLZ5fiRiMAPHosrZ14#6|eD8_#Fv9tj#ACWyAYR zb`J$UCvgL>_kLnE1yC@CMNvn<6+f5Te14|lw`~ewlf`2;#phZIdkps$x$0^*6f!^& zfUV#U2-OdN?-Q=*6RAlZIiw%<-YX2bc-X^#M+PK}=P+-$zkxwu}WwgkGjZueCglqCbbXMZ@7S=Vft)BZTOwxOJjR zCcZ67w&+csfrno65B3mBopKoV2Oj?J{h! zkZzhNzUpYNrD&$sB=k|tAj{`$nzUS%G&EIg!$LA(G-Y8V?N~XIi6uoeJNCtB(mQ_w z?-*+tND@RfrnfEi)naPL)ig!P3=!6}_puqu#2K*Gl+5f5n19Ag)pR{c6LAA4N!k>F z_857|$a3G*#gTNiSU;m`KM&P72mdSv)d9r6g>O%NJ#dEM9$6QOn2g}55&3O1adOk(==FfHSIG&(V198b#^jw zc71G4vR`&VR<>VkLJMn>RrWi#_RO~Gv=D#$YGSXNoT$2-oV#r~nai1-w7G@H`Q6o7 zW&Sw{w0ZNac|VV{hgEa<`3rP&3Q}V8%W3l$R13$J3gU?im#TAi?DMzU-3M6xe$b}( zvjm0Qdrm4(+4cquo(V!5Q3HiPTKCmu{8qg2sDHoJQrK@S2-_%Nl9pMDZB z{QOj^;Ml)F#<1|Ty-cnn|A;G9(V<-V12=EAkP4<;Q&+0V z<0`3Qsc6`4v=c2P&_v8exbkcc@0PpgIg{;yY@@qypFiYI+RI z1V0q_omA(?)iwp7zO7UaqiQGVDoszK%|9UP4eP$l|^jT+azUG zsDcuK+PGYFf&&^Gi~b>n>guQ+`hcD?te>Dm&0oH zi`Cu~Y-k|)c4MsJu%q$&Q2lMSQZ~W*4wSSwX@g{7BfDC)3~3{eW7AGR6O&p!#IasI zzMk`>u7R*ZN3B^MRj&K7*`TJG#lLyseg;sDlMd1pPh-NERr9{)n*(>t8zU?0z_O2l znTl^>9Xru%2Q6U723xw8OYd4Ao_hI?Wr)u=4#5*2oHiy>w0YNi#r92^1xO44P;z##_5$v>l2gdytrc}dQz_i4$76GqC6euA;l$5+VA5jLXn9J# zFAC^A#p|uXzm(mv+g?ZK2nw@OA$*`r7y%eJlD*;zc&X4`5a0AAth@9!-g-PaffoL% zYgd{Cl*p^+$`8eZ8vF>W77%-z(`_1pAaVd8oJbl#Lf;b@-x(9A)?VJjQm;-S1Q_-1 zv*K+9ssfig`xt`A*=YKcYg#R4dnue`S?QJ8>ig&7TYtv)Esl}1wtXT}@8!+MC%6{2 z89#7e9YI{KKE9`ZSyqz;(8t25K@p^Ni@p^_Jb>-3?pv(yUUJ3T$Rj7^a1bQ>eiYb$ zd#Z<{w$1m|;79mipI{g3ROb+T|Bhq(gCM!`2wAlR{7EC(%beZNpw8{n26I6maW8;c z6rd@9M`a42>J8(UZhP_O2dTpFrH6QIp@4`cz*lxW)4iUT^u42o{*mmpJMHbI0bk`w zlZx|ON{tZ!V~l1VWA^QDFG-soof+1uHzn3~CdH3QsEwr$4!<&PHM%~QL^k}T6O+G& zDI~>ojkJ9w8|`r#i)XKQh{qtE%F4#S%{sM3jE&FLVmhyk7pygjtxc?|k1r*(*mR98 zJRfJs>+DLHRLvR3C!;+dA6vaQ1rHdT;E2KQb6|2-$6_8%5}c3L8&9C~qlh&ccgULe zYRAaVCq0bEM!Kdp*4iFH#{n{9bahRXGLx5rrZH<7921RwXRXd=Q{NJ%hS&Q0nwv!# z=3Ew=$?ng+cAmo%n~TG5amybFU7KSA4IlcpTq2*I%%5rGm}fN;fKCe~~e40P6C)}@}IN#4Y_}zahcb&# z3zp1;=c$Z`Xh6%M!b`kIOWbtJY0eW^{`2J+=ULr?Wz+K&>AIx{brXsj<0cHNxuA&? 
z`Wdh8)oeuVr-_Bs#1-4bntYk|V$j;i`C{VwaQ*q**Y%aw^ELQHocsDr)A@XS-Fh$G z+E~F_3B!h)^ICr3G6J+dmAKkix6151-;%g77rc1TzH%PCF<-E8C1mL(yxnJFY4d#3 zNM^Gcv59|Yll;z>DEi z-J(J-P6)570yjUnY~Jcw7oIT2R>-VHYV7EuSGZkPs%7>E&Kvnnmfr(+n9)0DY+DMz zJx%O}1{w+P*~d8VJ5R1zgAdF?w)YEuicHR1OzzlgZmDk^0;ESZmEA`^*j=m1gD>dAG?8Qd^ZiE5k!9hr9k@iLXRi!>P!2q*NIIzuImvU$ zRs`-xx}4l3KQ?qZ_QD<~iR`6lZZ;I2$I8(X^?n5;W{VQaqwK-Gj527@fC66u?<*i%5* z`js`(tJi_okA`Un7`m8R9@Y|2a^-{Y*lHmt;i~7z*ZGpUGU@Vd#CUu>&=`#^#U51w z6RbgkIox5z`iJv@_esgeFjxqgM^r4vpFZ_&=7U_8LUiIz2}(CS9PLMmoUVS>^J+k1 zNE#zIsW6-JtpU1{-nL-8cS3VuccgvA^s2Cf|JVEPD?_Kh-Y2{Jnsi6@<16o*9J-zE za@wzh>DinKXyoQvy&p(J)WYOv8$8&B3Ouh~pMS&7YdjO5P4Vz;r0_d@UY5`4QVvfZ z-~(Jgr^siJBnV4aQY#qT%27~I%g<3@9m>?mrM>JEVWd*3*O#GyiZp&vY%<*(@iYpd zj})LryURJSMT**lF* z#+M7dnR1z++x2AzLxs=*7=4_zF{W5|a