From 65f50d9e59c883ccf9bd6cfe0291dbf2ab72a0f3 Mon Sep 17 00:00:00 2001
From: huangshiyu
Date: Tue, 15 Aug 2023 21:15:51 +0800
Subject: [PATCH 1/2] add snake

---
 examples/smac/README.md                            |   5 +-
 examples/snake/README.md                           |  10 +
 .../submissions/random_agent/submission.py         |  30 +
 examples/snake/test_env.py                         |  32 +
 openrl/envs/snake/__init__.py                      |  17 +
 openrl/envs/snake/common.py                        | 184 +++++
 openrl/envs/snake/discrete.py                      |  31 +
 openrl/envs/snake/game.py                          |  47 ++
 openrl/envs/snake/gridgame.py                      | 196 +++++
 openrl/envs/snake/observation.py                   |  59 ++
 openrl/envs/snake/snake.py                         | 683 +++++++++++++++
 openrl/envs/snake/snake_3v3.py                     | 780 ++++++++++++++++++
 openrl/envs/snake/space.py                         |  61 ++
 13 files changed, 2134 insertions(+), 1 deletion(-)
 create mode 100644 examples/snake/README.md
 create mode 100644 examples/snake/submissions/random_agent/submission.py
 create mode 100644 examples/snake/test_env.py
 create mode 100644 openrl/envs/snake/__init__.py
 create mode 100644 openrl/envs/snake/common.py
 create mode 100644 openrl/envs/snake/discrete.py
 create mode 100644 openrl/envs/snake/game.py
 create mode 100644 openrl/envs/snake/gridgame.py
 create mode 100644 openrl/envs/snake/observation.py
 create mode 100644 openrl/envs/snake/snake.py
 create mode 100644 openrl/envs/snake/snake_3v3.py
 create mode 100644 openrl/envs/snake/space.py

diff --git a/examples/smac/README.md b/examples/smac/README.md
index 5fb14e76..f9d7d2cb 100644
--- a/examples/smac/README.md
+++ b/examples/smac/README.md
@@ -11,4 +11,7 @@ Installation guide for Linux:
 
 Train SMAC with [MAPPO](https://arxiv.org/abs/2103.01955) algorithm:
 
-`python train_ppo.py --config smac_ppo.yaml`
\ No newline at end of file
+`python train_ppo.py --config smac_ppo.yaml`
+
+## Render replay on Mac
+
diff --git a/examples/snake/README.md b/examples/snake/README.md
new file mode 100644
index 00000000..4b47c3cf
--- /dev/null
+++ b/examples/snake/README.md
@@ -0,0 +1,10 @@
+
+This is the example for the snake game.
+
+
+## Submit to JiDi
+
+Submission site: http://www.jidiai.cn/env_detail?envid=1.
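+
+Before submitting, you can sanity-check the environment locally. A minimal way to do this (assuming you run it from `examples/snake/`) is the test script added in this patch, which steps `SnakeEatBeans` with random one-hot actions until the episode ends:
+
+`python test_env.py`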
+ +Snake senarios: [here](https://github.com/jidiai/ai_lib/blob/7a6986f0cb543994277103dbf605e9575d59edd6/env/config.json#L94) + diff --git a/examples/snake/submissions/random_agent/submission.py b/examples/snake/submissions/random_agent/submission.py new file mode 100644 index 00000000..c599945b --- /dev/null +++ b/examples/snake/submissions/random_agent/submission.py @@ -0,0 +1,30 @@ +# -*- coding:utf-8 -*- +def sample_single_dim(action_space_list_each, is_act_continuous): + if is_act_continuous: + each = action_space_list_each.sample() + else: + if action_space_list_each.__class__.__name__ == "Discrete": + each = [0] * action_space_list_each.n + idx = action_space_list_each.sample() + each[idx] = 1 + elif action_space_list_each.__class__.__name__ == "MultiDiscreteParticle": + each = [] + nvec = action_space_list_each.high - action_space_list_each.low + 1 + sample_indexes = action_space_list_each.sample() + + for i in range(len(nvec)): + dim = nvec[i] + new_action = [0] * dim + index = sample_indexes[i] + new_action[index] = 1 + each.extend(new_action) + return each + + +def my_controller(observation, action_space, is_act_continuous): + joint_action = [] + for i in range(len(action_space)): + player = sample_single_dim(action_space[i], is_act_continuous) + joint_action.append(player) + return joint_action + diff --git a/examples/snake/test_env.py b/examples/snake/test_env.py new file mode 100644 index 00000000..d4a83839 --- /dev/null +++ b/examples/snake/test_env.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2023 The OpenRL Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""""" +import numpy as np +from openrl.envs.snake.snake import SnakeEatBeans + +env = SnakeEatBeans() + +obs, info = env.reset() + +done = False +while not np.any(done): + a1 = np.zeros(4) + a1[env.action_space.sample()] = 1 + a2 = np.zeros(4) + a2[env.action_space.sample()] = 1 + obs, reward, done, info = env.step([a1, a2]) + print("obs:", obs, reward, "\ndone:", done, info) diff --git a/openrl/envs/snake/__init__.py b/openrl/envs/snake/__init__.py new file mode 100644 index 00000000..663cfed7 --- /dev/null +++ b/openrl/envs/snake/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2023 The OpenRL Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""""" diff --git a/openrl/envs/snake/common.py b/openrl/envs/snake/common.py new file mode 100644 index 00000000..eb67e9dc --- /dev/null +++ b/openrl/envs/snake/common.py @@ -0,0 +1,184 @@ +import numpy as np +import sys +import os + +class HiddenPrints: + def __enter__(self): + self._original_stdout = sys.stdout + sys.stdout = open(os.devnull, 'w') + + def __exit__(self, exc_type, exc_val, exc_tb): + sys.stdout.close() + sys.stdout = self._original_stdout + +class Board: + def __init__(self, board_height, board_width, snakes, beans_positions, teams): + # print('create board, beans_position: ', beans_positions) + self.height = board_height + self.width = board_width + self.snakes = snakes + self.snakes_count = len(snakes) + self.beans_positions = beans_positions + self.blank_sign = -self.snakes_count + self.bean_sign = -self.snakes_count + 1 + self.board = np.zeros((board_height, board_width), dtype=int) + self.blank_sign + self.open = dict() + for key, snake in self.snakes.items(): + self.open[key] = [snake.head] # state 0 open list, heads, ready to spread + # see [A* Pathfinding (E01: algorithm explanation)](https://www.youtube.com/watch?v=-L-WgKMFuhE) + for x, y in snake.pos: + self.board[x][y] = key # obstacles, e.g. 0, 1, 2, 3, 4, 5 + # for x, y in beans_positions: + # self.board[x][y] = self.bean_sign # beans + + self.state = 0 + self.controversy = dict() + self.teams = teams + + # print('initial board') + # print(self.board) + + def step(self): # delay: prevent rear-end collision + new_open = {key: [] for key in self.snakes.keys()} + self.state += 1 # update state + # if self.state > delay: + # for key, snake in self.snakes.items(): # drop tail + # if snake.len >= self.state: + # self.board[snake.pos[-(self.state - delay)][0]][snake.pos[-(self.state - delay)][1]] \ + # = self.blank_sign + for key, snake in self.snakes.items(): + if snake.len >= self.state: + self.board[snake.pos[-self.state][0]][snake.pos[-self.state][1]] = self.blank_sign # drop tail + for key, value in self.open.items(): # value: e.g. [[8, 3], [6, 3], [7, 4]] + others_tail_pos = [self.snakes[_].pos[-self.state] + if self.snakes[_].len >= self.state else [] + for _ in set(range(self.snakes_count)) - {key}] + for x, y in value: + # print('start to spread snake {} on grid ({}, {})'.format(key, x, y)) + for x_, y_ in [((x + 1) % self.height, y), # down + ((x - 1) % self.height, y), # up + (x, (y + 1) % self.width), # right + (x, (y - 1) % self.width)]: # left + sign = self.board[x_][y_] + idx = sign % self.snakes_count # which snake, e.g. 
0, 1, 2, 3, 4, 5 / number of claims + state = sign // self.snakes_count # manhattan distance to snake who claim the point or its negative + if sign == self.blank_sign: # grid in initial state + if [x_, y_] in others_tail_pos: + # print('do not spread other snakes tail, in case of rear-end collision') + continue # do not spread other snakes' tail, in case of rear-end collision + self.board[x_][y_] = self.state * self.snakes_count + key + self.snakes[key].claimed_count += 1 + new_open[key].append([x_, y_]) + + elif key != idx and self.state == state: + # second claim, init controversy, change grid value from + to - + # print( + # '\tgird ({}, {}) in the same state claimed by different snakes ' + # 'with sign {}, idx {} and state {}'.format( + # x_, y_, sign, idx, state)) + if self.snakes[idx].len > self.snakes[key].len: # shorter snake claim the controversial grid + # print('\t\tsnake {} is shorter than snake {}'.format(key, idx)) + self.snakes[idx].claimed_count -= 1 + new_open[idx].remove([x_, y_]) + self.board[x_][y_] = self.state * self.snakes_count + key + self.snakes[key].claimed_count += 1 + new_open[key].append([x_, y_]) + elif self.snakes[idx].len == self.snakes[key].len: # controversial claim + # print( + # '\t\tcontroversy! first claimed by snake {}, then claimed by snake {}'.format(idx, key)) + self.controversy[(x_, y_)] = {'state': self.state, + 'length': self.snakes[idx].len, + 'indexes': [idx, key]} + # first claim by snake idx, then claim by snake key + self.board[x_][y_] = -self.state * self.snakes_count + 1 + # if + 2, not enough for all snakes claim one grid!! + self.snakes[idx].claimed_count -= 1 # controversy, no snake claim this grid!! + new_open[key].append([x_, y_]) + else: # (self.snakes[idx].len < self.snakes[key].len) + pass # longer snake do not claim the controversial grid + + elif (x_, y_) in self.controversy \ + and key not in self.controversy[(x_, y_)]['indexes'] \ + and self.state + state == 0: # third claim or more + # print('snake {} meets third or more claim in grid ({}, {})'.format(key, x_, y_)) + controversy = self.controversy[(x_, y_)] + # pprint.pprint(controversy) + if controversy['length'] > self.snakes[key].len: # shortest snake claim grid, do 4 things + # print('\t\tsnake {} is shortest'.format(key)) + indexes_count = len(controversy['indexes']) + for i in controversy['indexes']: + self.snakes[i].claimed_count -= 1 / indexes_count # update claimed_count ! + new_open[i].remove([x_, y_]) + del self.controversy[(x_, y_)] + self.board[x_][y_] = self.state * self.snakes_count + key + self.snakes[key].claimed_count += 1 + new_open[key].append([x_, y_]) + elif controversy['length'] == self.snakes[key].len: # controversial claim + # print('\t\tcontroversy! 
multi claimed by snake {}'.format(key)) + self.controversy[(x_, y_)]['indexes'].append(key) + self.board[x_][y_] += 1 + new_open[key].append([x_, y_]) + else: # (controversy['length'] < self.snakes[key].len) + pass # longer snake do not claim the controversial grid + else: + pass # do nothing with lower state grids + + self.open = new_open # update open + # update controversial snakes' claimed_count (in fraction) in the end + for _, d in self.controversy.items(): + controversial_snake_count = len(d['indexes']) # number of controversial snakes + for idx in d['indexes']: + self.snakes[idx].claimed_count += 1 / controversial_snake_count + + +class SnakePos: + def __init__(self, snake_positions, board_height, board_width, beans_positions): + self.pos = snake_positions # [[2, 9], [2, 8], [2, 7]] + self.len = len(snake_positions) # >= 3 + self.head = snake_positions[0] + self.beans_positions = beans_positions + self.claimed_count = 0 + + displace = [(self.head[0] - snake_positions[1][0]) % board_height, + (self.head[1] - snake_positions[1][1]) % board_width] + # print('creat snake, pos: ', self.pos, 'displace:', displace) + if displace == [board_height - 1, 0]: # all action are ordered by left, up, right, relative to the body + self.dir = 0 # up + self.legal_action = [2, 0, 3] + elif displace == [1, 0]: + self.dir = 1 # down + self.legal_action = [3, 1, 2] + elif displace == [0, board_width - 1]: + self.dir = 2 # left + self.legal_action = [1, 2, 0] + elif displace == [0, 1]: + self.dir = 3 # right + self.legal_action = [0, 3, 1] + else: + assert False, 'snake positions error' + positions = [[(self.head[0] - 1) % board_height, self.head[1]], + [(self.head[0] + 1) % board_height, self.head[1]], + [self.head[0], (self.head[1] - 1) % board_width], + [self.head[0], (self.head[1] + 1) % board_width]] + self.legal_position = [positions[_] for _ in self.legal_action] + + def get_action(self, position): + if position not in self.legal_position: + assert False, 'the start and end points do not match' + idx = self.legal_position.index(position) + return self.legal_action[idx] # 0, 1, 2, 3: up, down, left, right + + def step(self, legal_input): + if legal_input in self.legal_position: + position = legal_input + elif legal_input in self.legal_action: + idx = self.legal_action.index(legal_input) + position = self.legal_position[idx] + else: + assert False, 'illegal snake move' + self.head = position + self.pos.insert(0, position) + if position in self.beans_positions: # eat a bean + self.len += 1 + else: # do not eat a bean + self.pos.pop() \ No newline at end of file diff --git a/openrl/envs/snake/discrete.py b/openrl/envs/snake/discrete.py new file mode 100644 index 00000000..20867064 --- /dev/null +++ b/openrl/envs/snake/discrete.py @@ -0,0 +1,31 @@ +import numpy as np +from .space import Space + + +class Discrete(Space): + r"""A discrete space in :math:`\{ 0, 1, \\dots, n-1 \}`. 
+ Example:: + >>> Discrete(2) + """ + def __init__(self, n): + assert n >= 0 + self.n = n + super(Discrete, self).__init__((), np.int64) + + def sample(self): + return self.np_random.randint(self.n) + + def contains(self, x): + if isinstance(x, int): + as_int = x + elif isinstance(x, (np.generic, np.ndarray)) and (x.dtype.char in np.typecodes['AllInteger'] and x.shape == ()): + as_int = int(x) + else: + return False + return as_int >= 0 and as_int < self.n + + def __repr__(self): + return "Discrete(%d)" % self.n + + def __eq__(self, other): + return isinstance(other, Discrete) and self.n == other.n \ No newline at end of file diff --git a/openrl/envs/snake/game.py b/openrl/envs/snake/game.py new file mode 100644 index 00000000..f3be6166 --- /dev/null +++ b/openrl/envs/snake/game.py @@ -0,0 +1,47 @@ +# -*- coding:utf-8 -*- +# 作者:zruizhi +# 创建时间: 2020/7/10 10:24 上午 +# 描述: +from abc import ABC, abstractmethod + + +class Game(ABC): + def __init__(self, n_player, is_obs_continuous, is_act_continuous, game_name, agent_nums, obs_type): + self.n_player = n_player + self.current_state = None + self.all_observes = None + self.is_obs_continuous = is_obs_continuous + self.is_act_continuous = is_act_continuous + self.game_name = game_name + self.agent_nums = agent_nums + self.obs_type = obs_type + + def get_config(self, player_id): + raise NotImplementedError + + def get_render_data(self, current_state): + return current_state + + def set_current_state(self, current_state): + raise NotImplementedError + + @abstractmethod + def is_terminal(self): + raise NotImplementedError + + def get_next_state(self, all_action): + raise NotImplementedError + + def get_reward(self, all_action): + raise NotImplementedError + + @abstractmethod + def step(self, all_action): + raise NotImplementedError + + @abstractmethod + def reset(self): + raise NotImplementedError + + def set_action_space(self): + raise NotImplementedError \ No newline at end of file diff --git a/openrl/envs/snake/gridgame.py b/openrl/envs/snake/gridgame.py new file mode 100644 index 00000000..996e58b1 --- /dev/null +++ b/openrl/envs/snake/gridgame.py @@ -0,0 +1,196 @@ +# -*- coding:utf-8 -*- +# 作者:zruizhi +# 创建时间: 2020/7/10 10:24 上午 +# 描述: + +from PIL import Image, ImageDraw +from itertools import count +import numpy as np +from .game import Game + +UNIT = 40 +FIX = 8 + + +class GridGame(Game): + def __init__(self, conf, colors=None, unit_size=UNIT, fix=FIX): + super().__init__(conf['n_player'], conf['is_obs_continuous'], conf['is_act_continuous'], + conf['game_name'], conf['agent_nums'], conf['obs_type']) + # grid game conf + self.game_name = conf['game_name'] + self.max_step = int(conf['max_step']) + self.board_width = int(conf['board_width']) + self.board_height = int(conf['board_height']) + self.cell_range = conf['cell_range'] if isinstance(eval(str(conf['cell_range'])), tuple) else (int(conf['cell_range']),) + self.cell_dim = len(self.cell_range) + self.cell_size = np.prod(self.cell_range) + + # grid observation conf + self.ob_board_width = conf['ob_board_width'] if not conf.get('ob_board_width') is None else [self.board_width for _ in range(self.n_player)] + self.ob_board_height = conf['ob_board_height'] if not conf.get('ob_board_height') is None else [self.board_height for _ in range(self.n_player)] + self.ob_cell_range = conf['ob_cell_range'] if not conf.get('ob_cell_range') is None else [self.cell_range for _ in range(self.n_player)] + + # vector observation conf + self.ob_vector_shape = conf['ob_vector_shape'] if not 
conf.get('ob_vector_shape') is None else [self.board_width*self.board_height*self.cell_dim for _ in range(self.n_player)] + self.ob_vector_range = conf['ob_vector_range'] if not conf.get('ob_vector_range') is None else [self.cell_range for _ in range(self.n_player)] + + # 每个玩家的 action space list, 可以根据player_id获取对应的single_action_space + self.joint_action_space = self.set_action_space() + + # global state,每个step需维护此项,并根据此项定义render data 及 observation + self.current_state = None + + # 记录对局结果信息 + self.n_return = [0] * self.n_player + self.won = '' + + # render 相关 + self.grid_unit = unit_size + self.grid = GridGame.init_board(self.board_width, self.board_height, unit_size) + self.grid_unit_fix = fix + self.colors = colors + generate_color(self.cell_size - len(colors) + 1) if not colors is None else generate_color( + self.cell_size) + self.init_info = None + + def get_grid_obs_config(self, player_id): + return self.ob_board_width[player_id], self.ob_board_height[player_id], self.ob_cell_range[player_id] + + def get_grid_many_obs_space(self, player_id_list): + all_obs_space = {} + for i in player_id_list: + m, n, r_l = self.get_grid_obs_config(i) + all_obs_space[i] = (m, n, len(r_l)) + return all_obs_space + + def get_vector_obs_config(self, player_id): + return self.ob_vector_shape[player_id], self.ob_vector_range[player_id] + + def get_vector_many_obs_space(self, player_id_list): + all_obs_space = {} + for i in player_id_list: + m = self.ob_vector_shape[i] + all_obs_space[i] = (m) + return all_obs_space + + def get_single_action_space(self, player_id): + return self.joint_action_space[player_id] + + def set_action_space(self): + raise NotImplementedError + + def check_win(self): + raise NotImplementedError + + def get_render_data(self, current_state): + grid_map = [[0] * self.board_width for _ in range(self.board_height)] + for i in range(self.board_height): + for j in range(self.board_width): + grid_map[i][j] = 0 + for k in range(self.cell_dim): + grid_map[i][j] = grid_map[i][j] * self.cell_range[k] + current_state[i][j][k] + return grid_map + + def set_current_state(self, current_state): + if not current_state: + raise NotImplementedError + + self.current_state = current_state + + def is_not_valid_action(self, joint_action): + raise NotImplementedError + + def is_not_valid_grid_observation(self, obs, player_id): + not_valid = 0 + w, h, cell_range = self.get_grid_obs_config(player_id) + if len(obs) != h or len(obs[0]) != w or len(obs[0][0]) != len(cell_range): + raise Exception("obs 维度不正确!", obs) + + for i in range(h): + for j in range(w): + for k in range(len(cell_range)): + if obs[i][j][k] not in range(cell_range[k]): + raise Exception("obs 单元值不正确!", obs[i][j][k]) + + return not_valid + + def is_not_valid_vector_observation(self, obs, player_id): + not_valid = 0 + shape, vector_range = self.get_vector_obs_config(player_id) + if len(obs) != shape or len(vector_range) != shape: + raise Exception("obs 维度不正确!", obs) + + for i in range(shape): + if obs[i] not in range(vector_range[i]): + raise Exception("obs 单元值不正确!", obs[i]) + + return not_valid + + def step(self, joint_action): + info_before = self.step_before_info() + all_observes, info_after = self.get_next_state(joint_action) + done = self.is_terminal() + reward = self.get_reward(joint_action) + return all_observes, reward, done, info_before, info_after + + def step_before_info(self, info=''): + return info + + def init_action_space(self): + joint_action = [] + for i in range(len(self.joint_action_space)): + player = [] + for j in 
range(len(self.joint_action_space[i])): + each = [0] * self.joint_action_space[i][j].n + player.append(each) + joint_action.append(player) + return joint_action + + def draw_board(self): + cols = [chr(i) for i in range(65, 65 + self.board_width)] + s = ', '.join(cols) + print(' ', s) + for i in range(self.board_height): + print(chr(i + 65), self.current_state[i]) + + def render_board(self): + im_data = np.array( + GridGame._render_board(self.get_render_data(self.current_state), self.grid, self.colors, self.grid_unit, self.grid_unit_fix)) + return im_data + + @staticmethod + def init_board(width, height, grid_unit, color=(250, 235, 215)): + im = Image.new(mode="RGB", size=(width * grid_unit, height * grid_unit), color=color) + draw = ImageDraw.Draw(im) + for x in range(0, width): + draw.line(((x * grid_unit, 0), (x * grid_unit, height * grid_unit)), fill=(105, 105, 105)) + for y in range(0, height): + draw.line(((0, y * grid_unit), (width * grid_unit, y * grid_unit)), fill=(105, 105, 105)) + return im + + @staticmethod + def _render_board(state, board, colors, unit, fix, extra_info=None): + ''' + 完成基本渲染棋盘操作 + 设置extra_info参数仅为了保持子类方法签名的一致 + ''' + im = board.copy() + draw = ImageDraw.Draw(im) + for x, row in zip(count(0), state): + for y, state in zip(count(0), row): + if state == 0: + continue + draw.rectangle(build_rectangle(y, x, unit, fix), fill=tuple(colors[state]), outline=(192, 192, 192)) + return im + + @staticmethod + def parse_extra_info(data): + return None + + +def build_rectangle(x, y, unit_size=UNIT, fix=FIX): + return x * unit_size + unit_size // fix, y * unit_size + unit_size // fix, (x + 1) * unit_size - unit_size // fix, ( + y + 1) * unit_size - unit_size // fix + + +def generate_color(n): + return [tuple(map(lambda n: int(n), np.random.choice(range(256), size=3))) for _ in range(n)] \ No newline at end of file diff --git a/openrl/envs/snake/observation.py b/openrl/envs/snake/observation.py new file mode 100644 index 00000000..4e1c65b1 --- /dev/null +++ b/openrl/envs/snake/observation.py @@ -0,0 +1,59 @@ +# -*- coding:utf-8 -*- +# 作者:zruizhi +# 创建时间: 2020/11/13 3:51 下午 +# 描述:observation的各种接口类 +obs_type = ["grid", "vector", "dict"] + + +class GridObservation(object): + def get_grid_observation(self, current_state, player_id, info_before): + raise NotImplementedError + + def get_grid_many_observation(self, current_state, player_id_list, info_before=''): + all_obs = [] + for i in player_id_list: + all_obs.append(self.get_grid_observation(current_state, i, info_before)) + return all_obs + + +class VectorObservation(object): + def get_vector_observation(self, current_state, player_id, info_before): + raise NotImplementedError + + def get_vector_many_observation(self, current_state, player_id_list, info_before=''): + all_obs = [] + for i in player_id_list: + all_obs.append(self.get_vector_observation(current_state, i, info_before)) + return all_obs + + +class DictObservation(object): + def get_dict_observation(self, current_state, player_id, info_before): + raise NotImplementedError + + def get_dict_many_observation(self, current_state, player_id_list, info_before=''): + all_obs = [] + for i in player_id_list: + all_obs.append(self.get_dict_observation(current_state, i, info_before)) + return all_obs + + +# todo: observation builder +class CustomObservation(object): + def get_custom_observation(self, current_state, player_id): + raise NotImplementedError + + def get_custom_obs_space(self, player_id): + raise NotImplementedError + + def get_custom_many_observation(self, 
current_state, player_id_list): + all_obs = [] + for i in player_id_list: + all_obs.append(self.get_custom_observation(current_state, i)) + return all_obs + + def get_custom_many_obs_space(self, player_id_list): + all_obs_space = [] + for i in player_id_list: + all_obs_space.append(self.get_custom_obs_space(i)) + return all_obs_space \ No newline at end of file diff --git a/openrl/envs/snake/snake.py b/openrl/envs/snake/snake.py new file mode 100644 index 00000000..f9eb3f54 --- /dev/null +++ b/openrl/envs/snake/snake.py @@ -0,0 +1,683 @@ +# -*- coding:utf-8 -*- +# 作者:zruizhi +# 创建时间: 2020/7/30 17:24 下午 +# 描述: +from .gridgame import GridGame +import random +from itertools import count +import numpy as np +from PIL import ImageDraw, ImageFont +from .observation import * +from .discrete import Discrete +import itertools +from gym import Env, spaces +from PIL import Image + + +class SnakeEatBeans(GridGame, GridObservation, DictObservation): + def __init__(self, env_id: int = 0, render: bool = False): + conf = { + "class_literal": "SnakeEatBeans", + "n_player": 2, + "board_width": 8, + "board_height": 6, + "cell_range": 4, + "n_beans": 5, + "max_step": 50, + "game_name": "snakes", + "is_obs_continuous": False, + "is_act_continuous": False, + "agent_nums": [1, 1], + "obs_type": ["dict", "dict"], + "save_interval": 100, + "save_path": "../../replay_winrate_var/replay_{}.gif", + } + self.terminate_flg = False + colors = conf.get("colors", [(255, 255, 255), (255, 140, 0)]) + super(SnakeEatBeans, self).__init__(conf, colors) + # 0: 没有 1:食物 2-n_player+1:各玩家蛇身 + self.n_cell_type = self.n_player + 2 + self.step_cnt = 1 + self.n_beans = int(conf["n_beans"]) + # 方向[-2,2,-1,1]分别表示[上,下,左,右] + self.actions = [-2, 2, -1, 1] + self.actions_name = {-2: "up", 2: "down", -1: "left", 1: "right"} + self.snakes_position = {} + self.players = [] + self.cur_bean_num = 0 + self.beans_position = [] + # 1<= init_len <= 3 + self.init_len = 3 + self.current_state = self.init_state() + self.all_observes = self.get_all_observes() + if self.n_player * self.init_len > self.board_height * self.board_width: + raise Exception( + "玩家数量过多:%d,超出board范围:%d,%d" + % (self.n_player, self.board_width, self.board_height) + ) + + self.input_dimension = self.board_width * self.board_height + self.action_dim = self.get_action_dim() + + self.num_agents = conf["agent_nums"][0] + self.num_enemys = conf["agent_nums"][1] + + self.observation_space = [ + spaces.Box(low=-np.inf, high=-np.inf, shape=(288,), dtype=np.float32) + ] + self.share_observation_space = [ + spaces.Box(low=-np.inf, high=+np.inf, shape=(288,), dtype=np.float32) + ] + # self.action_space = [Discrete(4) for _ in range(self.n_player)] + self.action_space = Discrete(4) + self.save_internal = conf["save_interval"] + self.save_path = conf["save_path"] + self.episode = 0 + self.render = render + self.img_list = [] + self.env_id = env_id + + def seed(self, seed=None): + if seed is None: + np.random.seed(1) + else: + np.random.seed(seed) + + def check_win(self): + flg = self.won.index(max(self.won)) + 2 + return flg + + def get_grid_observation(self, current_state, player_id, info_before): + return current_state + + def get_dict_observation(self, current_state, player_id, info_before): + key_info = {1: self.beans_position} + for i in range(self.n_player): + snake = self.players[i] + key_info[snake.player_id] = snake.segments + # key_info['state_map'] = current_state + key_info["board_width"] = self.board_width + key_info["board_height"] = self.board_height + key_info["last_direction"] 
= ( + info_before.get("directions") if isinstance(info_before, dict) else None + ) + key_info["controlled_snake_index"] = player_id + + return key_info + + def set_action_space(self): + action_space = [[Discrete(4)] for _ in range(self.n_player)] + return action_space + + def reset(self): + self.step_cnt = 1 + self.snakes_position = ( + {} + ) # 格式类似于{1: [[3, 1], [4, 3], [1, 2], [0, 6], [3, 3]], 2: [[3, 0], [3, 7], [3, 6]], 3: [[2, 7], [1, 7], [0, 7]]} + self.players = [] + self.cur_bean_num = 0 + self.beans_position = [] + self.current_state = self.init_state() + self.all_observes = self.get_all_observes() + self.terminate_flg = False + self.img_list = [] + self.episode += 1 + + # available actions + left_avail_actions = np.ones([self.num_agents, self.action_dim]) + right_avail_actions = np.ones([self.num_enemys, self.action_dim]) + avail_actions = np.concatenate([left_avail_actions, right_avail_actions], 0) + # process obs + raw_obs = self.all_observes[0] + obs = self.raw2vec(raw_obs) + share_obs = obs.copy() + info = {"action_mask": avail_actions} + return raw_obs, info # obs:(n_player, 288) + + # return self.all_observes + + def step(self, joint_action): + info_before = self.step_before_info() + joint_action = np.expand_dims(joint_action, 1) + all_observes, info_after = self.get_next_state(joint_action) + done = self.is_terminal() + reward = self.get_reward(joint_action) + + left_avail_actions = np.ones([self.num_agents, self.action_dim]) + right_avail_actions = np.ones([self.num_enemys, self.action_dim]) + avail_actions = np.concatenate([left_avail_actions, right_avail_actions], 0) + + raw_obs = all_observes[0] + obs = self.raw2vec(raw_obs) + share_obs = obs.copy() + + rewards = np.expand_dims(np.array(reward), axis=1) + + dones = [done] * self.n_player + infos = info_after + + if self.render: + img = self.render_board() + img_pil = Image.fromarray(img) + + self.img_list.append(img_pil) + + if done and self.episode % self.save_internal == 0 and self.env_id == 0: + self.img_list[0].save( + self.save_path.format(self.episode), + save_all=True, + append_images=self.img_list[1:], + duration=500, + ) + print("save replay gif to" + self.save_path.format(self.episode)) + + infos.update({"action_mask": avail_actions}) + return raw_obs, rewards, dones, infos + # return all_observes, reward, done, info_before, info_after + + # obs: 0-空白 1-豆子 2-我方蛇头 3-我方蛇身 4-敌方蛇头 5-敌方蛇身 + + def raw2vec(self, raw_obs): + control_index = raw_obs["controlled_snake_index"] + width = raw_obs["board_width"] + height = raw_obs["board_height"] + beans = raw_obs[1] + ally_pos = raw_obs[control_index] + enemy_pos = raw_obs[5 - control_index] + + obs = np.zeros(width * height * self.n_player, dtype=int) + ally_head_h, ally_head_w = ally_pos[0] + enemy_head_h, enemy_head_w = enemy_pos[0] + obs[ally_head_h * width + ally_head_w] = 2 + obs[height * width + ally_head_h * width + ally_head_w] = 4 + obs[enemy_head_h * width + enemy_head_w] = 4 + obs[height * width + enemy_head_h * width + enemy_head_w] = 2 + + for bean in beans: + h, w = bean + obs[h * width + w] = 1 + obs[height * width + h * width + w] = 1 + + for p in ally_pos[1:]: + h, w = p + obs[h * width + w] = 3 + obs[height * width + h * width + w] = 5 + + for p in enemy_pos[1:]: + h, w = p + obs[h * width + w] = 5 + obs[height * width + h * width + w] = 3 + + obs_ = np.array([]) + for i in obs: + obs_ = np.concatenate([obs_, np.eye(6)[i]]) + obs_ = obs_.reshape(-1, width * height * 6) + + return obs_ + + def init_state(self): + for i in range(self.n_player): + s = 
Snake(i + 2, self.board_width, self.board_height, self.init_len) + s_len = 1 + while s_len < self.init_len: + if s_len == 1 and i > 0: + origin_hit = self.is_hit(s.headPos, self.snakes_position) + else: + origin_hit = 0 + cur_head = s.move_and_add(self.snakes_position) + cur_hit = self.is_hit(cur_head, self.snakes_position) or self.is_hit( + cur_head, {i: s.segments[1:]} + ) + if origin_hit or cur_hit: + x = random.randrange(0, self.board_height) + y = random.randrange(0, self.board_width) + s.headPos = [x, y] + s.segments = [s.headPos] + s.direction = random.choice(self.actions) + s_len = 1 + else: + s_len += 1 + self.snakes_position[s.player_id] = s.segments + self.players.append(s) + + self.generate_beans() + self.init_info = { + "snakes_position": [ + list(v) + for k, v in sorted( + self.snakes_position.items(), key=lambda item: item[0] + ) + ], + "beans_position": list(self.beans_position), + } + directs = [] + for i in range(len(self.players)): + s = self.players[i] + directs.append(self.actions_name[s.direction]) + self.init_info["directions"] = directs + + return self.update_state() + + def update_state(self): + next_state = [ + [[0] * self.cell_dim for _ in range(self.board_width)] + for _ in range(self.board_height) + ] + for i in range(self.n_player): + snake = self.players[i] + for pos in snake.segments: + next_state[pos[0]][pos[1]][0] = i + 2 + + for pos in self.beans_position: + next_state[pos[0]][pos[1]][0] = 1 + + return next_state + + def step_before_info(self, info=""): + directs = [] + for i in range(len(self.players)): + s = self.players[i] + directs.append(self.actions_name[s.direction]) + info = {"directions": directs} + + return info + + def is_hit(self, cur_head, snakes_position): + is_hit = False + for k, v in snakes_position.items(): + for pos in v: + if cur_head == pos: + is_hit = True + # print("hit:", cur_head, snakes_position) + break + if is_hit: + break + + return is_hit + + def generate_beans(self): + all_valid_positions = set( + itertools.product(range(0, self.board_height), range(0, self.board_width)) + ) + all_valid_positions = all_valid_positions - set(map(tuple, self.beans_position)) + for positions in self.snakes_position.values(): + all_valid_positions = all_valid_positions - set(map(tuple, positions)) + + left_bean_num = self.n_beans - self.cur_bean_num + all_valid_positions = np.array(list(all_valid_positions)) + left_valid_positions = len(all_valid_positions) + + new_bean_num = ( + left_bean_num + if left_valid_positions > left_bean_num + else left_valid_positions + ) + + if left_valid_positions > 0: + new_bean_positions_idx = np.random.choice( + left_valid_positions, size=new_bean_num, replace=False + ) + new_bean_positions = all_valid_positions[new_bean_positions_idx] + else: + new_bean_positions = [] + + for new_bean_pos in new_bean_positions: + self.beans_position.append(list(new_bean_pos)) + self.cur_bean_num += 1 + + def get_all_observes(self, before_info=""): + self.all_observes = [] + for i in range(self.n_player): + each_obs = self.get_dict_observation(self.current_state, i + 2, before_info) + self.all_observes.append(each_obs) + + return self.all_observes + + def get_next_state(self, all_action): + before_info = self.step_before_info() + not_valid = self.is_not_valid_action(all_action) + if not not_valid: + # 各玩家行动 + # print("current_state", self.current_state) + eat_snakes = [0] * self.n_player + others_reward = [ + 0 + ] * self.n_player # 记录对方获得的奖励,因为是零和博弈,所以敌人获得了多少奖励,我方就要减去多少奖励 + for i in range(self.n_player): # 判断是否吃到了豆子 + snake = 
self.players[i] + act = self.actions[np.argmax(all_action[i][0])] + # print(snake.player_id, "此轮的动作为:", self.actions_name[act]) + snake.change_direction(act) + snake.move_and_add(self.snakes_position) # 更新snake.segment + if self.be_eaten(snake.headPos): # @yanxue + snake.snake_reward = 1 + eat_snakes[i] = 1 + else: + snake.snake_reward = 0 + snake.pop() + # print(snake.player_id, snake.segments) # @yanxue + snake_position = [[-1] * self.board_width for _ in range(self.board_height)] + re_generatelist = [0] * self.n_player + for i in range(self.n_player): # 判断是否相撞 + snake = self.players[i] + segment = snake.segments + for j in range(len(segment)): + x = segment[j][0] + y = segment[j][1] + if snake_position[x][y] != -1: + if j == 0: # 撞头 + re_generatelist[i] = 1 + compare_snake = self.players[snake_position[x][y]] + if [x, y] == compare_snake.segments[0]: # 两头相撞won + re_generatelist[snake_position[x][y]] = 1 + else: + snake_position[x][y] = i + for i in range(self.n_player): + snake = self.players[i] + if re_generatelist[i] == 1: + if eat_snakes[i] == 1: + snake.snake_reward = ( + self.init_len - len(snake.segments) + 1 + ) # 身体越长,惩罚越大 + else: + snake.snake_reward = self.init_len - len(snake.segments) + snake.segments = [] + for i in range(self.num_agents): + others_reward[self.num_agents :] = [ + others_reward[j + self.num_agents] + self.players[i].snake_reward + for j in range(self.num_enemys) + ] + others_reward[self.num_agents :] = [ + others_reward[j + self.num_agents] // self.num_agents + for j in range(self.num_enemys) + ] + for i in range(self.num_enemys): + others_reward[: self.num_agents] = [ + others_reward[j] + self.players[i + self.num_agents].snake_reward + for j in range(self.num_agents) + ] + others_reward[: self.num_agents] = [ + others_reward[j] // self.num_enemys for j in range(self.num_agents) + ] + for i in range(self.n_player): + self.players[i].snake_reward -= others_reward[i] + for i in range(self.n_player): + snake = self.players[i] + if re_generatelist[i] == 1: + snake = self.clear_or_regenerate(snake) + self.snakes_position[snake.player_id] = snake.segments + snake.score = snake.get_score() + # yanxue add + # 更新状态 + self.generate_beans() + + next_state = self.update_state() + self.current_state = next_state + self.step_cnt += 1 + + self.won = [0] * self.n_player + + for i in range(self.n_player): + s = self.players[i] + self.won[i] = s.score + info_after = {} + info_after["snakes_position"] = [ + list(v) + for k, v in sorted( + self.snakes_position.items(), key=lambda item: item[0] + ) + ] + info_after["beans_position"] = list(self.beans_position) + info_after["hit"] = re_generatelist + info_after["score"] = self.won + self.all_observes = self.get_all_observes(before_info) + + return self.all_observes, info_after + + def clear_or_regenerate(self, snake): + direct_x = [0, 1, -1, 0] + direct_y = [1, 0, 0, -1] + snake.segments = [] + snake.score = 0 + grid = self.get_render_data(self.update_state()) + + def can_regenerate(): + for x in range(self.board_height): + for y in range(self.board_width): + if grid[x][y] == 0: + q = [] + q.append([x, y]) + seg = [] + while q: + cur = q.pop(0) + if cur not in seg: + seg.append(cur) + for i in range(4): + nx = (direct_x[i] + cur[0]) % self.board_height + ny = (direct_y[i] + cur[1]) % self.board_width + # if nx < 0 or nx >= self.board_height or ny < 0 or ny >= self.board_width: + # continue + if grid[nx][ny] == 0 and [nx, ny] not in q: + grid[nx][ny] = 1 + q.append([nx, ny]) + if len(seg) == self.init_len: + # print("regenerate") 
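+                            # The flood fill collected init_len connected empty cells (seg); the dead snake is
+                            # respawned on them. For a 3-cell body, `mid` holds the two possible corner cells of
+                            # an L-shape: if the head seg[0] is that corner, it is swapped with seg[1] so the
+                            # head sits at an end of the body, and the facing direction is then inferred from
+                            # the head and the second cell (same row -> left/right, same column -> up/down).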
+ if len(seg) < 3: + snake.direction = random.choice(self.actions) + elif len(seg) == 3: + mid = ( + [seg[1][0], seg[2][1]], + [seg[2][0], seg[1][1]], + ) + if seg[0] in mid: + seg[0], seg[1] = seg[1], seg[0] + snake.segments = seg + snake.headPos = seg[0] + if seg[0][0] == seg[1][0]: + # 右 + if seg[0][1] > seg[1][1]: + snake.direction = 1 + # 左 + else: + snake.direction = -1 + elif seg[0][1] == seg[1][1]: + # 下 + if seg[0][0] > seg[1][0]: + snake.direction = 2 + # 上 + else: + snake.direction = -2 + # print("re head", snake.headPos) # 输出重新生成的蛇 + # print("re snakes segments", snake.segments) + return True + # print("clear") + return False + + flg = can_regenerate() + if not flg: + self.terminate_flg = True + # print(self.terminate_flg) + return snake + + # def is_not_valid_action(self, joint_action): + # not_valid = 0 + # if len(joint_action) != self.n_player: + # raise Exception("joint action 维度不正确!", len(joint_action)) + # + # for i in range(len(joint_action)): + # if len(joint_action[i][0]) != 4: + # raise Exception("玩家%d joint action维度不正确!" % i, joint_action[i]) + # return not_valid + + def is_not_valid_action(self, all_action): + not_valid = 0 + if len(all_action) != self.n_player: + raise Exception("all action 维度不正确!", len(all_action)) + + for i in range(self.n_player): + if len(all_action[i][0]) != 4: + raise Exception("玩家%d joint action维度不正确!" % i, all_action[i]) + return not_valid + + def get_reward(self, all_action): + r = [0] * self.n_player + for i in range(self.n_player): + r[i] = self.players[i].snake_reward + self.n_return[i] += r[i] + # print("score:", self.won) + return r + + def is_terminal(self): + all_member = self.n_beans + # all_member = len(self.beans_position) + for s in self.players: + all_member += len(s.segments) + is_done = ( + self.step_cnt > self.max_step + or all_member > self.board_height * self.board_width + ) + + return is_done or self.terminate_flg + + def encode(self, actions): + joint_action = self.init_action_space() + if len(actions) != self.n_player: + raise Exception("action输入维度不正确!", len(actions)) + for i in range(self.n_player): + joint_action[i][0][int(actions[i])] = 1 + return joint_action + + def get_terminal_actions(self): + print("请输入%d个玩家的动作方向[0-3](上下左右),空格隔开:" % self.n_player) + cur = input() + actions = cur.split(" ") + return self.encode(actions) + + def be_eaten(self, snake_pos): + for bean in self.beans_position: + if snake_pos[0] == bean[0] and snake_pos[1] == bean[1]: + self.beans_position.remove(bean) + self.cur_bean_num -= 1 + return True + return False + + def get_action_dim(self): + action_dim = 1 + for i in range(len(self.joint_action_space[0])): + action_dim *= self.joint_action_space[0][i].n + + return action_dim + + def draw_board(self): + cols = [chr(i) for i in range(65, 65 + self.board_width)] + s = ", ".join(cols) + print(" ", s) + for i in range(self.board_height): + # print(i) + print(chr(i + 65), self.current_state[i]) + + @staticmethod + def _render_board(state, board, colors, unit, fix, extra_info): + im = GridGame._render_board(state, board, colors, unit, fix) + draw = ImageDraw.Draw(im) + # fnt = ImageFont.truetype("Courier.dfont", 16) + fnt = ImageFont.load_default() + for i, pos in zip(count(1), extra_info): + x, y = pos + draw.text( + ((y + 1 / 4) * unit, (x + 1 / 4) * unit), + "#{}".format(i), + font=fnt, + fill=(0, 0, 0), + ) + + return im + + def render_board(self): + extra_info = [tuple(x.headPos) for x in self.players] + im_data = np.array( + SnakeEatBeans._render_board( + 
self.get_render_data(self.current_state), + self.grid, + self.colors, + self.grid_unit, + self.grid_unit_fix, + extra_info, + ) + ) + return im_data + + @staticmethod + def parse_extra_info(data): + # return eval(re.search(r'({.*})', data['info_after']).group(1)).values() + # d = (eval(eval(data)['snakes_position']).values()) + if isinstance(data, str): + d = eval(data)["snakes_position"] + else: + d = data["snakes_position"] + + return [i[0] for i in d] + + +class Snake: + def __init__(self, player_id, board_width, board_height, init_len): + self.actions = [-2, 2, -1, 1] + self.actions_name = {-2: "up", 2: "down", -1: "left", 1: "right"} + self.direction = random.choice(self.actions) # 方向[-2,2,-1,1]分别表示[上,下,左,右] + self.board_width = board_width + self.board_height = board_height + x = random.randrange(0, board_height) + y = random.randrange(0, board_width) + self.segments = [[x, y]] + self.headPos = self.segments[0] + self.player_id = player_id + self.score = 0 + self.snake_reward = 0 + self.init_len = init_len + + def get_score(self): + return len(self.segments) - self.init_len + + def change_direction(self, act): + if act + self.direction != 0: + self.direction = act + else: + n_direct = random.choice(self.actions) + while n_direct + self.direction == 0: + n_direct = random.choice(self.actions) + self.direction = n_direct + # print("方向不合法,重新生成") + # print("direction", self.actions_name[self.direction]) + + # 超过边界,可以穿越 + def update_position(self, position): + position[0] %= self.board_height + position[1] %= self.board_width + return position + + def move_and_add(self, snakes_position): + cur_head = list(self.headPos) + # 根据方向移动蛇头的坐标 + # 右 + if self.direction == 1: + cur_head[1] += 1 + # 左 + if self.direction == -1: + cur_head[1] -= 1 + # 上 + if self.direction == -2: + cur_head[0] -= 1 + # 下 + if self.direction == 2: + cur_head[0] += 1 + + cur_head = self.update_position(cur_head) + # print("cur head", cur_head) + # print("cur snakes positions", snakes_position) + + self.segments.insert(0, cur_head) + self.headPos = self.segments[0] + return cur_head + + def pop(self): + self.segments.pop() # 在蛇尾减去一格 diff --git a/openrl/envs/snake/snake_3v3.py b/openrl/envs/snake/snake_3v3.py new file mode 100644 index 00000000..80364b3d --- /dev/null +++ b/openrl/envs/snake/snake_3v3.py @@ -0,0 +1,780 @@ +# -*- coding:utf-8 -*- +# 作者:zruizhi +# 创建时间: 2020/7/30 17:24 下午 +# 描述: +from .gridgame import GridGame +import random +from itertools import count +import numpy as np +from PIL import ImageDraw, ImageFont +from .observation import * +from .discrete import Discrete +from .common import Board, HiddenPrints, SnakePos #TODO: Snake类的重名问题 +import itertools +from gym import Env, spaces +from PIL import Image + +import time +import copy + + + +class SnakeEatBeans(GridGame, GridObservation, DictObservation): + def __init__(self, all_args, env_id): + self.all_args = all_args + conf = { + "class_literal": "SnakeEatBeans", + "n_player": 6, + "board_width": 20, + "board_height": 10, + "channels": 15, + "cell_range": 8, + "n_beans": 5, + "max_step": 200, + "game_name": "snakes", + "is_obs_continuous": False, + "is_act_continuous": False, + "agent_nums": [3,3], + "obs_type": ["dict","dict"], + "save_interval": 100, + "save_path": "../../replay/snake_3v3/replay_{}.gif" + } + self.terminate_flg = False + colors = conf.get('colors', [(255, 255, 255), (255, 140, 0)]) + super(SnakeEatBeans, self).__init__(conf, colors) + # 0: 没有 1:食物 2-n_player+1:各玩家蛇身 + self.n_cell_type = self.n_player + 2 + self.step_cnt = 1 + 
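+        # step_cnt starts at 1; is_terminal() ends the episode once step_cnt exceeds max_step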
self.n_beans = int(conf['n_beans']) + # 方向[-2,2,-1,1]分别表示[上,下,左,右] + self.actions = [-2, 2, -1, 1] + self.actions_name = {-2: "up", 2: "down", -1: "left", 1: "right"} + self.snakes_position = {} + self.players = [] + self.cur_bean_num = 0 + self.beans_position = [] + # 1<= init_len <= 3 + self.init_len = 3 + self.current_state = self.init_state() + self.all_observes = self.get_all_observes() + if self.n_player * self.init_len > self.board_height * self.board_width: + raise Exception("玩家数量过多:%d,超出board范围:%d,%d" % (self.n_player, self.board_width, self.board_height)) + + self.input_dimension = self.board_width * self.board_height + self.action_dim = self.get_action_dim() + self.channels = conf["channels"] + + self.num_agents = conf["agent_nums"][0] + self.num_enemys = conf["agent_nums"][1] + + self.observation_space = [spaces.Box(low=-np.inf, high=-np.inf, shape=(self.channels, self.board_width, self.board_height), dtype=np.float32)] + self.share_observation_space = [] + self.share_observation_space = [spaces.Box( + low=-np.inf, high=+np.inf, shape=(self.channels, self.board_width, self.board_height), dtype=np.float32)] + self.action_space = [Discrete(4) for _ in range(self.n_player)] + self.save_interval = conf["save_interval"] + self.save_path = conf["save_path"] + self.episode = 0 + self.render = all_args.save_replay + self.img_list = [] + self.env_id = env_id + + def seed(self, seed=None): + if seed is None: + np.random.seed(1) + else: + np.random.seed(seed) + + def check_win(self): + flg = self.won.index(max(self.won)) + 2 + return flg + + def get_grid_observation(self, current_state, player_id, info_before): + return current_state + + def get_dict_observation(self, current_state, player_id, info_before): + key_info = {1: self.beans_position} + for i in range(self.n_player): + snake = self.players[i] + key_info[snake.player_id] = snake.segments + # key_info['state_map'] = current_state + key_info['board_width'] = self.board_width + key_info['board_height'] = self.board_height + key_info['last_direction'] = info_before.get('directions') if isinstance(info_before, dict) else None + key_info['controlled_snake_index'] = player_id + + return key_info + + def set_action_space(self): + action_space = [[Discrete(4)] for _ in range(self.n_player)] + return action_space + + def reset(self): + self.step_cnt = 1 + self.snakes_position = {} # 格式类似于{1: [[3, 1], [4, 3], [1, 2], [0, 6], [3, 3]], 2: [[3, 0], [3, 7], [3, 6]], 3: [[2, 7], [1, 7], [0, 7]]} + self.players = [] + self.cur_bean_num = 0 + self.beans_position = [] + self.current_state = self.init_state() + self.all_observes = self.get_all_observes() + self.terminate_flg = False + self.img_list = [] + self.episode += 1 + + # available actions + left_avail_actions = np.ones([self.num_agents, self.action_dim]) + right_avail_actions = np.ones([self.num_enemys, self.action_dim]) + avail_actions = np.concatenate([left_avail_actions, right_avail_actions], 0) + # process obs + board = [] + for i in range(self.n_player): + board.append([self.get_board(self.all_observes[i])]) + + board_ = np.concatenate(board) + obs = [] + for raw_obs in self.all_observes: + obs.append([self.raw2vec(raw_obs)]) + obs_ = np.concatenate(obs) + obs_ = np.concatenate((obs_, board_), axis=1) + + share_obs = np.repeat(np.expand_dims(obs_[0], axis = 0), 6, 0) + + return obs_, share_obs, avail_actions #obs:(n_player, 288) + + # return self.all_observes + + def step(self, joint_action): + info_before = self.step_before_info() + joint_action = np.expand_dims(joint_action, 1) + 
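+        # joint_action arrives as one one-hot row of length action_dim per snake; expanding to
+        # (n_player, 1, action_dim) lets get_next_state/is_not_valid_action index it as all_action[i][0]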
all_observes, info_after = self.get_next_state(joint_action) + done = self.is_terminal() + reward = self.get_reward(joint_action) + left_avail_actions = np.ones([self.num_agents, self.action_dim]) + right_avail_actions = np.ones([self.num_enemys, self.action_dim]) + avail_actions = np.concatenate([left_avail_actions, right_avail_actions], 0) + + board = [] + for i in range(self.n_player): + board.append([self.get_board(all_observes[i])]) + + board_ = np.concatenate(board) + + obs = [] + + for raw_obs in all_observes: + obs.append([self.raw2vec(raw_obs)]) # obs:[[(14, 20, 10)], [], ..., []] + + obs_ = np.concatenate(obs) #(n_player, channels, width, height) + obs_ = np.concatenate((obs_, board_), axis=1) + + share_obs = np.repeat(np.expand_dims(obs_[0], axis = 0), 6, 0) + + if done: + reward = self.get_final_reward(reward) + + rewards = np.expand_dims(np.array(reward), axis=1) + + dones = [done] * self.n_player + infos = [info_after] * self.n_player + + if self.render and self.episode % self.save_interval == 0 and self.env_id == 0: + + img = self.render_board() + img_pil = Image.fromarray(img) + self.img_list.append(img_pil) + + if done: + self.img_list[0].save(self.save_path.format(self.episode), + save_all = True, append_images = self.img_list[1:], duration = 400) + print("save replay gif to" + self.save_path.format(self.episode)) + + return obs_, share_obs, rewards, dones, infos, avail_actions + # return all_observes, reward, done, info_before, info_after + + # obs: 0 空白 1 豆子 2 我方蛇头 3 我方蛇身 4-5 友方蛇头 6-7 友方蛇身 8-10 敌方蛇头 11-13 敌方蛇身 + def raw2vec(self, raw_obs): + control_index = raw_obs['controlled_snake_index'] + width = raw_obs['board_width'] + height = raw_obs['board_height'] + beans = raw_obs[1] + pos = raw_obs[control_index] + + obs = np.zeros(width * height, dtype = int) + head_h, head_w = pos[0] + obs[head_h * width + head_w] = 2 + + for bean in beans: + h, w = bean + obs[h * width + w] = 1 + + for p in pos[1:]: + h, w = p + obs[h * width + w] = 3 + + if control_index == 2: + h1, w1 = raw_obs[3][0] + h2, w2 = raw_obs[4][0] + obs[h1 * width + w1] = 4 + obs[h2 * width + w2] = 5 + for p in raw_obs[3][1:]: + h, w = p + obs[h * width + w] = 6 + for p in raw_obs[4][1:]: + h, w = p + obs[h * width + w] = 7 + for i in range(self.num_agents + 2, self.n_player + 2): + h, w = raw_obs[i][0] + obs[h * width + w] = i + 3 + for p in raw_obs[i][1:]: + h, w = p + obs[h * width + w] = i + 6 + elif control_index == 3: + h1, w1 = raw_obs[2][0] + h2, w2 = raw_obs[4][0] + obs[h1 * width + w1] = 4 + obs[h2 * width + w2] = 5 + for p in raw_obs[2][1:]: + h, w = p + obs[h * width + w] = 6 + for p in raw_obs[4][1:]: + h, w = p + obs[h * width + w] = 7 + for i in range(self.num_agents + 2, self.n_player + 2): + h, w = raw_obs[i][0] + obs[h * width + w] = i + 3 + for p in raw_obs[i][1:]: + h, w = p + obs[h * width + w] = i + 6 + elif control_index == 4: + h1, w1 = raw_obs[2][0] + h2, w2 = raw_obs[3][0] + obs[h1 * width + w1] = 4 + obs[h2 * width + w2] = 5 + for p in raw_obs[2][1:]: + h, w = p + obs[h * width + w] = 6 + for p in raw_obs[3][1:]: + h, w = p + obs[h * width + w] = 7 + for i in range(self.num_agents + 2, self.n_player + 2): + h, w = raw_obs[i][0] + obs[h * width + w] = i + 3 + for p in raw_obs[i][1:]: + h, w = p + obs[h * width + w] = i + 6 + elif control_index == 5: + h1, w1 = raw_obs[6][0] + h2, w2 = raw_obs[7][0] + obs[h1 * width + w1] = 4 + obs[h2 * width + w2] = 5 + for p in raw_obs[6][1:]: + h, w = p + obs[h * width + w] = 6 + for p in raw_obs[7][1:]: + h, w = p + obs[h * width + w] = 7 + for i 
in range(2, self.num_agents + 2): + h, w = raw_obs[i][0] + obs[h * width + w] = i + 6 + for p in raw_obs[i][1:]: + h, w = p + obs[h * width + w] = i + 9 + elif control_index == 6: + h1, w1 = raw_obs[5][0] + h2, w2 = raw_obs[7][0] + obs[h1 * width + w1] = 4 + obs[h2 * width + w2] = 5 + for p in raw_obs[5][1:]: + h, w = p + obs[h * width + w] = 6 + for p in raw_obs[7][1:]: + h, w = p + obs[h * width + w] = 7 + for i in range(2, self.num_agents + 2): + h, w = raw_obs[i][0] + obs[h * width + w] = i + 6 + for p in raw_obs[i][1:]: + h, w = p + obs[h * width + w] = i + 9 + else: + h1, w1 = raw_obs[5][0] + h2, w2 = raw_obs[6][0] + obs[h1 * width + w1] = 4 + obs[h2 * width + w2] = 5 + for p in raw_obs[5][1:]: + h, w = p + obs[h * width + w] = 6 + for p in raw_obs[6][1:]: + h, w = p + obs[h * width + w] = 7 + for i in range(2, self.num_agents + 2): + h, w = raw_obs[i][0] + obs[h * width + w] = i + 6 + for p in raw_obs[i][1:]: + h, w = p + obs[h * width + w] = i + 9 + + obs_ = np.zeros(width * height * (self.channels - 1), dtype = int) + for i in range(width * height): + obs_[i * (self.channels - 1) + obs[i]] = 1 # channels的最后一维是territory matrix, 此处不生成, 要减去 + obs_ = obs_.reshape(height, width, (self.channels - 1)) # (height, width, channels-1 ) + obs_ = obs_.transpose((2, 1, 0)) + + return obs_ + + def get_board(self, observation_list): + observation_len = len(observation_list.keys()) + teams = None + teams = [[0, 1, 2], [3, 4, 5]] # 3v3 + teams_count = len(teams) + snakes_count = sum([len(_) for _ in teams]) + + # read observation + obs = observation_list.copy() + board_height = obs['board_height'] # 10 + board_width = obs['board_width'] # 20 + # print("obs['controlled_snake_index'] is ", obs['controlled_snake_index']) + ctrl_agent_index = obs['controlled_snake_index'] - 2 # 0, 1, 2, 3, 4, 5 + # last_directions = obs['last_direction'] # ['up', 'left', 'down', 'left', 'left', 'left'] + beans_positions = obs[1] # e.g.[[7, 15], [4, 14], [5, 12], [4, 12], [5, 7]] + snakes = {key - 2: SnakePos(obs[key], board_height, board_width, beans_positions) + for key in obs.keys() & {_ + 2 for _ in range(snakes_count)}} # &: intersection + team_indexes = [_ for _ in teams if ctrl_agent_index in _][0] + + init_board = Board(board_height, board_width, snakes, beans_positions, teams) + bd = copy.deepcopy(init_board) + + with HiddenPrints(): + while not all(_ == [] for _ in bd.open.values()): # loop until all values in open are empty list + bd.step() + + board = np.array(bd.board).transpose() + board = np.expand_dims(board, axis=0) + return board + + + def init_state(self): + for i in range(self.n_player): + s = Snake(i + 2, self.board_width, self.board_height, self.init_len) + s_len = 1 + while s_len < self.init_len: + if s_len == 1 and i > 0: + origin_hit = self.is_hit(s.headPos, self.snakes_position) + else: + origin_hit = 0 + cur_head = s.move_and_add(self.snakes_position) + cur_hit = self.is_hit(cur_head, self.snakes_position) or self.is_hit(cur_head, {i:s.segments[1:]}) + if origin_hit or cur_hit: + x = random.randrange(0, self.board_height) + y = random.randrange(0, self.board_width) + s.headPos = [x, y] + s.segments = [s.headPos] + s.direction = random.choice(self.actions) + s_len = 1 + else: + s_len += 1 + self.snakes_position[s.player_id] = s.segments + self.players.append(s) + + self.generate_beans() + self.init_info = { + "snakes_position": [list(v) for k, v in sorted(self.snakes_position.items(), key=lambda item: item[0])], + "beans_position": list(self.beans_position)} + directs = [] + for i in 
range(len(self.players)): + s = self.players[i] + directs.append(self.actions_name[s.direction]) + self.init_info["directions"] = directs + + return self.update_state() + + def update_state(self): + next_state = [[[0] * self.cell_dim for _ in range(self.board_width)] for _ in range(self.board_height)] + for i in range(self.n_player): + snake = self.players[i] + for pos in snake.segments: + next_state[pos[0]][pos[1]][0] = i + 2 + + for pos in self.beans_position: + next_state[pos[0]][pos[1]][0] = 1 + + return next_state + + def step_before_info(self, info=''): + directs = [] + for i in range(len(self.players)): + s = self.players[i] + directs.append(self.actions_name[s.direction]) + info = {"directions": directs} + + return info + + def is_hit(self, cur_head, snakes_position): + is_hit = False + for k, v in snakes_position.items(): + for pos in v: + if cur_head == pos: + is_hit = True + # print("hit:", cur_head, snakes_position) + break + if is_hit: + break + + return is_hit + + def generate_beans(self): + all_valid_positions = set(itertools.product(range(0, self.board_height), range(0, self.board_width))) + all_valid_positions = all_valid_positions - set(map(tuple, self.beans_position)) + for positions in self.snakes_position.values(): + all_valid_positions = all_valid_positions - set(map(tuple, positions)) + + left_bean_num = self.n_beans - self.cur_bean_num + all_valid_positions = np.array(list(all_valid_positions)) + left_valid_positions = len(all_valid_positions) + + new_bean_num = left_bean_num if left_valid_positions > left_bean_num else left_valid_positions + + if left_valid_positions > 0: + new_bean_positions_idx = np.random.choice(left_valid_positions, size=new_bean_num, replace=False) + new_bean_positions = all_valid_positions[new_bean_positions_idx] + else: + new_bean_positions = [] + + for new_bean_pos in new_bean_positions: + self.beans_position.append(list(new_bean_pos)) + self.cur_bean_num += 1 + + def get_all_observes(self, before_info=''): + self.all_observes = [] + for i in range(self.n_player): + each_obs = self.get_dict_observation(self.current_state, i+2, before_info) + self.all_observes.append(each_obs) + + return self.all_observes + + def get_next_state(self, all_action): + before_info = self.step_before_info() + not_valid = self.is_not_valid_action(all_action) + if not not_valid: + # 各玩家行动 + # print("current_state", self.current_state) + eat_snakes = [0] * self.n_player + ally_reward = 0 + enemy_reward = 0 + for i in range(self.n_player): # 判断是否吃到了豆子 + snake = self.players[i] + act = self.actions[np.argmax(all_action[i][0])] + # print(snake.player_id, "此轮的动作为:", self.actions_name[act]) + snake.change_direction(act) + snake.move_and_add(self.snakes_position) # 更新snake.segment + if self.be_eaten(snake.headPos): # @yanxue + snake.snake_reward = 1 + eat_snakes[i] = 1 + else: + snake.snake_reward = 0 + snake.pop() + # print(snake.player_id, snake.segments) # @yanxue + snake_position = [[-1] * self.board_width for _ in range(self.board_height)] + re_generatelist = [0] * self.n_player + for i in range(self.n_player): #判断是否相撞 + snake = self.players[i] + segment = snake.segments + for j in range(len(segment)): + x = segment[j][0] + y = segment[j][1] + if snake_position[x][y] != -1: + if j == 0: # 撞头 + re_generatelist[i] = 1 + compare_snake = self.players[snake_position[x][y]] + if [x, y] == compare_snake.segments[0]: # 两头相撞won + re_generatelist[snake_position[x][y]] = 1 + else: + snake_position[x][y] = i + for i in range(self.n_player): + snake = self.players[i] + if 
re_generatelist[i] == 1: + if eat_snakes[i] == 1: + snake.snake_reward = self.init_len - len(snake.segments) + 1 #身体越长,惩罚越大 + else: + snake.snake_reward = self.init_len - len(snake.segments) + snake.segments = [] + + for i in range(self.num_agents): + ally_reward += self.players[i].snake_reward + for i in range(self.num_enemys): + enemy_reward += self.players[i + self.num_agents].snake_reward + alpha = 0.8 + for i in range(self.num_agents): + self.players[i].snake_reward = (self.players[i].snake_reward - enemy_reward / 3) * alpha + ally_reward / 3 * (1 - alpha) + for i in range(self.num_agents,self.n_player): + self.players[i].snake_reward = (self.players[i].snake_reward - ally_reward / 3) * alpha + enemy_reward / 3 * (1 - alpha) + + for i in range(self.n_player): + snake = self.players[i] + if re_generatelist[i] == 1: + snake = self.clear_or_regenerate(snake) + self.snakes_position[snake.player_id] = snake.segments + snake.score = snake.get_score() + # yanxue add + # 更新状态 + self.generate_beans() + + next_state = self.update_state() + self.current_state = next_state + self.step_cnt += 1 + + self.won = [0] * self.n_player + + for i in range(self.n_player): + s = self.players[i] + self.won[i] = s.score + info_after = {} + info_after["snakes_position"] = [list(v) for k, v in sorted(self.snakes_position.items(), key=lambda item: item[0])] + info_after["beans_position"] = list(self.beans_position) + info_after["hit"] = re_generatelist + info_after["score"] = self.won + self.all_observes = self.get_all_observes(before_info) + + return self.all_observes, info_after + + def clear_or_regenerate(self, snake): + direct_x = [0, 1, -1, 0] + direct_y = [1, 0, 0, -1] + snake.segments = [] + snake.score = 0 + grid = self.get_render_data(self.update_state()) + + def can_regenerate(): + for x in range(self.board_height): + for y in range(self.board_width): + if grid[x][y] == 0: + q = [] + q.append([x, y]) + seg = [] + while q: + cur = q.pop(0) + if cur not in seg: + seg.append(cur) + for i in range(4): + nx = (direct_x[i] + cur[0]) % self.board_height + ny = (direct_y[i] + cur[1]) % self.board_width + # if nx < 0 or nx >= self.board_height or ny < 0 or ny >= self.board_width: + # continue + if grid[nx][ny] == 0 and [nx, ny] not in q: + grid[nx][ny] = 1 + q.append([nx, ny]) + if len(seg) == self.init_len: + # print("regenerate") + if len(seg) < 3: + snake.direction = random.choice(self.actions) + elif len(seg) == 3: + mid = ([seg[1][0], seg[2][1]], [seg[2][0], seg[1][1]]) + if seg[0] in mid: + seg[0], seg[1] = seg[1], seg[0] + snake.segments = seg + snake.headPos = seg[0] + if seg[0][0] == seg[1][0]: + # 右 + if seg[0][1] > seg[1][1]: + snake.direction = 1 + # 左 + else: + snake.direction = -1 + elif seg[0][1] == seg[1][1]: + # 下 + if seg[0][0] > seg[1][0]: + snake.direction = 2 + # 上 + else: + snake.direction = -2 + # print("re head", snake.headPos) # 输出重新生成的蛇 + # print("re snakes segments", snake.segments) + return True + # print("clear") + return False + + flg = can_regenerate() + if not flg: + self.terminate_flg = True + # print(self.terminate_flg) + return snake + + def is_not_valid_action(self, all_action): + not_valid = 0 + if len(all_action) != self.n_player: + raise Exception("all action 维度不正确!", len(all_action)) + + for i in range(self.n_player): + if len(all_action[i][0]) != 4: + raise Exception("玩家%d joint action维度不正确!" 
% i, all_action[i]) + return not_valid + + def get_reward(self, all_action): + r = [0] * self.n_player + for i in range(self.n_player): + r[i] = self.players[i].snake_reward + self.n_return[i] += r[i] + # print("score:", self.won) + return r + + def get_final_reward(self, reward): + ally_reward = reward[0] + reward[1] + reward[2] + enemy_reward = reward[3] + reward[4] + reward[5] + if ally_reward > enemy_reward: + reward[0] += 10 + reward[1] += 10 + reward[2] += 10 + reward[3] -= 10 + reward[4] -= 10 + reward[5] -= 10 + elif ally_reward < enemy_reward: + reward[3] += 10 + reward[4] += 10 + reward[5] += 10 + reward[0] -= 10 + reward[1] -= 10 + reward[2] -= 10 + return reward + + def is_terminal(self): + all_member = self.n_beans + # all_member = len(self.beans_position) + for s in self.players: + all_member += len(s.segments) + is_done = self.step_cnt > self.max_step or all_member > self.board_height * self.board_width + + return is_done or self.terminate_flg + + def encode(self, actions): + joint_action = self.init_action_space() + if len(actions) != self.n_player: + raise Exception("action输入维度不正确!", len(actions)) + for i in range(self.n_player): + joint_action[i][0][int(actions[i])] = 1 + return joint_action + + def get_terminal_actions(self): + print("请输入%d个玩家的动作方向[0-3](上下左右),空格隔开:" % self.n_player) + cur = input() + actions = cur.split(" ") + return self.encode(actions) + + def be_eaten(self, snake_pos): + for bean in self.beans_position: + if snake_pos[0] == bean[0] and snake_pos[1] == bean[1]: + self.beans_position.remove(bean) + self.cur_bean_num -= 1 + return True + return False + + def get_action_dim(self): + action_dim = 1 + for i in range(len(self.joint_action_space[0])): + action_dim *= self.joint_action_space[0][i].n + + return action_dim + + def draw_board(self): + cols = [chr(i) for i in range(65, 65 + self.board_width)] + s = ', '.join(cols) + print(' ', s) + for i in range(self.board_height): + # print(i) + print(chr(i + 65), self.current_state[i]) + + @staticmethod + def _render_board(state, board, colors, unit, fix, extra_info): + im = GridGame._render_board(state, board, colors, unit, fix) + draw = ImageDraw.Draw(im) + # fnt = ImageFont.truetype("Courier.dfont", 16) + fnt = ImageFont.load_default() + for i, pos in zip(count(1), extra_info): + x, y = pos + draw.text(((y + 1 / 4) * unit, (x + 1 / 4) * unit), + "#{}".format(i), + font=fnt, + fill=(0, 0, 0)) + + return im + + def render_board(self): + extra_info = [tuple(x.headPos) for x in self.players] + im_data = np.array( + SnakeEatBeans._render_board(self.get_render_data(self.current_state), self.grid, self.colors, self.grid_unit, self.grid_unit_fix, + extra_info)) + return im_data + + @staticmethod + def parse_extra_info(data): + # return eval(re.search(r'({.*})', data['info_after']).group(1)).values() + # d = (eval(eval(data)['snakes_position']).values()) + if isinstance(data, str): + d = eval(data)['snakes_position'] + else: + d = data['snakes_position'] + + return [i[0] for i in d] + + +class Snake(): + def __init__(self, player_id, board_width, board_height, init_len): + self.actions = [-2, 2, -1, 1] + self.actions_name = {-2: "up", 2: "down", -1: "left", 1: "right"} + self.direction = random.choice(self.actions) # 方向[-2,2,-1,1]分别表示[上,下,左,右] + self.board_width = board_width + self.board_height = board_height + x = random.randrange(0, board_height) + y = random.randrange(0, board_width) + self.segments = [[x, y]] + self.headPos = self.segments[0] + self.player_id = player_id + self.score = 0 + self.snake_reward = 0 
+ self.init_len = init_len + + def get_score(self): + return len(self.segments) - self.init_len + + def change_direction(self, act): + if act + self.direction != 0: + self.direction = act + else: + n_direct = random.choice(self.actions) + while n_direct + self.direction == 0: + n_direct = random.choice(self.actions) + self.direction = n_direct + # print("方向不合法,重新生成") + # print("direction", self.actions_name[self.direction]) + + # 超过边界,可以穿越 + def update_position(self, position): + position[0] %= self.board_height + position[1] %= self.board_width + return position + + def move_and_add(self, snakes_position): + cur_head = list(self.headPos) + # 根据方向移动蛇头的坐标 + # 右 + if self.direction == 1: + cur_head[1] += 1 + # 左 + if self.direction == -1: + cur_head[1] -= 1 + # 上 + if self.direction == -2: + cur_head[0] -= 1 + # 下 + if self.direction == 2: + cur_head[0] += 1 + + cur_head = self.update_position(cur_head) + # print("cur head", cur_head) + # print("cur snakes positions", snakes_position) + + self.segments.insert(0, cur_head) + self.headPos = self.segments[0] + return cur_head + + def pop(self): + self.segments.pop() # 在蛇尾减去一格 \ No newline at end of file diff --git a/openrl/envs/snake/space.py b/openrl/envs/snake/space.py new file mode 100644 index 00000000..918dcf2e --- /dev/null +++ b/openrl/envs/snake/space.py @@ -0,0 +1,61 @@ +from gym.utils import seeding + + +class Space(object): + """Defines the observation and action spaces, so you can write generic + code that applies to any Env. For example, you can choose a random + action. + WARNING - Custom observation & action spaces can inherit from the `Space` + class. However, most use-cases should be covered by the existing space + classes (e.g. `Box`, `Discrete`, etc...), and container classes (`Tuple` & + `Dict`). Note that parametrized probability distributions (through the + `sample()` method), and batching functions (in `gym.vector.VectorEnv`), are + only well-defined for instances of spaces provided in gym by default. + Moreover, some implementations of Reinforcement Learning algorithms might + not handle custom spaces properly. Use custom spaces with care. + """ + def __init__(self, shape=None, dtype=None): + import numpy as np # takes about 300-400ms to import, so we load lazily + self.shape = None if shape is None else tuple(shape) + self.dtype = None if dtype is None else np.dtype(dtype) + self._np_random = None + + @property + def np_random(self): + """Lazily seed the rng since this is expensive and only needed if + sampling from this space. + """ + if self._np_random is None: + self.seed() + + return self._np_random + + def sample(self): + """Randomly sample an element of this space. Can be + uniform or non-uniform sampling based on boundedness of space.""" + raise NotImplementedError + + def seed(self, seed=None): + """Seed the PRNG of this space. 
""" + self._np_random, seed = seeding.np_random(seed) + return [seed] + + def contains(self, x): + """ + Return boolean specifying if x is a valid + member of this space + """ + raise NotImplementedError + + def __contains__(self, x): + return self.contains(x) + + def to_jsonable(self, sample_n): + """Convert a batch of samples from this space to a JSONable data type.""" + # By default, assume identity is JSONable + return sample_n + + def from_jsonable(self, sample_n): + """Convert a JSONable data type to a batch of samples from this space.""" + # By default, assume identity is JSONable + return \ No newline at end of file From 401569b7d4491ee4922d314e9cb23afc34df5476 Mon Sep 17 00:00:00 2001 From: huangshiyu Date: Fri, 18 Aug 2023 20:22:46 +0800 Subject: [PATCH 2/2] add snake environment --- Gallery.md | 29 +- README.md | 3 +- README_zh.md | 3 +- docs/images/snakes_1v1.gif | Bin 0 -> 110831 bytes examples/dm_control/train_ppo.py | 3 +- examples/snake/README.md | 7 + examples/snake/selfplay.yaml | 3 + .../submissions/random_agent/submission.py | 1 - examples/snake/test_env.py | 95 +++++- examples/snake/train_selfplay.py | 87 ++++++ examples/snake/wrappers.py | 90 ++++++ openrl/algorithms/dqn.py | 4 +- openrl/algorithms/vdn.py | 4 +- openrl/envs/PettingZoo/__init__.py | 3 +- openrl/envs/common/registration.py | 9 +- openrl/envs/dmc/__init__.py | 5 +- openrl/envs/mpe/rendering.py | 10 +- openrl/envs/snake/__init__.py | 44 +++ openrl/envs/snake/common.py | 125 +++++--- openrl/envs/snake/discrete.py | 10 +- openrl/envs/snake/game.py | 16 +- openrl/envs/snake/gridgame.py | 146 +++++++--- openrl/envs/snake/observation.py | 14 +- openrl/envs/snake/snake.py | 109 ++++--- openrl/envs/snake/snake_3v3.py | 274 +++++++++++------- openrl/envs/snake/snake_pettingzoo.py | 129 +++++++++ openrl/envs/snake/space.py | 8 +- openrl/envs/vec_env/async_venv.py | 30 +- openrl/envs/wrappers/pettingzoo_wrappers.py | 11 +- openrl/envs/wrappers/util.py | 2 + .../networks/utils/nlp/hf_generation_utils.py | 68 +++-- .../wrappers/base_multiplayer_wrapper.py | 4 +- .../wrappers/random_opponent_wrapper.py | 4 +- openrl/utils/callbacks/checkpoint_callback.py | 4 +- openrl/utils/evaluation.py | 10 +- 35 files changed, 1040 insertions(+), 324 deletions(-) create mode 100644 docs/images/snakes_1v1.gif create mode 100644 examples/snake/selfplay.yaml create mode 100644 examples/snake/train_selfplay.py create mode 100644 examples/snake/wrappers.py create mode 100644 openrl/envs/snake/snake_pettingzoo.py diff --git a/Gallery.md b/Gallery.md index 0dd06f23..a29b2ad5 100644 --- a/Gallery.md +++ b/Gallery.md @@ -54,18 +54,19 @@ Users are also welcome to contribute their own training examples and demos to th
-| Environment/Demo | Tags | Refs |
-|:---------------------------------------------------------------------------:|:-------------------------------------------------------------------:|:-------------------------------:|
-| [MuJoCo](https://github.com/deepmind/mujoco)<br> | ![continuous](https://img.shields.io/badge/-continous-green) | [code](./examples/mujoco/) |
-| [CartPole](https://gymnasium.farama.org/environments/classic_control/cart_pole/)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/cartpole/) |
-| [MPE: Simple Spread](https://pettingzoo.farama.org/environments/mpe/simple_spread/)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![MARL](https://img.shields.io/badge/-MARL-yellow) | [code](./examples/mpe/) |
-| [StarCraft II](https://github.com/oxwhirl/smac)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![MARL](https://img.shields.io/badge/-MARL-yellow) | [code](./examples/smac/) |
-| [Chat Bot](https://openrl-docs.readthedocs.io/en/latest/quick_start/train_nlp.html)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![NLP](https://img.shields.io/badge/-NLP-green) ![Transformer](https://img.shields.io/badge/-Transformer-blue) | [code](./examples/nlp/) |
-| [Atari Pong](https://gymnasium.farama.org/environments/atari/pong/)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![image](https://img.shields.io/badge/-image-red) | [code](./examples/atari/) |
-| [PettingZoo: Tic-Tac-Toe](https://pettingzoo.farama.org/environments/classic/tictactoe/)<br> | ![selfplay](https://img.shields.io/badge/-selfplay-blue) ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/selfplay/) |
-| [DeepMind Control](https://shimmy.farama.org/environments/dm_control/)<br> | ![continuous](https://img.shields.io/badge/-continous-green) | [code](./examples/dm_control/) |
-| [Omniverse Isaac Gym](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/isaac/) |
-| [GridWorld](./examples/gridworld/)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/gridworld/) |
-| [Super Mario Bros](https://github.com/Kautenja/gym-super-mario-bros)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![image](https://img.shields.io/badge/-image-red) | [code](./examples/super_mario/) |
-| [Gym Retro](https://github.com/openai/retro)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![image](https://img.shields.io/badge/-image-red) | [code](./examples/retro/) |
+| Environment/Demo | Tags | Refs |
+|:--------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------:|:-------------------------------:|
+| [MuJoCo](https://github.com/deepmind/mujoco)<br> | ![continuous](https://img.shields.io/badge/-continous-green) | [code](./examples/mujoco/) |
+| [CartPole](https://gymnasium.farama.org/environments/classic_control/cart_pole/)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/cartpole/) |
+| [MPE: Simple Spread](https://pettingzoo.farama.org/environments/mpe/simple_spread/)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![MARL](https://img.shields.io/badge/-MARL-yellow) | [code](./examples/mpe/) |
+| [StarCraft II](https://github.com/oxwhirl/smac)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![MARL](https://img.shields.io/badge/-MARL-yellow) | [code](./examples/smac/) |
+| [Chat Bot](https://openrl-docs.readthedocs.io/en/latest/quick_start/train_nlp.html)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![NLP](https://img.shields.io/badge/-NLP-green) ![Transformer](https://img.shields.io/badge/-Transformer-blue) | [code](./examples/nlp/) |
+| [Atari Pong](https://gymnasium.farama.org/environments/atari/pong/)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![image](https://img.shields.io/badge/-image-red) | [code](./examples/atari/) |
+| [PettingZoo: Tic-Tac-Toe](https://pettingzoo.farama.org/environments/classic/tictactoe/)<br> | ![selfplay](https://img.shields.io/badge/-selfplay-blue) ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/selfplay/) |
+| [DeepMind Control](https://shimmy.farama.org/environments/dm_control/)<br> | ![continuous](https://img.shields.io/badge/-continous-green) | [code](./examples/dm_control/) |
+| [Omniverse Isaac Gym](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/isaac/) |
+| [Snake](http://www.jidiai.cn/env_detail?envid=1)<br> | ![selfplay](https://img.shields.io/badge/-selfplay-blue) ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/snake/) |
+| [GridWorld](./examples/gridworld/)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) | [code](./examples/gridworld/) |
+| [Super Mario Bros](https://github.com/Kautenja/gym-super-mario-bros)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![image](https://img.shields.io/badge/-image-red) | [code](./examples/super_mario/) |
+| [Gym Retro](https://github.com/openai/retro)<br> | ![discrete](https://img.shields.io/badge/-discrete-brightgreen) ![image](https://img.shields.io/badge/-image-red) | [code](./examples/retro/) |
</div>
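The snake environments added by this patch consume joint actions as one-hot vectors over the four movement directions: `is_not_valid_action` rejects any player entry whose inner list is not length 4, and `encode` converts integer actions into that layout. A minimal standalone sketch of the same conversion (the helper name `encode_joint_action` is illustrative, not part of the patch):

def encode_joint_action(int_actions, n_directions=4):
    # Mirrors the patch's `encode`: each player's integer action in [0, 3]
    # becomes a one-hot list wrapped in a single-element list, so that
    # all_action[i][0] has length 4, as `is_not_valid_action` expects.
    joint_action = []
    for a in int_actions:
        one_hot = [0] * n_directions
        one_hot[int(a)] = 1
        joint_action.append([one_hot])
    return joint_action

# Two players; index 0 corresponds to the first entry of the action list
# ([-2, 2, -1, 1], i.e. up/down/left/right, as defined in the Snake helper class).
print(encode_joint_action([0, 3]))  # [[[1, 0, 0, 0]], [[0, 0, 0, 1]]]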
\ No newline at end of file diff --git a/README.md b/README.md index 2e4c4aaa..af7befcc 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,8 @@ Environments currently supported by OpenRL (for more details, please refer to [G - [Atari](https://gymnasium.farama.org/environments/atari/) - [StarCraft II](https://github.com/oxwhirl/smac) - [Omniverse Isaac Gym](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs) -- [DeepMind Control](https://shimmy.farama.org/environments/dm_control/) +- [DeepMind Control](https://shimmy.farama.org/environments/dm_control/) +- [Snake](http://www.jidiai.cn/env_detail?envid=1) - [GridWorld](./examples/gridworld/) - [Super Mario Bros](https://github.com/Kautenja/gym-super-mario-bros) - [Gym Retro](https://github.com/openai/retro) diff --git a/README_zh.md b/README_zh.md index cae5cea6..41822950 100644 --- a/README_zh.md +++ b/README_zh.md @@ -86,7 +86,8 @@ OpenRL目前支持的环境(更多详情请参考 [Gallery](Gallery.md)): - [Atari](https://gymnasium.farama.org/environments/atari/) - [StarCraft II](https://github.com/oxwhirl/smac) - [Omniverse Isaac Gym](https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs) -- [DeepMind Control](https://shimmy.farama.org/environments/dm_control/) +- [DeepMind Control](https://shimmy.farama.org/environments/dm_control/) +- [Snake](http://www.jidiai.cn/env_detail?envid=1) - [GridWorld](./examples/gridworld/) - [Super Mario Bros](https://github.com/Kautenja/gym-super-mario-bros) - [Gym Retro](https://github.com/openai/retro) diff --git a/docs/images/snakes_1v1.gif b/docs/images/snakes_1v1.gif new file mode 100644 index 0000000000000000000000000000000000000000..03b40ab27e3cb460fde1603a2cda453ea5bc666f GIT binary patch literal 110831 zcmeF1cQD*v`0qcvmR)`I&gxy3AbJ){bW3z1dWaIeN7m|n^%_A$NhDb%(S_Bc1Q8)Y zR&S9YBtme%_xJyux&Pjod*=PmIWuSG%rnnx&hviexuvhGr0nboq67V*l?Bq_KsqCk z!3<=u0U7K-1{aXQ3uMv*nT$Xt3y{SK-+l$NA+&ir+ZLMn+t^fYD;hoxYUD|TJ+Vb4nRx{dGd)p3I z+p}ESvz^*=eA{!~+H>!<=iP0u2x=!)b>z5oP5&fZ1_d}uaRH^Dr zq4sQ7=Ir0!p9ZG3R4*y;K zzNjI;sG+^617FmUUDN~rBl3&-+ZWGiE*hOL78EXi9$oyNy7+f~aWQ#ud3jk2Tt5FF zy>PqSZ@xUay!`#|^550v#nt8I#pTt-)l3}!6}5?u#cgd(jEFXj~ANYm32L`(b0#g57 z2L3l!N&la!|4-HbvsC?GAoYLd>FR$L>4@tlMymlPPw-&+dNp*dFpq9Z7x${0)RKI#Qqb;VieI^`QP?sW9UIpkh; z$p-zdEWZ9oWW!EZn)6=W@yz4v_k7Gb)N&YhbcO0#E7B9muA6nSuYXm}AnR1;Z8fUv z5Nl=EA6YlqS>^ai)#~?FH8d_!U*yX6h{wI~F!|gdR3G|5)N_5RxIc7X!8G#NU*;pa zmO)q3(X(mX_;jH)SNXUsmri-gvnq)z6fIWC?j+JrfONFXxkcB~hj()NJF!=6SYUQ$>Wqt2)tm zy(w-V2T+1en2^?#uA5IIImeBh0P-qMx%DlU80FwWRv%RSB8v}l;7*u3!@6h%t$0VB zHVwbE&NEBg)|L%zV9`qs19UqwvDGKum;nBX&E#$-hDdXsx$ zr>=tO>&>a7B%Y+|;+on=8bZ4b9xOL2^4!t}Rkeay=egzb^{jE~2p+!OAq z;f3VjCL@(&r(C*R@3pJoqvnsE0?A_z4DyOEj)%@5Oit&*{%F_Iug!L8!!}bL`*a;A zg9VdY%-jOp~oDnyYD%`{$6COVv`cxO-{AAx;@WgB3hQ*9$v5F)DZD`&&dVMF(SSHCo`W7M-NufMh$>hV1f55JaP>c+tb%lWG91qCi1(>r> zl=ro*&*?UmYFVY_CvX%CxKtE-wnj2Oo@EitGRG)GoLZrc-j2UGxc8(>nvBU2p3J7= ztafJw9sBS$34FyipMok|p=FXtro5>~Y_6LMQTswVU0$u7tn`nz!{QEJDZy_Bm_4+2 zDms^A&$$7^F}yqAr+rEk_F*Su0Bb@#)^GQcQJBTfU=x*N_$hS}W?B@7t87~#Nk~Sy z)YX23!`surCS+6gw*5D|no|wp2%0I<{^`T=giI4yHIu8#@VA2C-ggRyv71G@^3@Eq z#^3v>^(|b4B7#{ZOH5M-8{GvL2DD@n{R%d}!t~A#DD2d9a*eAJ{5)gFMJD1{_p{UA zaYK#la%7{6B{OX)IK+R@90_Sku-tet3R4o(4x5P1kQ1m*5B>t>8V-+hYnfs5r!pw} zhNhIcu)G?ToNJ1{40gByjWEU`T^J*Rw!F7BDdQztuG2i;l3y{${IU#2oC}R}zwL#L zl3P&o{+(j6Zk-lJJule+?Mx$cVCHJa3iAkZ@!8Xv$M{t%$?3YsN@omtF}I#IkBj*? 
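For the 3v3 variant above, `get_next_state` mixes each snake's raw reward with its team's and the opposing team's totals (alpha = 0.8), and `get_final_reward` later adds a +/-10 bonus to the winning and losing teams. A small standalone sketch of the mixing step, assuming three snakes per team (`shape_rewards` is an illustrative name, not a function from the patch):

def shape_rewards(rewards, alpha=0.8):
    # rewards: raw per-snake rewards; indices 0-2 are one team, 3-5 the other.
    ally_reward = sum(rewards[:3])
    enemy_reward = sum(rewards[3:])
    shaped = []
    for i, r in enumerate(rewards):
        if i < 3:
            shaped.append((r - enemy_reward / 3) * alpha + ally_reward / 3 * (1 - alpha))
        else:
            shaped.append((r - ally_reward / 3) * alpha + enemy_reward / 3 * (1 - alpha))
    return shaped

# One bean eaten on each side: the eaters end up near 0.6, everyone else near -0.2.
print(shape_rewards([1, 0, 0, 0, 1, 0]))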
zX|m!GeV_ZGvt2TSrz&8I1|g1}9VZ>O+%%0`obdJUNWS@6?rkINBwD1E>DK4eeas3uAV(R>DHG&$}c&b z{vxiwh>TUlkl1uqqJqGmy`jtb^$jj52og3@mu5{{jGJ_C9ewt6-83C-7wK@}lTW}E-3}JeS#a^|lOIW20dVZ; zr>`y}Ek!K&%7=^Gmj9|E*FI5rL|*@?7SK+urN{L4TR(vD5W0)f%K*`m6>6Kp9t_4XC(z8Qk8l z=3mA{S$hyOdx#UYT8c8$I{Stt2k!ys^$S%G55)_XI?*t1OXYFN23;z6arULSqEGq; z?ZZTXoTeB|<#yH2NuqE)ULp#Np!3dxdO1 z5CvHxc-uG+KRu6iejB5Ha*kONM6Zt8%X&L0eCyeCReHV{>q0M)0~<%-z^T zxkWA6S4z=BHY#o^9hH#c$kb*c?8=Bz$B{TRcNtdJ-G|Vl#L}Ctf={ zmf085dP>9@V~TW!Tl^xJwc77`S$+||n-WbO$lbZ!)z@R;;#AL!;Axv`|MveVCb<&P zMU?3Cs#LI)hu&#l1dHPi$r}6R9{+tff-EEWJ+rs1eA!quj@yF9k$^8=KK_xKeE^2T zBGX$)_DoZmIE_PcBf@hdchAecqk_8f(V~|rZK`E@N5+K8V&Rx*P@UcrYm9`$a!Duy zIzpFmQMAa^tuQ6TCJJjpN+4l3H#44^%D0QG_HHwpG}!?4vsfYtlroZq07kIqScsN0 zArE>qIsdW<1M<@U=Enb!%q%EX&6at`*<_&26S+)BDp41}h#84p{=!xn@qRFA>m7tV z5Bh6zhY`K0Koy zCGbg!;~i=l=l}so>@vO6M;TTW6OwdXV_wDiK+u+%DVSXj!rjbInj*|E20V)+0(3tuM)!*sh>0IX(Vonm(- zah8kPsydlssO5(}f?LIg6C0<`CcPQ1R&Fqj zV7&LV8^5Zmsf@Kwa5)na3m%weiTC89_2-O<-uVcH(eik`vpr7Fu1(4JGtfG7zJz1< z>$W~k-E92j)I_vRE9qK!On~D|i=O(2B%|c7+h35bs#1(`$GyD{zwNYo@HG9heX58Y zJR@ne5FyvYLGN2AN<+*2>WVmWRb++#3i4$%?V3Dgym;jrBAh1{r`w+J*n(%ooy5@^ zO*{)sXYM#n;*OkmCEl>OTKY(IqjOLUxhs0@btu>L?y_LVwV#U)|4hR~ju@Q6V`!P$7xah0?Aj@%h$nDxoB73s8oRPWvK~^UROKa46Uz(ZZmK&oGY96n> z0O6;EzlJeC+Nbl@TutuX4lEb%YC*eXC{pu!w6~f{w#u9DyH_))z3cn-8(gGDf_^7o z8N9gEZMS7`N`A$DyFRz&vrpKcHxp0q?KKjcbLgBt+hnx-2>LVnP1q-vq^@;i*6{1Y z*71Fq`?1<-y`u)(rsO|TK*L^R$iXOI6Koi-KUXvqcQRVcgHvf+UU|{9=1ryarSqj8 z@=k$UeTJ&%W8@+;>{JF3a)TSZ4t9ZYYJ^fqYuIz0ef)lgS64A1)U(}IJ<{{aVVQc~ z4{b@^cfMgU&HDo9+L`IigzM+d%dFU2urq8#1Dnq)ElINv-0l~8(%~f5*stedMn9%q zmT||~#ea1A0$t_4zpD~!-(5OMh#Y~A7e`4*O=wOj-}M<^HWlx>|on& zAK!qRPv+R2b240Z{IT5gTK)W3r60mXf6QuE$lH5ovEg>HHwzU%cEuaXwx?G~$; zP)Ph8*Fs!O+RuNgmhx(SV=ClWI&=@o`ZGZP;g=W6CD z67Ci|O^WvJiaTmA`iQHQy#MN6tC*&stDS*N>5nJ|7eBz0hK~S`X>n^h&NV3iZJ86y>t>VkNra!ElxzL=)JAPJLRkj%tc%fv5!QYArreWX9hV01`|_y)xh+* zR|FU3QF`^G)GCm>Afrq%p#m=T_zn2DYDj9Q#aSJS=~7aeH~bTrgQ&v-MF5wbA? 
zbZ|35Mp#Poc4O?&Ulj6Rfyixj#;MMz4i^VmKWA&x^PTRd5OKzqo<1D}CCK=5xyk$I z?N&IM_z^q9`HX`a#E*zAwz|km;W?6KSLi`?3;mc*e*LV;AU|6C-vD zTmq4fjh)Yo^U7xCWM2$nEc0w32ca((qI6#oPllE3;OOcnl2dn)8D)_(XuV{?9*?G7c9SZtiQx>^i><`xZ zdXej|3<-4fDa)XFPeLR+*!LHhxT#q6LSYd9T*4AzkW32UV{Ar%0=mc2ul)EMMel~8 zVi%S{f>snccxyf5*S>+2+krwWKW9P+CCneV(adzZioJ6@uV|Vky3E554T2iuIgYRI zn6BJ0YmyEnVIq0xKmiKKrc2=NoLKPdp*IS*YM@jcOcOcribE&-*)a=3;&efvUqiw?s;nI!=M4hBGg%s@O_oGm$ig^dv55WXjk{@Q)1#752#J z6b-egisE;>T{f$#QNH=F5tHT@ha+ZFk%uT%>ley|Q$`MCC6t`gjWHB*$9bVX)Yd=M zBuMuhW1%ggbdsEcx8gGyXon3h+Nop{%i^N0wx_0Kox*<7jljBrjJlIo{}p8Rlj#?O zFAc}r4U&ksQW2L`tlk|EvIjWab2GdtPP0yKwj`OJxo3Xh)bTFxUsERg(vh1Uj65ek zhj8^2nl3=ES%cJl$1jaOF!qRXxmf2ar(kjM7+)K>W8;8JiV#br5~qbX#!GF9R1qs{QTXK38797vhe>3E$IGAGI%68&U%kuyy5JL6 zr2|jP$>a;2A|Kp;)nM0^o3KpAJC&SS4HG{xv@ic*Anf9~!@xsD+h&y5)2fWyK0Iu< zFrCOuo`Y1UajgRLynL=f8131%CG^Sm$4+6Ox1={h88zSi1cBsLiDBYsxE@}i` z+p5v)q&W)Qfqb4q^v^S-K3jA(!sNm*;M{TIN0*f=A4Q(r``Pxo!eU!x2G~T+vj}gs z&?G5&vE~GX`((@|vL>np4RO#CX~(%%Xd2`fN>FgKyL;O*L--}*I0 zV|nf6H^)~wey@hYU4J*f5*WPtyY}jhJb!h9j(n-IeH8d8cnUu0ZCSm6eXBh8rtce} zf6btO#Td`JlD|&qld0+-$N-aRIK}mTa;obVCnZJ6z#Z!Y6otV(hJ)H0PJ=oTgSrKS z`cDQ8-VPeA4^kC|Od!dl(~w2PkX6CZp(jH&Z-H##KmyL)oH{%V#Kpxmv+>Q9r}c0H@Kwh|%DJ(a_%*}vET{4ChT-8=@tSP zHzD}u?c0~ccMB@-?B4o%)6T)~y`6YD@%~7c@W`-^YSR}}FOK{B#RxEr3ZAtvccWjV zIhoQQqW&7*^KkEs=f3NG3tpk2o!D22!WXA5nP$b^xDnzFPkRqZL}&Hi*k3ZWCAlwZ zpI51N(OW@UbK0p{SbS!`PT=jl%=kU$py(42`5|QuUeV(}RBidkbXx%XN)T4OQ zzIVSOrrYlg9Tssny)$+E@4J&}Lm4?rqbA45#GU)zTa(?54|qoP9c69koZ4>$9hO4-z-H-F z&6oZTnqHryy4g77TODX_iHjV%{?0Y&-TNCy0~AN+E!OAPZ$EIj8E>~bO7J{tM>DD{+TQxXle`)(Um?t?cy6}eRZbz%NlvKMGo zsdfi-Trl@|_H7P2CQ@-|vHirz*%K+|3tP@`<@4*~|Gxio2;JUr?QTjqGmIyr*GVK@eo4#12o~4@b{nheLn<-KElB?=y3}|4;B-@ zqAPJrI?d-ciCSJ5L0_Vo%WVj+|MRt@{k`JQF{eBb(qcBe(;q%oTdx!KUH8`aBkrhc zj809_yU+o55Jxte{neVdtmxXsYI<+AE`QVdZ3G8(f`no&2D+zB9S;G&E1aua0t42D zWcGM2#4Hwm^gjJ4r&`|}63_Pj=ma(m%e5MPIbVI4SzUCc;$ zKc5drJO{@bijTd^eP*;jeAsuRLBUeE-3W&yAf0~~CX@38wNp$3?(pu}YKQx8^ z86f}PW}*Kden5j-cha!`D^~rla}pEIW)`)@5k*z0Y$k`aJ7i89K7F-cA?APfY5zs& z^yH|*QlE)F1yf&jsQohWXz!E2OAG<*G@MBIW&Hc$zah2gMahKgoYVLG!*IC< zzgl3T(h~oM)ZY(bIAFBm5m}Sd-Q}pEMQmsNr%~?ygJ;7ZuttWm?uw|>&l7Ks@nZ0o zLvwtVrq0!VDAmjWu1-R+>OsB7OT!DpR}^&tXGHG2pzYX3XQaa8;Yv}(u2}NZCx6Dy zPW9xxzv6P`4Mq`zVF1EE8^9L#vmpo{NdO5FNP=iW#c<}Shm*5o-Y7Ros+lB&JdQh* zVM(`gIHD>RubN$cEk4B!<#PUTz~-RWyKI`dFwYN7El%l5R_$Lsd)cRyx32F`!Xp~q>=-J#&s5{znXF+mrgL8U`{`b`(puVw5aD zvcS@^Eg`4Lx$^!5!{uCmqW(&5%%}T&7t@!!Z{3H*DTE4GmbF$*#@OAx*~y!6P#AVZdr130>fS8-j0A<1B+Da)3+ieK zQgOH8N3@sA&!=h2pP$mw`8aS=w7uWJl&CY874}m5PHslL0_6>>Q<>P#XE3BkOKjy* z8%>D%3)D=%M@lh{U-rx-gs160h?C#BGermZ-n;KTAz!;k^Ezhd7_|~gVGB*~4N^s& zMD>)9nZDXnIIXPNp#8>spZ9WtLMnf^ZymvKl)2&f@}K}Ia@dK<$YP1DqGJ%bWNt5-qAY4c;YQHBS{H<}}UHiX%)=BhnBdcP%n z_7mYg&rbA0fQZ~IlY{()7Qe9$mxGzccP__28T}<{SC0_vUx zU6kgfEs(eGEgs9a#6Ot#xm@_^45=l4hnHi_Wxi1G%||lTjov272;Q~$C_v2kp9_p!#RH`}cg^vJ+RPg-+dtN8+oe0@nR&dR zSdI$gUtVgGPkg||mqQ2s(W*qt=X3wYMt>#{b z4tAet_Z$o!&_?U6YMhLhjS-`8jE`JudWlZh9%#7YrR+%_Dw4Ih*(a^0Vb!APji6(NDn4(2W*l z2x1guS@_zb^Nn}R z%{_Yajy4(9qIfeNOQnE~HiRD@^<=bOy6bj1?5+4&)?CX?A_l2J{n3bvQ%}jxNcBcmN9z(?Q9dK#sJf}bwwKo*Io_=9+xIC_HpUE*%Rq^! 
zgy<&ZB6pK@wIRYESAJ^Svo5hWrp2Zkn==J%n@2d9-BEhF@qT7?S+$4Uy(lk#@09ws zrz!cUYP49QeVaVS-(!+|`$SdobFJ9Z!`fwgWT$CyJB(NfRQ@C}weaW`!f}i#Q(5%s zg4y;!XM&C2qVzL9YUfrK=3t|<2#YhWX!2RnY}2%el9-i}v?1xBu*uk)-j>mCYEt>mmr=pQI8D~cg*s@Cb z8d~FwKSK`+YvME2%#>A=pSIPl91%v#tqCy48e66MgmDMfCQ0dy?c0tnC7oIOApNSb zV}IXLTG-kYS@cnt<obh%h=ea`Aw(?Cw&a_NEfd0KkYP}$LyiZkmA_OF^o8v0hMZ`~CQoQsk8 z!@yWxpCl#lW9nE6u_SFC09S^(YGk0+$`wF!shC|RART->t@ICs7d`s}oxdIg3u==a zVQywIcyN?D6^R^Rp)r#okw5>uV7&#n$?GK7{f}=6fkmSUmRJN3D<|N9zMop~q4q;E z1mjw>+_T?rivAE7pIdFqR2=gMY%PB9;`Qm`lB{utsA{?xANYb=YA`LPlt>yuh;JfM3W2K6dGTxsgWl!$Do6pzFWW0 z%jc$9vmvpgK~Fo;5#*@l29z}ed5DXo^Mlqpqf|OW;9X(GIT1l@%$uJfQFM&A5D7tp zp-PFEG>f@EYSAt*h=NIEU``21TC3V!JLT zo(sO;hVo})#B{-LK@cw6!2{|dmO%zTq%|3JCq9zAKUQNbC<1aLQc*Mp3N6iQLwjEV z)Wf(jK|6t#aQYPgV&g>I{H-9R&JPaAh?70}igE9LEu2SUlGtYIvix-(%^}CtWeJ zy58w@e1#C#)tLH#i{sL9WegmX8m1_g8sHiESBmxS8V0H|CNpp~LR=*+y^4gdVx(7+ z@DCWcWEz;jRre-i#sg!wmisQuB}Os97Y{Kd+|()x{t1=PLL#)12<;U7cYAyZ2jkCC z59WdovoTi~Y?+@?xL=?*8&d+{S1<$~J)@15u{SZJmXfZpA>*+3!We3>=I0XYub@R=;OYfI--YvK*HZ-V~M3z_qH5A4kvA;y&-9>7Dpatot=UR?AlXoX@LE@;oe?Xo7k_sVaj5&r6B zHZXBD473|R+J}mE0#fnYNnd1q^S3b!a#^5yats5_;ilG832t85PZ=fqL$fda!j*6W z!)O?kb+My;s9a95D>){)13exHnp>x9cW3dKCHwQT@?NA~2QV+UN&7G{CclFoT6!Ox z5AwpGj?tNQR*9N7@@hHRI-w;zKK=aWZ4IU%K^;2A0^d8XKh zg7GD%SMqU@GIw-0ewY5zE%W7pOez?~z_@d_hx2n28K}Vd#6%8;Lr?i0jBlXe>$ydx zT--f6KADLD=VFh|#c#`lu0@D>QZfXfPA64#Cg8YSoO}|llmp!Y*A53q!t9C>{a_;F zUivr~3gnltQ=WyUln61|U9mUHSlcTq5n2rAg+!tCRd*65orb;6#onZ0lNjMwaNgl_ zLH>-}$I2^XVWlED#h+h9)oj0Y2gGQWm%02dYt~@=#fgJm@e+0g5py+>+qs4IHSb>6 z;Cl*>bX9%8)y8bE_2+nh!-~tDsWt76awVaTlWSl7j&o&)H~_cQA6HsAhxzmFN^~Nl zH<~JADp=nd?g|=^4hePNWNUcYm>2G0-c*#bdif(=Z&>Hu{;j??d?0!215n+Ys}I@& zkv6nak)EjV+&F(B(t?Ymu`8+8=$EqI$9DLZdTvkYh%NXThr>kL0rzFP&~{YR%(kfJ z8xM?-9r@bliFf=nKUH3ps) zB9zFR{}f5;PD)~EFeN@BL>SuQ;j=rwaQY%fhYiCqCD$)1+KWN2&M;{*6523()1U3K zyp~7>O`zp7MQk68Hzx-3*8(7xaq9`6-9eim#JU6k!;*tu7`NCjNs_oq5<&zn-!FOd z)oXQd6jwn)`Cr^sx)|Vp9I!Q(oL`dMNCtK=ASVQRM-bDc5mclBJL?qu8WfBnqB&Q= zR#VK_TUnWlMm+HI=U~o~+b~o~6$2Rr8vq${Cmext-`09%K!bd2d&7Tpj}Y>l1Dt3E zAjx8QK0=k!f}ymb$uK=$Cq%H5rnem5NC6NeN0z`zUb)M)AZzbpE7q`3*tU;1Ry6jj#?79Pb z+br6I0{qSZAaXlbv**61=ze++!0frf>3L1-zG)zbgLXm;%ole*A2tjr1l%+?_Zaj% z$d~L-0Uk{lzTx-GK!!!W*w#+84!gBWb2E(kA4nV(^DiWh^c=|m#F+?3D#nq7RpfMy zDML##b+sG}nIg3suI%D0gCI))97%C8U|y^*+xL0B@@8mpuWjn*O>bG!l7TnNMu_zp z8vvV(AUtbz&F;3@Ho2_qcm<@iAw4u-~;$A1)4ir%H-Z#1wg+c8`4_gD+%Q z^r{-Z^kfvnX+!W}XVmUvL6Sj4^)W(rc$GSsW!~OMD`e4&yq#u6sGY z&l#p-2PQv)!8uAvZ{LiuI_GO)Zx7#=wFDn$!=6`-bP;*k8GxMN;`x`p0o+`*4SMg8 z)7!Dhb`7rg-oR$bQL*RV6U{r{)z>PJ$jHZA$Bqe+bB4Zldfv)5K4Co73+-6U*#ZYC8+Mcd{&*k1;se;K;@F)eYz=Un4NkF6k zsDY>^KnBy*;|73?*87NRzr9q%PWBfYa|DGC7aAdU3gEk_h+QPxA3k5<%3lSx{(C8i zpI^Rc^Wa}Z5E~-Go~*A#&Q~JutD4mR3=v(STLuY<7ydOV7PJ%@m$dMrL87=fshpMU z6yn(E36#Q;(e9c$V;GhH|4fR~u>U)gBGiK&WAqgm`QO%bF8{SY+SulGz=RIB$rfAW^<5f=xGszUuMvwd!F!-glenVn55g(myl z$-2#2Cdapb{7hxT6_k1<)L%JX=*f0A%?a9l+IxUo>;Gf^)8t_J$&#mVw`5nAYc)CI zlJ8M(9VyiQ;5Ts@m0p9~;qNw%?uMa_BK^hzWn2nXp3x(#reFw98qjzIDNyekTIPK zYrT&lRg82h_^TGbH`)A8t`$Q1su_DvPx)TRzE|r{+{c$Uwcc{pZ0`3$PJsuY$`a~;Uxnt=2K!R$knW!hKb=k&nfNz?n#ab%@Q|#R}Y?aPvkMQKl z1xp=r$p$VnDv^~e6*CWE|BF3Z$~UGNi_@8+2ZNhpUh0iHme_2wyPz@05v!d!9)F$b zcWCE>@_{)GjzL|QqUd~of>wu`jD+5+tnYnBORIa+4h-d=7tvc0w07CYK3rI^e7v&J z->LF2zZIuh(F{+nPAhc5EBWXSVbsRh5@n?n7%(PNu!+8dPuX1{ieIIrrEFib}+>@eiy$hQB%J^KeM+i_{WsUfjPd)7QGkrjQEF0 z(brS*?kUCH+oyjFBvVLu(_YP5-U+;V67n%$@kRdNbo1O0g1GGZ3H|a(%o9C3%8`f^N*i+UJEa}Den?Yl_lXXZ+@X^@yR?p+5!pWxFi70LXmj$F z_U>PgT?kt3i!0&M8sEB#YnR1)#|>{q=xB8 z;b^SQA(g$2!j;K^Q}dvD4cA47a^SmxO}hbNu&1%4F$3cYE}4jW1ZF0r?uxt5T%CWl 
zWZb!>O#5g`U&b*bz4kW1w70P=Ac3@{BV18GaYfocVZK}{Dt94$NW5n!k9YLIE3wgC zy_z*c-@25WWCBvYKD%j`zURD*&)TY&+PBWF``@1kS>z8s{jRzB@-?e;%+9umpU)%G zF@z79Uk{opT+1+7Wh5M0ys){Ip}6cmYP2xyBH@wSro5|Fk3~bsU8#R_-$*J|#A535 z+*of$IZRx3?Q1M1`RiE_m>$R z$IIP1FtF#fo5?2qTC#-6p&z7c38wAFA+1PYInc&zvQrf2P>TDq}hE&b}|M~zh3A6rr?=*JKpCUI{`w}a+Z zmAi(F>%y0(uH7yML(*Ic2R>+}b8Nffc(!Q6xfb`zT$EO<^MP*Ilt*i$x#n4|q=SOV zY)s4ZeGkR;FX*bs1h!@#oNyNLr|sCEGM|>zSn?1I^<&S6IertYlq)W zI@CLv{aGKQksa_X9)XYM!-}Kn~fO_>4f3hSpp-m%j7s)!y1amK}OslIhBbvg&=nErMrcaq@rX$n;ikqH9hi^uH!8zF6dj!cMVvf7)rkJ?MJdwZ7wa`Q{^t3+=U1^ zRb!2-ln)Yi&(EFKw--GcELrLDS>vB407f&bNW6qTw0-mAvE#PUq8hC2rVwA})%?C4 zqX}_Z6T(q0S|`gD6)Uw2#OdTJi(I#|y`UtPV3svW^cgc%mX4Na-ng(*#~O?!>;D=5 z#bpt87}&U+odOT^wupw8?+EcTAss-+agPuUnT4aSKM5Swy(J zCthC=_BNznO0H)PN&ZbrST{=8cOuutVTOUy(EYZ}bY=wnJWo!3V@vd;vl4AFUmSqS$tw`V)4ScHJl2`M8 z@b>2KPzP?@?`OvBX0z{%HEWF}TWBoVB10;n8at7QWQoQ)_QsN35!pgxNkTPJ*)>FI zA4^EvD2g`c)AxI>=Q-y(=UnIfaIWVcm>-zsbHDHR{dzgDvfecpj!4U;qzFh>y&}Kd zB<&aP6vaP*@7DF=7gy6Uf3QCljb`1(aZvdiKBy;uy>_nC3of<13~08pw0}&0AehUc zUlups{h~1`WF(|$mg2r={tvGfe*fBQ*fUb9YO;(tB;9UhKH>Zu+F_L!mkjjSn8UGW zQ^p)Vo%ASYSbC*U@RJB*wmcz?YbpDc%}B1e4awjPbYG~(s=tNHl)@bY_4$NCD12fG z@n@)S6;Gj#{dzltA2~SkhhfPY`4-rAilNR=?cCeluNd1sAp1Q`AYXj()ju5EUjVU-lZdNSeYi8* z8K1LXQQ}?|Tua)^TR*)q^Viak;dcdRF7aK%6~0fQWUpl-3~M06WQ3Njc}9&0Y&pU9 zfz($Z-GJLKXklQjGXg1NeYh`Dz=SCp_Nuqr$FHWef!ryl&HKeL@U(tFOXQ17@yN&+ zKRQM1E8WwwqF=y0VDQ*Pb#4+zLvoPB&;W(5m&?uXA*vmu?v}nqV4EQ$yx03B>A476 zDT%Q;5D1q~w=9(6rE2%_kT;_BZOz9I(atZdQBI{v-^U@k?z`nzcP>odl|?n~GL@vG zM=BB@p)&SwD^@%K4zMpvciu2{KqUEv=uv_m5mC~D0w^eS@8*Em-T7NAn6gs;<edc0rh$EoUk1YSJ%cw0JY3VQTl^#kAXY89~oS$WAJOy?bqSDhvXNpk16d4z??~K1x zd%Ka^vp_qhS4l~-uyE#08&Nxpxc4?fNvi32ZJEB>SrOdyBh2Ie`g?_^6yJuQnpY0U z>Cc$%GQw4%?nW~%3kcL366?CXE5(wA+(f5UH279B4n51=w*JRezVMBxG?f!)PCS}; z@oIGLzP#L3TX|n@E>t3n1|a)*S@=W|R3a1SEdA3~h%n6jtt$L?GuoHq%xA<@b`E{m z9*;)nA?5S<&808bGhMqc3fbiuZC>=?%1Ed&mHSbTz)~Yo;KLE=$!*HTx8iw+qVqo# z=kEZap`lx$`xc_RO}=Y~m9biq zQe%Nr=|yBX?*3fty?{$cj0)Y4p7jS0w<*XdFp>;IWZpt%b7IOm3*UKN^j9k~*eXB> z7|!lMEh7ruhR>faJrhkt#nDm6MO+g&s0#ww! 
zL(pfgEPcNc;wCRP>1mhX`!NRzr+al5+r z(N5)x*IZAcGOA7qT{@B?QNP_$a&t}Q{I-3DGqXaxzdi)jpe*T{Modi`xti{7BAb|; z^&-v7O{$zPgKXazMWGZM*Z$~e+!0wD0l>ZOn8$22cIZ)-4kxpThEnZMfd+4;uY zi{@Tl;;)e#?=j7ff`V@LSi<;Qi_1{&mUi)vvQ8}A;FCa|%Ya91ow_4dUS5Ml8Mh%c zS`{X*OTJ`Hm*vUwBac%N(ZtHr+h7*VjhH`#`sU91UWPgaAk3b(*AKLjgOC{<)P|UA zuyHYiT3pX)-y4@6T7@34?R=!D5k)Gr9YuoKp)(uNmVv6|Ak)LT;z+0fsk6>Dp`Op- zG5->j{cgXk+($i!uEFhCFgqf=O#*eASe~?}J4LGe_*uApEc`SJahBiy+$f^n9loD^ z%b6RO2y~w&VbBlwA?bm0Eo5m(`w>9*=G}PyODjD%|H;US>Ll z%qF8VIC10rq5<1dck|zae~i*;e*35U1@}Y>ho-|5IVfM{yKm#b6;S7Cw)(Qio%+$6 z`o+i`E_g|g(oGYDVNn6Je_Yg55-Ne4y7kLy)Aado*k> ziTE)Hb;3M>fVCHBP6VGC%9#A`0K*NEbi5*Ns!CIM7P!qrCx$h8hu@DWyk9s86!H={ z9Kb?P;aQcKz0WawOGYmVle{`GUW;Ueg@C8bOJv+cn3xz$T3j!Caw^8{_55Eo8RMH>4|xhiAB$e zrSlUj6%(s{6Vl$uc&24ExTj+wjT4at#mIx)%dvE%82}Zk0x)y{YRW-@h{e}{4CN9B z=7_BEx^Ghc<+toeI8x+1DufF^PD1Z|cLfL1Bk2$@c|q|2xHc*+5cw@@j96)jBTtIt zpd!g$J2w#Lwn54y4}jo*X_0u8f82k>np7a@&eM}J0GbF8h+yCcyZjP~_%S1J=h^;KlV7A^0!&bn@;vR% zQ#6$W3Xw4sh&fyOg~oHQ@n_~Yk;hoz%65vtd7jn+%8FlN!T>%pput6gcH*J)JrT?o z5nOmS*H?}N@X-Mz56qeXBxN==2l%Jdc7mTi@0nUrpZ$>}yvEOm-VqZ1Nl!9VSH23M1Pry;G=08g&g>&%K zKe6U-t|lnfgz#P^I|2t~03iTBjVp<$bdBX zM%v;ewMv_V_`CQOgS&ts8y>_&oS{Qu)R*J&FY$D~6!M2W3KRyWd(00#mp{Nk!zM=| zQFbz$*&!G90si_aeEdzwDbAtzmskG)SF~P^$bdPj3p{*(24hs?81hRVN`wax>2pIn zKP`sNB8h-7fJF5py4H~|@K22=0SgM?6uP)@{PU(6=cp{?0QV%k|8x3}&tJ|XM)IJ< zs@b{L*IRdaAgGUz3Mdknw%>fdEv%Z3qSDZei!^Gm*KnpUidvp1VGQFew4@9jG(= z;5~#drTB6%WaovC9#_5}`m-oPzX$7=RjtnK`Y7N*`Js95x%`I2#EE4T#!^(Mi$v+jhf$vs3Kp#6-# zvDvVf6M77=pqf)jj6UqINV?*=3*T>@m|4Lq^*sd+F-tn6p*P~cL+P(4TYi+khS14D z4o&H7X;|(y9wM>x&n+k5?&mKCgTGrpA|hCjxYpO)i$7tk_p|xLr7t7@wROI|@#^81 zWpJSg%+>W%l5n4~CT`R@i!_{!S&F#xyj7-%*6{}=0t*#7GFS?c1SCOWNMeOmsmnY2 zB9tCm+qZtM!YqC%@ImRpNrCI^oNQNSTCT043$V{8Lc83v0zIUF3tbx?PE9nv943I}_Tr z3o)9=4O%Iv6fp_Rq)^a`PMF=UmAi*Smc}c;nJ5i2ndp1wjKj=h{Sdg&ad9t;chRfK z7jG&Z{p`4NoDo8J`@>{M~Deg|jmnGwdU-+=hzpv09 z5ajiU2YQj6d)^!51Bz%qImtC>o+WneK7U5Y8S*}nQlFGO%VyM7K7I^6Q>oBep$g#B zM46{Q!CR?b7hx!UHM%q8k`dYMlw=&j+bYjY^y^o4^Ae`RY417p1*YX!o_q>4(2Binwhs~r)bb&I zRwtUURu-}s|31(=*uvvMlEK82=DZK5O7Gt(fjPGkwhL1Z?dE;{ zdhwu3sXI)TNKsCOiG!kfgwEnpMRCFba{aaVo`z&3G>GaIRQjmrPGkMXZNDUiq0i5o z2NQO_OG8Vk((@7&drvK0&HXvvpRzN31%c;+donHEIQP~gF6p{$KUAb|v}}JjUSM`f zRfP?HII|f(?8T4zU#lAh_q~80q#yk4-Ug&lApmMZ%IZqCks_m2-Z+SO&8Q+hS8}IA zFQQ6%$2snDUrtp5N#@K{H8qr@Co{It)K)6Sr0(=*o@pI_{b z;D4?EAT8(kotw|<9F+oQ#T706@j1sQ`fh()mTTM1gn!L0b{%ES92Rqw*?rb>vT?h- zyQoYzVp7(rFKt5FsP6*Q{E8EZplE55nx0F^joJ>~rYq76H!31rEtg&S4$$YK6-zI) zZ3Im0$S(|kn=0<#FlVz=@-0vK`LSe2(|50g9-IAIQ6%iPzy*w({2kMMtUtYZ#KRJ6 zq?;iY8uG6zom6*#Zz^8T`SfgwIAQNvvs+EhtXs#$z1k-&bE0fZ4-$4|tP3oHUFoeU zD_KW9U%r_2a3#~(W{`xjw!Yw-gP&AX-g`Rq-~rD+GvFe_8=<~ZMXG)RT*2*TjZuPC z-aGAeJvlIujNfOJyZ6_F4qTCK+tG{K2}JeP26Kf~X5OXt=Lhtg{gVm1KhAE?a++Gw zEO)rP^E^ndy3vew&N;4hL%m*2!j`WW9=d z$dBOuqCZrJm&t2p<^}e1McowpA28_fgwUd?7ejC+YXv+OVpeWP8sP?FmCx zZP4>mqiv7}Z{$@OUyfYl&_~_Ikn{v&P(Cl$i zw7;ksGpo&TbvdLJm1eG6w)z<04^08!{vmg5&4`%7EP$DF*^jtp34H=RuXEkib#x>^5t)!ZM}}{)g>uUdxg+@H}26A zS^cP8g8c}+&Ca5myfg!uRqALh1ovGNERS*r{;H^!SZ3!6%zEwjY;~r@S}i0`u5cl3 z=3=&_#3WT%wv^^ds#TbRR{KfYuSI9su&8u{RVG47lAVG`QBS2z+e>Zd3Z)y+yafBw zN3k}6mVpj@l8;JYiaoScImB9~+w&|N!$kf_B2s2H9f5pYT}Za}Nm(IV?+|wuJ?XWU z;|X4~8_r?;RDC3-)1||H)(D^cZ6Q`z@<8qlplc|3MuS$8ETW+ru%PFQ)$Rj8#RLWFB@-2*mXIMI?t4-pY@!;o)imHCtknoS{uT#5b1d_W@Vd*Hno!WxQBVlH2$VY zey#5DV|C{C)Y--;xZR<~k!MYRXHIzC(KyaOI(uKX%RM6}$V2Mo7-b#Hf13BfXH?{9 z-s-8^n0@c}Yw;s@70z8N|8pc@FTZ<76uKsgWvL)3fSTT}JAC~5pO5?fMj0CHv-}xc zJ_X}Rg*mhe(`^QzO&4~0f4U-z82-&r5Kr%5e-6sAZ{ z6iDDKg5Z-k20Y<#`hlZ!lIa_CMnTu7;MnpnHT zRXyjLS-4fvP6{GOJ4xWYVM^K$P8T})NdNl7hBre%s*-JD(@&B2REas1RrbCC$-8`q 
zONobWByHO8XKCmo=aGA1y9GkJp~CbJ&;9GV35;$&8J&*^17WuZzR{q_i=)rMJiwK8 zucdacI(x@<-u%*XCgNhnqO#o8VJA`ThjPzy4vzugAaM(#FzEzO+&63o!}AoHEgC~l1{0z>;7XoG2;-m>=N z57QWducZr)chvC-yq6U>=Xx!e&qNcyH+jS_nGHx5m-}}GlB5q`#GWmj>5A$C+y3^7 z<<9f6IqPS%X7_AD_+!W)&Yko4XsUR1IF_an{D+O&3LK7J6CJM*Y7XA(>G78Q=j`?E z_*BIZNci4|de%o&Q7%NyIn0S!8?iWq6>z^Pew+HE>(`w{E4@;T*j}E~iCC10@Vpo) zU}E=j^9K_>D1VsSh8(NqYgAlsQ}Zb+1@S!Wg1pHxC&*tGHi;@!Jk}7wdvQo$5}cI1 z+-=@|keZ;S1)P^&b-tG@Gr~d?-s+mjwAFrg2mk#@$JvjkqY*DYJQ*{Uwh#<`{6Vyd zn))j<^7Dyv1}D~9f84QTV1InFl;DzGp8hSvh8;_C-bmZ*(JflbttYg9p}^l;9BLT% zJnJKW0ZFFMVF!oaJh$8>+52sG>3-Fwxi3|*kJ{gC1Z=`f1MgK#B8@PvEBD9ZPPjRM zz3Ssb4|y4TLw!#a)axw&(Uf}WNugX3m8>w`-Y`v0-=lQ2)g?OVn(oBmKY|mr8LE6z zuZXX{zl0ECNP~J60y%3MeM1V@;I8b08;1g{AH_w{0z-Mo6N872iFj=O5dX7$p<2L4 z{hCVz{jBZ}I?C4NGyr#DAvS){e>~gugl;2@k_6mhJRc@V+lHJJga3VaggCqF{Xs=F zxA;PlAen|J)1kvn_0m&ZtWiVqs~<-{7kk?c9cg0dWw(4mr)AH|8DGoD9w?~edGz4^$EGiiyFhs;&r z&J;JKZ7QwdMC?K!zTwo-dN*7cK6XL`3?Rz^6CEO&4xC?LDrh4Q`q47QsbCiWvsu!mMiRFU<2N6U->e1?Hi76Alcya280 zVVBe*67>`JDovJc3j?J_cpU|T0XrX>SY9oG`9>t%&441AWmVDg0?bk{AL?x6<7{u0 z@Pz?AYKp&TWni`OWurWt1sD<$puN_cUg@v}{I{4G1ojl^2n8N+`T6DV;UE$k2#Nu9 z1m%2d0Hg{)Pyr&d5(opt?W)AQt9P^&T4Yqq{;VcRUR5x@s^orECHAUn*;VzPtD65| z50fQpbc}2C+-tyUvte0{aZioOiyAXYvuKJj4%B~=0W=d(S0hSo0Z?~{ED^?F860PN z?rjnTE9eC~cH+Rc83zytWFz1clf@qk$dKW&bkymQ zh#SU&3u?rhs{$Ad7%tWNsu6`afEx>WQoVU#RX}67q4Q_s&`E$m{nwJURR^Ht>$~Zo z2T5qgUGQU20gBUl|2gp4zS@4ZRDcA?vJsyOS{3C12vGO=XDjbT>+_83(mzWul&p@j zoS*7~D9(+KtF3$jH@c645epz1*9OqC_-KGW3&F1;i2rp%R^Z0F_l*$V4QOxy$SRkU z62yJDp(@p(^Xo=`DL^xL(N0=xCup?mYIFb`7Eo1bc)i0^ie>t;72sA$W_Bn_wcGsa zJYrOB@v99Av|01BwD$-8uXg49n^!@#D&)WOs-)--|;+%X{s*8;a1EZQaBDjSmU{8{7=$G+~hg`onC6aSf4!Q0*|2k!K2e3Y&d)wOpLX}I0i3V~LQ7R4KZLj_cH<@Uy zI+r4RIB@Pi^JTq2W5DON5OnCo#w+*`5(<0vVJQNluRUdZf%fcIn68W z_5Uy^by#U0p_ooOGbGD+p!sc#k&P9-bcvkbZFX&+11{$x*yU}IV8W+@-?7%%yMHx zI$}NilYErkwhKzwfecBNOA$NJEt5g~lrBOqasF}e*>Om!XzaJuN0t7C&JRV7SIliK zU*T*2By#K{B>n0|-Ls#B>};L4T2H?UPg6`o-A^MiP+u9O^p%}MUuaaBRQbp_Bpa&o zvFbctNg7~(Ln?unYwD`^_y}UsfcXU~b$#~@SIQ(@>TU4}vo|3oZ7ITx^DQgV&<*+R zNVFjtvG@08%{UHXLZGjsX%y8>?cC@PJ~){DCypVzo@w(EV|p#+cuSO%%HH_yq_>fH zE?skMA^B@}BJleNeMOrjv{gR%uh|c2OC(_{xO@Ss{YHA!w7w)csh?r}h(NM2hIM7{ zHt0Si%Ccj&_pkw2$=(an;F@RJ=?3bng?N!uf-e(_8!txbQ#pyfv2Ufjs?z1`2NS-L zs!SqDbbW9KMx?UQTiO?+1Njn>fnM~*9eexcJ_W}!Ux^~oLskwPhDq%eSQrywQEQG> z4XXhJ0SF!{kIS)2M&*Jv_OsT!V%*fiJotsyT4Dx!SreNoavwJgi()=1JZ zy_41rGjy*BCdLl+N~q6v1f5^ae?m8X|25-0GgftDii4F{Qj36Tc+ z)36cOTC*`>f0;Np^mieQ^NJ>)m+I`Kv(f9$Qx?rwgGo#A#to{N}s4Wn9q5qW!_ny(3i$r5$h` z2M+5X{}~Ak6G~cgf}pZHV-YGq^K6!1CLp!Q-Ep7afY;8eGis&12?Y zuWC7Cdw$c=eD&ysvM2Iw8%<{&gFP>#JS$$>Xo~-o=lE7~ zc0lF3HNecX*ZB3EliSr%8Mp z2hI8Qr+-LT1t6Q-MtwrJqb;RaRSZgl)Vok#iOfeHV$WDL_BaTc_PqT0>cfyF+0g;H z#b+q%&3^ovKQYzJ1v16H% zUUGYU>dad;vcg0M;=o$P@&w=yaNYLCw95pIC zE%^EEoBhq^jBO(`FIxlGx6c^t2dvic@9z{J{CbVNU@@JD_cKZsN~aBMh3BCC9DKIg z1nD5C5alHiXio5F#BEUVs}yu9=~Q%pn2G?sO@|H(q{q)k+sP+vkZ~&{{&eIi_ayu@ zIZ-$;5tfc$rQ*LZ37Z{+&(sT307xQ4w<1q9<)2qkrK_bA-?5W+b`!pFLF! 
z@q2JM=CRlXwLrXPH{myj@SBStrHY}~&`DKiMjoYnT~1LON|F;MSO(%bOyUw31H$gy zZlx?vrHm~ngg{buZQ|Ag1V?BnIz2M9K{ISK;Y@?bJ2vh$CH*xwy4f6^aW%wrHeF@g z3-4uso1&%U+Ux{142e{eyG!X6ZWk^Y&@VM;E>rkDEki0-(yyR0FWKN9adonzGfhh} z7c6!z^7wPuYIUeAHN)7NBPlC%`l1VA6gbsWl0=7U+;fY$Z=3yaNc_@r7P~=nbTedp zNc>e;4#+T^rV^S2vj=Q*W&#EIO9&R}d!tq|7Tfs~h^X(=K$1$poNDenCSi+W1&UdV zoMTq#{NR-?PmvZPda+9T;znTZmhH|JE;^R%vrwA?QDYu4XMRt=xMHhW$N`lqhh8fN zL1XZs7j=<`|3%JU;NsJqV}|LdXf8NWXmmf#lYM_~g(SI7!+)gab#K#M zfvY4YjnwhK{2}SpC{mEb@81|wK-LT%7Bx0U2arzXRwc+97IW&+;3@9ZBajG(POv4U znM<5RXN|J)xeRm+4-rEw0i~m~Tgm~^JG<($+%0hJ06d0v?wZunQ@uTzrrh=r+v+O7N*(mE#APu-Q$21!Q5HVveN(} ziG{3S@=xewv@7HsSHMk&VLO?HpwIP9kwH2K^>GMQ@EqOB=6}S*Qg2~hiur5VmlN5@ zL@q-8V5lC8NCx=ySk-f)|Hpfy{~y^z|Bo+41W153b-KV*HUOquG6-**4V`YOBCd+z(9g>uii3tA1U zG`Z%+&wAP=b^aski%c}$S3(O_qiQgH>a3G|ie4aD|3kz?5yJDPn^uYK_3`WHesa&- z-vXgUB=|z}nSJsjo#$p*lS$nmSzol}C)Ih2;bdZjC*+|h z=8W|tkt0lV_Y)D3&iky(afK*pI>&+#3Mf&)jue*3hQQ9MQXo_dHDa8`cdIl?V1Ku$ ztDZ&p;h>kNo*k8YrX`T-F09W*O5+?XaiLJnu>1J)OOzz2qG%W$BDlz2#+%THkg8*= zuCG0uvyI#n^;{qiMF`fr0Pf*4qR`Z6Mfj|ow42q7b!2>UoL|q zB@OF5nYP3mt=aECb?3Q%yVl;9=U?7%-o}Wsau7mU@`}CF9ofcHCCO3{el8IqpOT)K zlqn}mg5W)g_xCBuYBtS3%2l`a8G10~wJsAq>t&Vijc|{ho&p~lN0d9oGQg%YrDb`BA?0+*&sxY5rZy?F@DdbiP=g` z(T!GYlYjqE)+Tteh3aZcW&BILY$W_p-svC#`J3_Py4bDa%?f3UyQjy4c!(Uq#z_w7DSz5x!)9|2?{C@#Tod%RLsU(Kkz(s^hs_jCv^{Yzd=H` zrG#d>Ij$@@SvGqvGV_|sctZ->u+{>XSa4tDI5}CWu$s6nb-%AoX)c{g&43*mvLM|J zOQ*sJI2=qRifk#SDtvn7p900Z#`k5Ta?CmlUA3wjd#7)k=%JyHf2`${;K$WocKJS+!-UO z0B8aJb1<+Arh`lWqnmik*PqsOpP?%qLagTPp_R(b~TSXcc%$AT$TDN@AJ-#oMuXbqV3e0d3*kDIZIsH(P4zc`AXa;$5|sg z?+nq7{xiXT$0A+~)#)307&-+D{c>k-!y>zmn5qSJ_4FygmyEi1*ZE=;)S)d)2K|?s zaZ<@6;eVF)hdJ(auXgc}+2DuFj<-hL8h!oI{)e(>kn6d@{cpas7WS>YZjISh_U5|+ z?as%uyJF`&9JlI?Fi<@wtQ+cl5SZj#Hbq=b+s&!Q9RLnB>sU~41mp3V!90XIhPwc*U+5|%&`!cdd40{2t z%Hkm@9P=e8`hlYB8s;$9Q~W+DZMp`((J%-VX^Kvf?aE#6$(Gy-ui4qSc8BdM;?f3|4|`+a__#Ey?@g^(oMtz#Cm5xnPI)h=0$FDVxN7YW~4oa zir*D~G&`CQRVkG)Uq0JC_KAgNJ+=@}GU#2=klQ$3byc*StYLFNz0N|{OjM!xH}?bn z?C1|~k%njI`AD5{?!V>)H14Hx8BV`x!goc|FFcS#ow2XryuhEL#cHNG#Wnos6B|16 zP{CF1&as-?EifIk6jBJWfV zoNHkJ1KU(1W7(XrrZCJ!Dk2WJfQTlIrh%6h$T%{%li}|-;5Uor7hqs+a6^N+QOGsa zR@WJUb7w6!F-`0!e_9F>vi}nE^mTy$IvrETOSwdcJFBK5=1!d@qQa?Y|G>0x4&uEf z;_IVuVoETp3;E&b3OPfJr@;Np;WF~+p=r_w=2T)!h<@QJollVqYr-Y@#3MWfJ{O`3N7#6s z5uGOFONF;R%xJA6o=X&pe|j{Lf(+oMCa?^*l$@fthyYq7&6ao%g*?s_d{e8JwSWkv z9U3(x+83a}T7Ep+K9;E$6^3F^a-JCy{pqMeE*iu^53;bI8?eK4{#pt;hv;8D4Xm?G zabY|7s!bU*)VBvj1`!n!hKOV%v+3yD0B)9wS5y9H5*1X#O;YnFnfxWxoM z-}8JrFHVOQ@)U@bU>}^0!uF&5c?eJxHcG)cC*zmN)=QLvCC;T7E9=!NJSf`iC8NP( zpp7GbRz}tw3hsLHMUatxwF-G06kh@;94Yry3HfvtdJw>YUe^saK4$%rPewtkD*-gA z>Sf?7>F@-=-?*^==7k^S029$jf70nP2}&Fr*~kKgtAtTX$vPeXjAJU8f%|PmcvppQ zRouNEhCd^L$|0go5BL={x~Lu^taCBMc|=7i%AbRXqVRvR0Ih2Hbqa3G$XY)Gr>}-v zK&{^t7n8$<$8h|5w~rF1fRYN3z``z#<03&6^_MB*2Llze)XHNsNJgdz@g< zrD+;&flTOb$JxXbwkj44g357nwoBL5Y%A-kNFC6CsjE&*V(~9AbqZn#3mn2L1%i0f zl_MEAD`UcMy0-Jdyxt64$1s03*vuC4bLlDiNG+@ly6V!8ZbjQ0->$JXCit!%oF(P8 z_~6#qS|Qt+IFT4UGZz2zeR%~5@*(4>94T~ zsZ9mTtcndv?NpMsQBdVqOWayAKO+ormWOcfDmUK3etVA{b0G}U(a~H)CB1TmT`)<; zePH6I%5dXVwJw%8Y4sLRZ8%4})Yf>#c zr`^XU{59j!v8(u_c7%`g>NQOBC>Jx_P~K}t7&W?baR5Ke1W8_+uafbv3d(%eu|BT& zSv$fHOy$708{6syxpX-9>-FA@Do|p|Xp+Z~(W5-p*z@)}$537x;2?POnm)BUAW zC#Tbq|K?$po9tE1N3>3lxGrg_-S_B_-)o&mRk}_$VMpjU13hjT@5c?&AgB1d4k2z8 zNZ_(!Zba?rUZUW^RV#r6KzLo|{LPPzap0LzU{=dJ-tmmyZoU`Yy%IRk|B$C~J0O_F z3%{M6)j7$;4pS?uvbxr};3pB1e!bgywC&(%#V`#!*3;wutLYvG0wDpRQeB-MtVcZl zn@zn(<8Y?KkWT`A@q79T>%B~<*V9b38=TfxgQW8dMg=zK>ng?xm+1FFS;-+07E#d7rd){X}} zMNd}Yzh?DEbYR!1^`VbY-ITJoF9yth4;&fRyv-~MS%MdGbGe!QD!S%28SJ35 
zB5njggK3Ft)&11($g97Rw>dkPfZY+vI>JGD6P!ziv6ni@=;IWLPW_P!Y!vx@`R@1S zADV86do+E>#P@Ok=`3T&s8RpX3=%4vg1*hfJ~1+I5yFi~;Xaq}SFn*u0KyGG&K(`C z!DfpnIaN|Hpci1c3acH9eZ=CgrJ=K#sIPjcXcGE(7=mWY_wyt4_eUt0m;7vg3|o8b z@xMGW`L_!W{6BJ@{}Y+u&-*ZPC5$V>oNH{_6!t}m>v}8M;GXR;OlFf@0y&&3Bne4W zG?RX9vgx~XAAg}# z_8?fICqh)~J)%Or9idcW zvza{srLwn&#CFv5>gPz_^qVcZ)UySc^DW={w+ntvTSZ)lccs}QMg&z{eqy)l9B;;2 zSo!0XN8qQPTXDTpM<1sSW*3(d@k-(V7l6Rw$`lG8QaG9p^viMWH&J$)YY)=TNuO~`j+Ry!G1_PtxN1FH z+vs1WgAp6wT_K&c0Nt_38o(Pds{lIeJBZXf_nFOygj?KNtWJM4v*=`HZX_(>N~zwL zb|kjDP&fv=bS*i+q1~Z0$X@6~dF@q2N7GTa`9TM~@7!avzeC#*eCBXTHboVzSzpvw z1ps4+svFiQ8rPWZ?)`Il*WS5ET!YB?ksKH zK_|N+4=MdaLYU6y^I+kzVY^@VmzvFh;(m#03*Ut+R^?p~AamvYVFTX+!iQci|s$3cOyVuHP(P+qK!;g>s z#@Aj)V=QNQbNiIvtVB+6pz>NCVyANlPdpxYY_JY84|ZPY^k7CnryXdcNR}Q2dVcEj z9d_SdjW=)c-^?p@!WOF?);ym0j1Bhebjq>bP)fE5c)NUK+hK0+Xyn!ta~o3?bF@y| zMb}FU)&`_Co5o=EyZZs62si9VmY>ild1zy zAH5pc$LWu1)^K!6pQAvV^8=$YrosfrS?I^mkmbgtDmvl?!O~6NGWS}HYVF`|{YHVa zCCRcrN9TOXRQZ=CYuHDy~t&`*IRh!BgdaPh) zH#SwfRM_Y{;KDxGJOG(c3#s)(N%68gS6%3@hWz#0*2=Rv0}{g|4mr#2K(Dm!(H6DC z;7&3{iIRxz1D9g^4|4)$KGD-6q8349HAyP?#L>3a98>Rix35VCko+%I9q` z6MD1A6{$l&$t4dHW8=nD_)9TJmPdke0jBNN+ zDm_p3w_ zD;aQgH@zttwd24YsAD<&NPznWKC)%Bj5A3O)}xDsu=#emu2}3p>1eoG1(%+n5HYgf zJAcRdI#&UcU!Q;5S+I{`T-T2_+~z`rG^T>fR}wP}nlU%ocQQ^M>D7DYeRk(7LDbF> zI?|-VE9>%;mN4o`qyg+lkDkP5>Q9QD7#9+5Qc%?5=Coj$v4`t z2WaMz8h1ngH{RYesL6-z)4oC~B@lW-4ZVmMdJ!-{fPi#SQ32^7&CrX82_*$7p%(!I z2x13BL~JzaU??h8se*_Kii(xZfA44a-ZIa;v-7_14qw7tWF}w8oaJ{O$3@S%a>STU zuk2e91v?kw55xM4*A~(+ju`*Rzd8Mml{F$=(f)f%Mk<>MHV!Gi-p^N$8j77LFnSrr zVQO3MNw7)7+WupJ*<8cm)Z^3-QdWgL98AMuW>;L>(EPMW29LcvSKQ5-JO|^MF^LD< z3a%g{l~almr8(B+FLbkm@fO_3hP2G>Fn47sUODS(u7f226mwQwjjB8-+??L|fc>dc zv)xwq5sL0qu}bly-L1xPogu-RDBN|Z|IUPe}Ge0 zJT=>@xsf&?h}04$gmZKEU>Z9_7ZIl%dZ@j+ET1e}Tn+EZ1hC}H)@-0`F<+C^J?i~= zxVD@5bEi5t7qMbGEKIIjZ(7mnWlxRRSMj5hyBW$72*!MK$OQn6P$gXa0!0l>2KeoJR0i7+k zK|&aP=;6;oq+?*!r3A>*HFtRW#v9I{xXni(H8^4^$mt9JZ1Mq}0ZUm_9>M^UD*T|I zo|=};Z+KE((5t^2v(BdNZnR@Tr+1Eonx31PZFx=r9XvMbjIg0EiJw+0+B5F(<$rqr zH=h?dts}AqU1yj)7JbL;+OloNQ~S)cnd3zH*Eq)M4yKHvJ_gstzr*6KWFlhNq{uPo zKp@1aL*jEu!Xrx<|Ego7Sg4JbaL_`immt#Qxcb)x3nt~v4vCM`dRN7ZimZ#Xq%SN&C|mFgY!91GcpMv+jXRD6 zz^N=a4bL0oVnKCN9%Axx&x+neCY`6^7>#zv$DGovg-T4ECF8u>PGhdfuUrnYR~ZgJ<6V0e8_ zsv0&R1|@D|iHjor7F4e4;5>K6iTPCVUSsbn)@ogx1mR9o>0|-5rtT8!ep(^tshFuhqQ;(ROkKw^mlSVg6`B#&K1KD!1-mouk^VaG`f{ zf<^_(3h8F|0!3T|;OnqtAlkpdy~}<;eW9I|H7X>;UN)TC%~#@F*lHkucE__x`wa=b8Mj>GGxB z^5!#Hfkrtv9A}7f;^N*rzS}5-TE8&gT__T#cEnIvC9yK`OF45Ae$XwvR}cBlivNKv z<^c||h7te3eK~N2KEtgB94~d=w!|soN6y$@ZRR}*XMxmN0g9Jx7B%vB%$?Un16#e` z0?4OKcmPS1?~Cw+ZMA4fwMZ14`;|4W@p4x+Un(md;Zu`VEggL|SGC>7sKvaVE)qny zJ_M=tTMRsps%DFeaHNZwtLc#H+JnZd>~)+~H1rq`36XaCkM57ra<~l4H%QD&ULrhVf{@{n|vfgQbJ*>_a5XQlp_4Kxn-eHjy)R zU%mM@#Bwj}h1i9$p%=z-QStTXLo6K(>okl-%++PW2O-Wl51QQ-jvbA&mWLHcoc66b zi9538R$F9j=OlOZT&Kov#?@0Cts)zGvfR8y@hZlgC$SL@d_Q!rHys&&t>EYN?DT4@ z)*JR|6&63upCla?>Om;;Ko3ljmMPNDmad>`Ze|tM-2y=`+o{bb8dmev4q`=LZisAf zN9vo)FC1_qB`Pw-bLY$TLrY&mFolSlO1AN; z*Oh7fmEMRHW~vz{W*jMvxYYQj!0zFOE`MnD&!088a=i_XKCTlHe=PKPvFGk?v&@Y~ zU2&_Wy_3yv2>kEwdYyyt;%-P8g!0! 
zq?o71NX#jYP{$^1ziMzC#UrmQ|Ke(l9You4gcHdtI;HAeIS#bQ9^xBpzlN1pHowsImii7;9#QkR)`yi4M=J#ICiEB zgb~dlQI{4d9$rX=!Zxh^59Cjnr^439->&3yhln=XTJycTn8xldo7hwE+7RBQqNNf=wkL`OrZ4hn}BX4dedY0v$dRKht zA!H#C72biMawe7?5swxk)_Bi`DWBa5JnQKVO~J|(Cc%%g0nG;CilnEGgyNGVrK2lP zW#2N^Tu`#&Q3}VSR7#@88l(1qkCNb}NDm#tR@C!-vCq2k_^)N#;f5IdwU|jDwp{7RR8r9&^W5y4SH%Y=#=i+J1_;at~FNi0k9Zz7863$8FBMaJ- z)Sii5(Kz~B*KAX6H&~dZl62)N`syqAwfjl8z9%&qJk~RL3ch&tX9F%wqYg6W5xU2a zHs3xts9kRe%1xdCCgM?_LqwZanxj2X98AkE9kbN#M^lmT@CC%>fu|DGT5h6lQ{hF%vi zS}VdB-yrP5!G@MW#k@*OaMsmJ&x$>|TNPvw+Y-O~J0}v*Dm)|LZd~puwH0udbw@6^ zS9P-4v({eN?WHelHn-&GBRv<54*LMt@h2lDa_@&nyPM6;+eyyMV3(~YQ}*)jli~E; zTl=Z@h=<{lVZvGcRnCK+#lMBK8rUs&;Yd>BIm&oezp8Nfn3Pn%JvQs*o=cXHP`~L3 z(%Pe-w43DV2Gn5ic#`G4p@X^{lJ~g|&l`fnNLH}n{{MUWyyElIrIpj=kEbip)< zv!rKgOlPim&eWfuX{emJ{&?o*{LJk?GmX--&8D+0p0lmzXWJ@g+aJ$9nxB30XSPFn zuFG_;$8)at{9J$K+~DK6;rY4IKXYv9`Ek>E4%GSO`T42J`I*P_bMy1B_x{W;NWWe( zeZAuO`tAAG>y@uJ9>3n4f4yUMFYWzn(`^G6Z1Cq1;b)%>zGqjNJbv@{&l^ByfqE~z zU?0Z~As)XF?j*fXp0J?#Rw>Fv{9Lu6XXxmkLCnHN=M8va1HF`P<>f2T|Dg(!F|PscAms9YBF z9HulaB7XQz)GI~fS8|hUA670y?~8nJ)0Ne{>(y$R(g~Sak3Ew-%X=05pxd0rl5^P- zM~OD4nVV@Fl6r%6ttWzxaY0YWL;bO|OD!wn-G$n45q~VS?NliZb+{>{_*A3FsvXPmg*2rYkzv~($HW!C z78fIqwmc^BB+6MuD^))s9bKyTh*;*GUJh!5t#q#~Z9}ZL>i48(&*dRr9TO>jz392- zcFSjp=yj>_vCpu?2U)WZsV)slGE09HW^bEAXa5qv9u=wd`mpq*QEYT(N(|aG`7!Vz z^s3B9%gzsMFA?|Y8Cvb9xO4EAPo(3Vw^(b?nf^^j@K)Txtrrnnv5n1g%n!V>TQW;q z!Vy~x=ga^UNaPca?`$htKooYNiA<8!TbbFblB*l7I&pZBI`R45-t4+pN7ylh#`VXa zlU<+-w`)Xl5OY>g2bVRQPDJcAp;5}0Yq!3PjI+%SeBAd=4iSuyAbt7kwM}7MMIQLr zo3PFCbE3V!Zt;t1IxdSn^;FURl;R+c{uI=#Eo^ZnLhlotv_mi=BoFLCoW^o(_eCy_!!w_X$B>~H zyg|8dW|}e!i$1Sj{6%~f=${~QrS6G{o3jRk{lnP>L2ZOfV!z$^WAJ>&{V4f{_O&v} z7IT|u=&6@bKAm4-pX3rqzm8ts`73j2-?aOX(C==m-;Kv^^n4O&*}EV@ZiGKB1K7`J z=B#c<1+1O-{&Rg*!+8txNcP?NQQZ8UrPEGjNY+lnh-YT(2l4VdK%~tAJ{9t7PR{Zz z&m}_GPZJJ}g;hzR!7Q>=w*{&WU63#dCbK$;`AQB||AMgW_k5C$Ir7%NTrd1}x{=MA z6a%{dTg8M*W?_}pCnd06v>Y}Sin~JJP4MRwDo^JD;21gK5Jho&>VetD7yQ4Owubppr-IOVs`%(rAW?5CZZ~OLUM}FRn=0HJH(W}R( zZ9BIos?N=y{8)B0w-yFSUAC?~`E{eb#Pg{bQRrJ(tI}RtV{hfD?-9>h&WmgOxTG(= z)RQP{Q{`b~zu1*#a5X&WN6f~HM9c4gq<*bFe);$UJa@|bveEG?M21I8uDn%Z-}5nT zX7;y(<^o;}u4k(L141RuKD8GQqRJFZzybtPeX59^JhyTU>685E+nQ8v$#}V)efMDSk(D_~VcAl;GdDLO}O;2-mQctw-{mZBAgB7Nv?jCmju;! 
zNBau3y8FaFyLZLYFQD~A@oNnCi8G0MFWr4_+9D|35hO|1=f(&xk)2*!tg6 zk-=MY9R}fZkd1B%Hv8kq}Z0KzSy zh+>gb1q^9}_V?`W-?j*wL>LRN2GzR*+JcB+z}%b+;^9FDvuG~%519%!*7yh7tEnYW zb;HaK#dI(jh7D#lFjpoTLM_}!lJq)X+8P|;XK&+e?ja#{uSX=SSKri1{yv6Xz^}Hn z8mORA>3n6c+2>T8agUA7HzvU**z5SM7&aLmvd3_If^Gk@J~jraI)VK)$n`{keh6zp z{=6%B#p&taKONtTku5XcX{$D`Q`W}{J5u>H-BD?Oxf?q066h((RPIs8!Udr9Dkc)i z=Ph>@U1c!g-I*;W71)8<0U$^K`_Bm&(GRTR;J%REP}CrkjewBe8sCe5u}s-N?tt~4;}L}2-;+XMAQ5l?LE7*QORV`akIYghfv zLLR78tP{Sh6=L{9*0XmM?F5dwmn}CKjS+Z8GS1iB9jZZEG2%p zm!QYIh^IXQVfK$Iwr3gVZHQ;rKwKux!b@xrn`s%nXmQs<|FPH76rau=n+j&pKUZV*r6V_pBAR}P; zh%G}(i0qZEl+xKUdR3#apZb<*nX7f#Q0T6B92t3bZOb~vM%*6f(<2-eY8F4wt7Ks# zUx}{GYDvF*TwnujaW2QG*IwJD$p4Vnx9RiDCWx?|YG?1P&soPu7Li4cx2~-hg8zKE zakpw+Z|@Gh@bJw_^gCzhnV+IYF$U!@VZfp_VqpBFN+zJ|n35X3{Ha z=HBA(e;)aODrRx`-CwQDs@9}dI>b7=taANGT>NQF$!wGn9HNZN%ap28SXD-NO@dDq z6Hum;)j0vB%TgV#$2HL)8Y~%kh(5<9x<$2fD?eM5u#n!)5r2kw4lV zEd7-QSCNQ>nPo*~9;aK(WR~^&1x7|ZQa%2q+&0($k%G*x(zga;8QH<5mSS{6OX(-J zd!s4J+EK{}f?WHQJzwQ=*^e#Sp`}Ist{1g3mvQL^HY?9F9krs)HCke$v~Hw+T$AV} zzcty_5{M@%n2wRGPVb%>E4Cg*rFcVhOpV)uHvDA2sJ_!4jNKn#$i>UA?R=QlZhBYC z7ggAx=rCf%9}!b4=~xGII%=KmXh;D>l&=51-T);;eB?`DJu@zuX+PnhXF7z1ce`JI zage>0ajozaW@}-v?xd?{dvHg(#^f;}<(i7vV=&2jdR}uK$)~3>{j~b+_#x$&_W0TgLX}UG$hwPMjq*sVT7t(7_yxwDK zhn)(R^AN_IH2h3yX>8uNSg(b=N#>EfyEz&r=sY`*ZD#m4%n7||vxu5mykGY$)3Z_z z`NE>MY7BUxyAH}m+$36!I5@})cT)GcX>vOOw_=+l=jzPTr04n%9Gs$fSjC25MA;fi zwU+uI`SEyI;aM#V1>u)|+$CUGBZ&^(x1ziZX@J zeU^|aun5cB7owkcf2AT#8YkELYJBI}5BhdiQC9XT9}7e2UMKErtQRU(?t7H8ZE3P` zAu|UK8_F&%^cl{{8Sa!SU?`VfK5aYWN_x$!BckEyRF%lB~M}y@GPlKR9amdX&St%dH^ZeHLKvf+%~38W4$ z_YNf*3~y3wp>JT-&;&oh))-#aak~o^C@-r((6;#@$^e zfr4O^{X+WHG56=@(J&AcENJvf?`+^QjL%KUB?gN0zUWJ2WV%CL3hBop;qugr+gur^ zSkR+nqf2*zd)`SM2jiYM}w6Z2Ke^3@;YYfR^B z{>aB+3bc(1bWap;QjvyxWd+6$3QVR8%>KFTDzq>vB%CO;PAnvr71}*0w4W|?{88wP zDRMO`aywCUEV0PFtmx!}B9G}JuOCI;m}1>+kRVn9%?u37oh}?@Z}(7BY_4rkYNWjy#t!J0BTe#{i6ziu|o5S zr9T)7JOBs+l=6fyXY)$sTv1@W5Im_A*A@Q@PpN8Yls(TJjE<8^7^7iW&+x=>mQ7?ag{jzrU)WjX{(DH|U z_fDH!)`C}sdWU*_VM6Yfjz^7aJ5*=2*mL5D5|53u|J-8`m?_C%Cckjqf`qh#^ zP36|oAvf(x<&k?2U|lb+b@$57eou=>#MqGEO0V!4jr$yb9`+VL0UY2O^P#%lRt1mf zO*Z=F7e2w)qZg_Gu^}bq6=QF77MP#r9)@)~ji*j7M@zO{Ajm>Db=& z{6R%JqQ?dU$!ud|3z_n~ntg^xa*YPjmP5>x9gdXB_Y<5vyag;uJ`it!C3I!!d3^(QiQBdwmg#J-^a4MyB&m+0?}8wo@e|cJqj}oINjG znl8f?gvmm1QB|MnUcDM7R(983yAl^g6(?;*u9Insd*uzXacy!?Vf$e7Gqurek#K3+ zRA)f`Tdl5s#b!4v0sUHSny1mLm2q472*uCWZ#K94*oYs-HT4LGZf?Npvp);&t6(Gx z$=;p7&E5<>Qp2Hej1i(owJHm%-&sugo_a6V7WeRmbj+~ULgU)Wpk7QiWQiws`tiP| zP&TW8Mm$a z0Jj*MAJ!NNTuV+xNM@=2iR@3Qu!o5pChk$1Z$>vZtyj1W&A>7Cih@u{Fjom&>CCGm zg0tZUr*d^aS#>ky!lsa)r4{Z<-4mNqcGZ(qjQ9L3?Qh~k5b*-{4QTBEo{6(NlSlZC z3eihH$klg~ZDi}8hb2FyEmS8j-^6rRKD_T#aZrC}>vQiX)?JO?QrfbgOlt;d7vM5C zpG|u0n6Jqojo!xiBIFPINj&QGcXrwRuCwnAyniuB+2Ns}<>j7c%L5)FU49A7%is7~ zp{ff47X}Z@^hcy)Kub9?Z>Q`{YsMHRk2?99mb$OV5W7GhG6lU1d*o7_&3Wwv(j*A9 zeKRZ?!P_o9PF0RuZIx!K*u{%PP`LK^m%DZXaPi}Yu=4zPxYWiL<)Iyf2P#kQHUvwv z7uXHAn<~~jQH(VfL`(>ztzx!)?%_-~mhAql8@ErvO6rC90UGioWQi|z3M`3>wVnWl zh3vaDeVwmULetk$`ec?u*ju{MNyQqz#JOb*EnZJOjN<>@vao4^4M4nM5$vG}J&peU zq~8|K8Egev6^70yPz@iQjSn0)=a2G-ONve$NqUq(F#anqK%%dxKQA@@R7v5_Z^TO? 
zYy0CnQPNYm5p7?kVM2z0fW|4zsP>NzNRkho2N47=<}MD@e_uA%)2sW*c_d}poY#%T zg=sOcHRRl30*I@I1JipF(Hq)n>_TO|qeePCv4^V)q)<{nIg#@9X!`H4%tL!+RWY~k zR!+5B2o*O%R7nNtBt#j0$7u9{y{=PqI@LI93}KC3LS0aGU&u7TMnt+*`R!Ze%C5Uc zD|XXV$#$@7YT`9V*Pt#Cu_N}dC;JKJJPm#u;P^qfX&4Knn5}vwwY}f#*i8|VVLH}3 z^9ufC0Wyr3EpgGZILul+G&>^woW;RnI@2t6?(yjs?XL?y*F5`HzoqWOm6&&_ zGeOPEm^gA82JJIJW8WXo8(UxnRQXK#uVk@&NfK-BR28r5*K!s?K?6lE z!k#y{o3(-HU<6(TykOhN`TyN?wgyHRoxzh}ai60;;LV4sH&z8&oHh@fg=Ya=G%F#s z=6Qlv8()xf7taMIE(5Ig;<;w-)0C|{GJmlsAf~P3bRt-}N)Y)y3LzW;SZ2Pz^vG!S z__0?NSE$@ZOi9`^zVk%rYU_HJ>lD+S0(3W_s~h$aL25fkBv*Kj&g9(`eRxOxH)mSA>>idtK17&LbBQ%b>uz{^|v*5mk(vn+1bu>oZ_ zN?^ws%XzMCn1+3-coNUIz;*kuU_vh%=}i!q@P?&DVN z1#|tw&>GqBD2GPHu9H*_EM>zpwjHjT`SSBiv3`4I=*wiammzo|34HtVW1n&Pq~Bo= zIdCEq^Tg~=mp$^y`_99OPfPFfACYXlMx7|4}5eN9?;W&e)9@MKxzc*ig2RB!^^CL?sGge;>8-;=o*Y zVpItxf1Hd$Q-lmDBJLEi1PZ#8BGE>Xnxe?;Qn2V$dBapi_f+MCRMpZ{^|n-vsZ`C~ zR2(`@+b~VnJxxC$&9F4hxGl|OD$Q&+4UbN@Fia=7r(1LSpVD-@wsiZcbjRIvXLN?E zVTPM~#<7G9_tJkNPMrS7Yd6CiP4zXT`nyvD6R5$Z)Q~o6*c3Hlmr6p@q6}#X6wKS9Djlw#}$Dp2>!2yHx|utMZmV)4R$#* zaF~Y{e+v=CQEkAP7yqIJ{!tv07>5#oZbSaxY+-|i%z?J7vJQdQZAevC79Mdi!YAt< znm*W{^%oEyThBCEXBgrUpH<50H^K-K!SP<0VW50$4F@V zf9ikUrKLKrmv(rGMZMiwUY~t&sCyYxY+!cn$ zQ2+ONdQt!dsQcd<4D#*jZYosP*n`+q^Xwv8N3+l{58Ia`VVNmqtG#3sBXg+uAO3q+ zfhx2r#&%-q0nC8+_3*VOnH~zxPupL$seY`Mr->WNqoZVV#kp3$BaWneq{#K|xNea1 zs_TuD+2fLj1umTkdpll!u~$sO`Xqa!tQ$6RLbu?s@OWs4Uf575`A$>7;nvL)S!6p0 zor6C&f$*XU&tRV1P(#jZHV**ePM5p#AC`@y<0xjr*EbSqU z=qxQ{;kYgM9OSdLESw6d3Jcg8BkvPY=M>nDf0{4uE0l^t$R~7A7KkZQ@(e}VW*!k6 z)JEx(4S2L|jiDms=?QXU9F$!A++wd{-GXnH_Rg-Z+=1Kn<9(QLhy`5MJJWF4JQ^{% zB6nW*TA(arc&twjUHGun?LYmd1f0j?)JtN97^d_M z^U3;hn|{bCGO$@=rd1e^Wz{*F4#sfxVV6G`+qC_a8Qon9Ik_Kk;49YPhs{ny?Wp$K zp2GsV*H^oUG8}uIp6JW+kJnyYjkeIzy>p>wNDiSp6Pe1e$gkI`8 zVwb#u{8A%wcc9kNN&8dJ-REE4xG^{^i~iJpOf2i=pFgoP)la39A+Ge7tH1h?+87bQjY%1krVN@!Go=7o@)h(M+f0P!Di6fKe3E4|i7PcPZ`Q&1GgVTOP$B$U>A7 z4)I8&8uibz2pdf%=QsUi7Uju1;xf^7`(_->gq@mfyHX9SRTT_oE}Oy|mqdMfn-}$N@+EkC3Z1sm^3cVD zLPX@hw@x))`jeU-y;<&*(h2(InRn!@Yq=x_#=+{TGZVY5O;ac!6PRE?NYunk?@?o? zcb_m%jE1D1yBv%qDoDH{QGfKi=0*d8pvEu&RV#0-7j7O9&UZ{j?b#vcV}Ut0%X@tr zUC_Sy;Nt$mI=8qwqBgGjl$*1pGV;}s^EVX+2k50%O+j`sGbHX`9KyHVW8ca|-mf7?9fk?f!Ue<}h{7j)MTE%;W!AmS4!}F`0N9N?} z;#+UHeR6i4q)CYV{_AdG!~DV)=pZAIv9@}nv!im|gyhEsS}dcvPH==~xU0n<-_R%b z6$evbiX4rWY5r&Ye{dj!HzA@i%5T8Kw10}3w3#YEl)rk6e_F9e{Us@xwNJUK(_t_I zWjg*&68n9C+mwmSE#3@%DzyD_VNW_oFy^tPQB|Y^ro2dIeBqWcGdRn#O<#bE(h+Dh zJKeo4CwMLxP-$=x2nlB256Yv))d`jdu+xOR)`hUJRC0FjOV#0U&EV=;;9z6UA={h7 zadkd9>+3awVd@2V{SdE)EAa)(Nf9yc@C*cHJNEPZZKq-_?wP;ls2Cyv+%tXp%5Q_) z4nidGDEq~`9)pLol`vggHO+EmQ6SkHVy>PnVmxrh-_`l1vT1;tr`OSo4!&UX1vZS} z&1E>3;d?fSUMcUlRvn)P(=(&c@$0YRW7{pvOSVpMf1~iQlRx8y^bvSAS4F80();gf70q3eSd`X~ zS;ayYMBQX_q%16d(7Lb7Ko971u%N$)YT|of|I&cX?7eZ`!fv20L*`89GewCesO`Z_ z53T`S`3$sLru&G;Q#${?0gKbN&8t$)mVKHgorr@SApc1TdP?+7dTzNR_T}feZXr2@ zwX?kVKu3<}&1+$IH7qgh>yW397dS$9pWtmx`eWoP;GiCBmbXbldX#f~fa~(O(x0J~ zQ_UBmp@8Y#n}VuwRw;Namta6Y*tiK5ndW@b;b0{=Jk6?L-AhW4YM4&(AjH%>otx1! 
zn0U6ms+?<_jTMV$-0N?o>TxK05&i8i?^k`ja^dIFM1CMx^TT^5n@XjL5I|VM`NI}& zbKPdoa^`VPxXxnoCK83NV9BVLFhhBfhFyYH&eX0FIUkQtNh80CJ|)tw^Cq6|68iZp z-ubkLTxSG1XZ1Xa!mYS&rDD9E7jc7dEcs-tZ2wquWm2kt`{(MVNei4NQ}re#)qDKS zi#t_2SC>zH`>cFi{QAPLH2ZyL8gq?BYnb6_g|@s$JHV1RztJ|1%i8)AlfD&TGqlhA zSc+&rwrA1&hJ3wTI^c{`m&Ks-0X%Mw*K~XWGwOUpAz_~R{rS_s-=FXO{l($xqRCu_ zWQaQ%mOw_7l6l(5d{bop-T!wDMlhM1Y{LOw!|?#e1qB9yc>a;#5d=u-oXQl!O65=~ zlpDdWrC`M^Ck}Dlse#M&U!}gYyHdhGu<-2#f!|mGW6mFMXZ&M^S(~vkl#>Iz+lTMWv>(nHx$^$(K8afl+hr+b(3=q z;Jqt=SwB!lw)};qflz?DJHSf@`=DTFu&^RklmL(}L+R~0r~^M`4v!N1CC;dqz(kx83ulrI=-91 zp@#1e^)IHS@!->k5&)?!y!#fM&O`)}p{VbvL+*g}6yUT9awNf4mgKcdfq!QEcmUr3 zfY$KVs{1E4K{2j^I>gc^XPKX%WGMT6|oT>5?o zu&RK=w9^0q)vpLxp@CsyHj+cccjmzF66N{}9l49j^&I`V(^Jr=tZV_OGhl&F!A=!A zVTxTD_9^Z_3L>k#E(_UMtbiz?U=FWj*bgU^!U0fh0)S{tiG0Bqg3*>V=M*$#sh*KSptpeTp&OI0Ie#OpPeqh&0X9WQ##N9*wSq*&{;eH zRkDE^g(uS*aJ;!WoGe=kD*&(|0K z={q>FE+7#o`j56FjC7UNnHtmaNNZ9huTatc_Kp#8d!ggv*-f^VR9mCzaM6oujC@3 ze6)R1dGe08xy)vFy0TsJP3EN{=IE3%76AR zpES)G;$@!M6YGqP(M_eB)4l0L%^?j-PW}m7S}Cd8?$ehUheub-rwlK`$?+XybgUP; zhZ8NrTJY;{OdiQ)sMH7hUxbd7BJ#gXk69}OpwnsMj2Kr>CLyq-uQ-vssDwtT2ht=N z{gX7Y!yb_+g+N?aKQ+E$3?X&9G!3ag&Mr`j(oOA~^6U`=tq*m37h>$6W?(ofyMNT+hTCHWe--mAH?wWhWj zT|r6DM~WF|>uleL-s^~Huy0D3;n8#t)Qo#3WQztt+i%K?&Pm_S?}XJHRtA5%|Al(= zto%uIGA z8O6`LzUd^YSakiqKcj1Ax-G3dq+&4N@i~$xpM?g0Vdl=Zwyr>*tyM0TayA!O`ik73 z5r(Fj^p0No2;NR{s+jc!e-$KZt}>%pn@UIhqtYMLPR>e+L&xz(DZ+Q=qY976oc_Gr z_2SegibjgV&iLiYBle!eNX;9eP6X;VB<_$icJzW!%C?A$mYYz=FbhbO$W)XHSI?!$se@j7Hw!0RpU$7I$yfr7-1>KIpO3L z*6#nCpD%V_mxh$rEw^zNyS4I+sZ(Yln;j9X-Zm8$ZgRyTV$XHG0vpFiHZ|g2oeLst z+P_siPv?RNOc5GPw&l&w;DxTQ6Y8*n7d_i}#QWSOx%9|l96%t3LBNK1*q}mn^Ni9V z?S}Gn!VYj*+8`~*>}ON%x#3aSJv4EXZ#?&%nqm>~Fba@j<#5I&ZnGhAJk=m@^_iryP0 z|A2jP%u*c5?iK|?P?o`6=t(?W+7Ip+J9RzsZAS)vj!Cn0*0l|h>NokLEh*=@EMeKM zyniJgfvf1`J-6$tBv1oC@MxGHyl53^F-i9098+II5#{-tXlA8_0%(N*CrT^al9Vd) z2uIb<5v=h?gT-%`rYlz5hjXM`4i1<)Sx6_sZhZ+o*#I}L(L#nH)QA@HH0`KP?vcbr zzAyl+zCVO7tYTRzh5%=b`tWmEDM%!yQ*}v;+?Jbi0#L;O%^GuOlgd6!Zg#Kid-EG6 zMs8xoK$@I7gZ7h{Dp2Wet*>`&AbPJHEb)AuAkRmk6ILjU$n+d(kyY+sRAK8qQx&}{ zOXSaywI_>`sG7ZlyZ+ZQuRg6TH@8jV&jmKk;L zRe^ve@ge-e6W(`H;22~usY_4^)WK!nn8vk6hQK*=2mvM;tbvC@UxNfRITOIS4ZcG1 z`xJwlE(VTE%JQ~kaBqh|%C4oP0fL+gVlO~&Sdrgo8pjStgvzwJR8g+pNi!o{`jDE&i2>ipP2-Z!Z+HX-MkgBV(BwD zy5pk5rhtUN!d@?-d9!1EdptBavHL5|T47i{U+}p??`Jjzfb9pk@OY5Z+dh+}>TC&F z)}XRBBnIaabnwk|-}8l@VvBrEt%XIV6VuSn_+XGCz(s&rNZ_+!f~A;oM0uL@-;RrR zr)T=!E@WWsX8=P10tluGfF!VFE(gUV`zMNJH*G3B> zcnGr!(1Z@K1d4pTIJ-MXL4 z*=f1zdcScnAt;dY0w_U}k`o2*90d9e{Pt%o_yrB^IL`lMjr)Rc7wzudHS^RDlRPaQBfFOY>u4u22IjyCpV?l`Z% z{<%IDt^QhS5WtBGbcUbw|L$0L+~!R>t-t>|Sb8<#tc<8E@_2>ZU5A(>Vc$mkWBtLh z)BP~RhV3LmbGKwCJVj^4_*aQ~u`rV5C@Qc>J0z7VK4xfh5R;gj(BmiTUvItV@rURf zy1D#2T+CJW*&$@?LaJb+Q2+hJU{@1#jqsXk-|B-;xe0~tGD|jb(f-KHK;Kzb%DQ(# z)H80XH|(a)`Q=DNGK+Vo@jNX?;LrC9AY#f_Jo!Se031zP1CVPSyjdhf9O@{yIE9Mm zUPYbzy>?;IAmzL+8Hz|npq&Qr+=-1G#xz_$Mnp&#{(Kzxdi+TC7IzR>@xml>fddiS zN>})7GsGc?p)#D%{1)=a2|WK02M5PGBHVHa$%f*3q-@&YsaV8ioXuBn$_}0%QsWY{ zYcq~NNv-2=2;!%&^3`Fvvrq(~Nx&Y9f<~r?LSczy-XRvh?w;ZCDjI)rZ_M~s(wZV| zW6Pa$Eg8UcH4w4T90UBZD4Pa&))qXplxL;UE2Yrpl@;=b9^+ZQ%_=J4U@IS!4LElw z`?lF^(YX7tw5SAkS}bvtMjYRA|H|Sk#zT;7=~6d*cmQAivvakv{N_tG8|=gf-rSy? 
zgV%+Co2yWF6r!||_hlS%%J8J4%Ejl-0-SJMQYZWX*e)ADaQJjjUmP1?@~t$+JE$-& z6!KO&ALY>N(DH{<*f2VtyULsIX~)rsEC0dBBemG8l_IjSsIff6EvPPa0X|N%9|gmD z#sD=#<%BJd2yfNVbU98t61V_MVgq&wY#_D+v6BEJ1HSz&abOn_(2D&(xvmqo2vj(t zBZ3DTZ6scVAOS`OfYI^8u4P5wyrLu5ike#rJ+4Kcuq`qYk7*Mr;TCR7Y8QD5fQPUBc`NQP|ToWXRvaBE4@|st}*X zBdwnAay4LR-P}Y)*ms0sc+KI|H#I?d0^x4R{7*rbYXKAWF12D!?b)aSS#kbjUgs+<_9@X(MwpQ!Mu zKDhk2yh;2=l!2q^%IVf~-!-qExKq{aNU8DLJF{@JJ(2Qib~Xr_3*s-q2*ZJ*s+lli z^tNGQHfjgYxrA^uvUF0r=B;xj^2a?|O~3@}F7JW$uoE67N;Tkmqjp_ZPFZvHgSisZ zngV+xb_eEC#ltbom@X&%o?Uf$JMn}mcPgf=I9)w(J(lq;AzNn@H`CyeTb0pJp8t0o z(EI9wh zv!xmPnzcyE*rJdj`#NJAYfaWkDqFTnq`pMUkT8g`HYH^XX;Ze6Z_d*>=kt4e-km?; zc{9&E_kG>hg&|71oV_|@KGFg?U^8N{K@3`Y5O9)DNN??Hv(|chvDFc9MroJa!Z7_DOul~}AJN0}^-~`D z+;7WM$+v&YQXXeSIvTGJjbH8qknXFxe>N5_ADw2hbBj4DlCQOv*XCakV>DdGj{F&V z?%208_J(DE@z8c;?+QQppb&i0vN2*)U7IaVn$aGfT<@2kx8is_A2Hc2-~E&uNZIq1 zd_G8?hl!Dnuk-6?(OvobM~Qy=^Ly#?i;Ogl0$u}j>5nOCqUxm+7x|?17G&+u zw311R?LsPBt9lzfY)eqd|H>iW?-6q03M6@jO8I8Vg_Ucf4wWd5{QE=3G!3Kk*1j9L z?*udDPvAu+L@C<&GM#)(4w?#27!t^jo?H?wtB1%MRu$Li5beZ-k^YI0rXcBVbWufy zUB~(pxu`m$Rcfzhgip2&fHy_X=Ik#BH_ei=bKtouxHiyCzYU~An^+l|dj&p<0a9tA z?Ual^ygmpcfXRuU%^`@9P%ew6c-0)cIH7B&1~}x z8b4*@{Ry-W(%3pxAPv8Y-1Ki z+#v8PUFG(?<@Auum$3=$X0n#zEE9$K!Wbr3dZogqC*&hKS|eTFzIveE;SBd({g%ye}J9_?tPVpi=k3<^pb8j9v|iJUZPGIMBU z$fxM0eY%CTAwrKxK-n;O#64HX=%&H~#yV^;mO+IhsUYZ2YmhL5$^)8h00DpGavDJ- zDuG{4!MT>F1MSHL^CeOtj@8vbBMHnqY`>)ao0a*u{8OEG+pGF{rrHEaAi*Qh2C6Xu z!4bh`$5})y>(x`NE;v;r3lIH{y2xd?y$g3yZTgYfl2d?o@< znywN%htpI?fdq*lFn|!F^r==q)u=ps?NbkVRL?^szN5}jb6l^cIvuH)Lmy+!q5Uwk zQp1fIB{#-yg~T>^E!$=)ixGIUDj-SLa#UD388Q13j1p#Wfe<7}upb;7!-AmAsgH|* zbDi4FCw^XLq1q4cs5lzT{Ml)HM(6lPT1S8o{sftSxvw!$4G)$UUTD4kjh+*c3uie> zPX`yT9<#!nB$J+vhA&9H{rO&S+r5(01+kG@%Sc{ykU7i=g@9PI2=9E^H+V1nOlZL6Hdto|FM#)Wt8`k*eGFSbX39Ap7ry8E88Z&17=#!C7XWg;WnaC_geZ#; zqucBiOQW4pMzoHa_#EtfPCAH>MG`}jkeU_v7R=fwumTlw5SPk#WsQ{}qOp=?Zmr6x zu#g<0W;y!OnD}ActZj;|WjbB~g=0am8h=4zkPKnfg+K%~$bcs9)@B(NwH&qhNgL=G z{wF}7D@%*)(}xFx;(qN=&!)wLFyt@IY(B6*pWzz;TBeTD6}maA3s^Zl_-Fh2kcot@ zf`Hh0pPQ=!JZR!9k2MG?w??J0u6YhpEC@qXIuVpAkUiYqd-%rp0~$VjiPq`8*SI~} zMRU0odt256tv@L5A|GM`#AL*VaL6*mJVxKD=Ha*t&hRmq|Dt}nXkyNQS@UL59eVbuh}u-0 zE@*;!Id|6mY)!W;@H-IJu+Zt0p~fIQdwoJhY-Khha70h@8eMd*)F-xQ|BdfA0dB8^ zWv><){i6(Orq*?YG4xzglChHzz6_a588@Jw9tyRtU}XR z@!hQmS0|Xfp6fW&_hLZCFpng_@}L;0)=VGvE8JKGU)UUM^kMYPjfCU%-XldwCswk? 
z3s-)a8=eCK<8hWqnwQT}a=kj{JNg3^m9++s#+_ai^_52YhW^YPQ}jH!#|1&Csc~YW zBaHO@@|X`mnTCqg*+yi?!1q~XJc`cP(WM8>BBQhDR{bLWBut)ydXqHs*juD^#Qr})*4o{v%ZLMowu0oHDgJ>*x@T>sH+SQGfV6z5gZ`u<+x~xrrs{Car8Ww=z zxQv1x z2)RI$U+nOcU77e;1ft(HmE)W~?*x3M<#Ws&BSp5v&;9$c^iV*u&o3L6TSYkp>DVV0 zkAqoJpstLVIQL2t!7~JwB)fg%<_7$si(?ud$O1z#8YTf(#r&ddW$2~$YYBeE*Yc|X z19??4v04-6e6rz0Omjl)BdE_W7v}|+I&&4V^T<3Om?NjM7FHErmoIW zc_q!jn{uKNIRoL8J@dMA{cvWntNHoTTZ@+L&0}^NNeSfZdqRSf7J@ed>fG0$r|@x; z`ks!>$$-qe+)xm?*4Qj@`@Z4rW>4w6ImPwnwsnDxb-dKt^e{!z?QX+6y`+;#!ZIRH z^zOXcU7oQ!g&mhpy)sXx+RBYKEqKbxtvCU;>yPK}X>SQkEIx2rjr%8j(vrBEAPWgMJGfva38I;vUhDyud(8a`Ful?z~Zo8~t$XTiXMBBg98ro5}{d z#qHKz%Orl0(_*YsXWnpnc#yrnc?|2?Pa0y!;l4dK$-cEAp0;);A3}H9eu;P@B`Y53l||1YvZ+a>h&H!R zE;vQlxO+_pyrEh#4#8~NLP<8{b|z1|X0xtzc7)lSvJKb2*O~xN;J_AWkHbGX)w!Jr z`H=i$BhzqC?Md1!ESZRK8E@(5Ycu7J?YT(>TnzK6@r#sshmHZlcp`!Jlje7azJ*BM(D%q!S2vWOyC zCap`x#UthhkICu#hFc=ih@}2gjZ?2#ev>DpU0gPx-F+mS8=e<&{l5CMdrK9Y1V*#MxWC-;`7JBTW_7m z6stf3?XF&exJ{!W$>j@{rarc1QtpFZ(*OTd=HK-w7W|*e@qc1qJ|wf0RR17Aa>0`; zc1Vfxzh_@q72N$AcA*OYF)AKU8~`fE2NS6kB!{c|7oVn6@LeF_P(nq8Nu?5bP&fe2 zCsz;G8Hk``DHTLVAf*I45I+m1fli+`mF9EpN)lT%>_>St*x20!s6!DC2*wTPC!Q>JK1 z5y4RM1b5DAS*ID`6P>EDx*Hr<{w$tKGk(KEp8CFKi%j{KS{wCg7~M^fjbobJJ1)a; zv)+tfm-`?KtIJ`DRi_>>Jm+s}UG{hPY3=HlIy#hv$*`Dm|Gsf=)y)~~>3y7C)PO;zfNF`52-z`c{)pw_0>_&l@R zc3sv06VlLNxc@>n=GunJd3)u&2_Gw~81mi~q*9X(?w0ndwa`o5CF@7B2Co$K&q78O ztj{wb&Mtl8F^M$@sQ{f+)PVv0>W{i(=;`-_LLFi*(d6JbRr7I?sSPA)t*eiK+5Ijc zO)mH8C4^CFHmW?u)k7(X8UcM0wFCuM`)4E)UWCjKckRp zjptF+vVsIoEU!renhLsP;k!ACTsC*^FNQisXDbzkw7x`>cCFYK+DGpYV?KkmlULt_ zTaWO+AG-gSJB6`z>g<2?A3c-~c!E1geIrRjqX@~1*5Nk?63auZWKT_pAjBOcze6W< z-Y05XUC`l#v?l4yMon%J_9CoClP5x2Wka<88#I+_d#j*5VI`}Vu)8kJ*dBc$xISY{ zmy;O*YcJ}JkSv95Gzb>+d4*_gZbV<5o5MZPjh)Z;_%m29y}bd^c??TEM%Ld8fJ&d# z0jnW@CzA1S)3{70ad^ap@iv6m1r-U=ct;vtLLOf+Y zhZQ2=mE!NxJkX5qi*0R)+Tnavb-&fwHrQNTc)oFw{KEYhaDrDBMIPzbw{2 zZ6920uT{YL9xuHpRsJ_M!}|oOfaj%Rx=sv2P2C4&Pvy~FcFLr}=Jr*Q+G#XAhk5}atezdQ1H!g4xu7ypU!^BBE zGU|1%$9(^wderi0jUhJq#;3C7$EB`yO8#NgA|CUHcozvzU4nEdXvy~QgM0DNn6s1f zJ9h#ND;f{pMflVc8!HaCb?Y|i6{_jYG>&!dY~G_{?vN37+-`}j<}8OhzM_NNUc;>x zM*batWxLk{o?G{sFYlZ^JotJzeyg1| z4D4(CcIPbSUTj}5m&lnzduXJ|DnujAG*Sf;DbK}0;Fj((QS6zua_M*E``M33zI5G_ zauRcNlUV|PJS&Y2ooan9kt29RBU2M-eDUs=7jMVI6eYgDI=WVg4htea)47s+k(}nn zrDQhE_sr{mFcpdht_pEJj&)xFht?X8yrB;|D9FZAr?Ib8YB4YVGmlV;pSibH5mFnJ zD%k4*FK8xIyIlp$#_gFUZFd4(9tMdP>YEeFd9$?1^J0s5L_WjqvNk4ekF-gJ>*!eU zA7fTNJ*;IWhi*Yw*6>IS%pU)zTmTbT!?dHE*UWX8RESrX7oi2EcQZpW^|-Ws#^=X! 
zpFTK82(<3MVS+F$C-IS1Nc^!T9GP8mmWzriY00|yj^E5}a=3xn7&Ei9iNI^N;$sf~ z-J^XrQb|;#(;~Dg4RQT862h-SZ_Pdac=gAyyT@}iX)idlvkXwvVGGwD=Q-TIg0uG zfDz_5;PC*AFuIc0gi#U5B$DC1oQ`eSzbqY@BM^MEiKoqyb=pWaKw3> zvm8~tJf??#wZ&Y@4;F*WFEFi)-};0_n)^vGRo1D)YnjCMD$}r%ZTjN%yu=m1`SqWc zkGsrsOK3zQl>(73nZx@A^Y&xsA2rAvyp+^FBkk|bFesWss^eBLg(@9+%$X&?sqLH+ zAyAKazZ6WVmYmT~%-JWL9}rmuh5*_0ph`tohkt+YcEEM2x)ehX1&L4KEg)}G&G(?{ zqQ&n8mo!IK4o00LIaI;m7a|J(h?(M^SwDH_?p-<6DKotB69%{TPWU5SCGe~QQ5|ix4bB<`OVux=sz{X;i@iSHyljEcgjyQ&Tg5LtT=i3-U*kM}>g^p4aMP4O_@Y-uo( z^>gt3&GojU|C*OrHdS0%f!*Sj(9=wrq+?&vQ}}+MKeI5vBYv)k0n#*mPGAh4`o~A( z87FQekjm?V{RL!$IQ+jWqWFBV%WHhn_NIS5p@)}BU4HLg-_E7O8=Xy(TM-JAR7h( zyvJymScVjne;yzH6{pTyg=MplQfZ*tQCS!q@tAR7yNffX&z7X<=^ zwad~u2nq)^!b<53q&mpSPwZ)6URA^lvQQW&xb*|6=`q9^GUCG;`t^_0uXPIJtdx}d z_+Az=1%!yh;}(?h*KwFBc#f+m#)wBvx*OR(8zi<8|CpQm^k?ofy22xd1AHD<}T44deM-IB>ryLNrB}#zF#KZNvHaSM5rH-MQmL%peGLp*m2$ zij1(-CQ50w8h8H3<-r7uC6JE7f@EI*#FdQ^v@Oir)FbXyFc%r{S+6UM=Vq#9!aLc8o5P^ zac3f_h;Ngq8phw^@4zghkBkP!4<)mQz7=06n1vD2+%%g@6JM8V5+T%&JgFaffS?T( V#e_ODy+4%ZeJox7|5}wA{TK1i_dWms literal 0 HcmV?d00001 diff --git a/examples/dm_control/train_ppo.py b/examples/dm_control/train_ppo.py index 2a3004a1..aa77222f 100644 --- a/examples/dm_control/train_ppo.py +++ b/examples/dm_control/train_ppo.py @@ -4,10 +4,9 @@ from openrl.configs.config import create_config_parser from openrl.envs.common import make from openrl.envs.wrappers.base_wrapper import BaseWrapper -from openrl.envs.wrappers.extra_wrappers import GIFWrapper +from openrl.envs.wrappers.extra_wrappers import FrameSkip, GIFWrapper from openrl.modules.common import PPONet as Net from openrl.runners.common import PPOAgent as Agent -from openrl.envs.wrappers.extra_wrappers import FrameSkip env_name = "dm_control/cartpole-balance-v0" # env_name = "dm_control/walker-walk-v0" diff --git a/examples/snake/README.md b/examples/snake/README.md index 4b47c3cf..4adb9cbd 100644 --- a/examples/snake/README.md +++ b/examples/snake/README.md @@ -1,10 +1,17 @@ This is the example for the snake game. +## Usage + +```bash +python train_selfplay.py +``` + ## Submit to JiDi Submition site: http://www.jidiai.cn/env_detail?envid=1. Snake senarios: [here](https://github.com/jidiai/ai_lib/blob/7a6986f0cb543994277103dbf605e9575d59edd6/env/config.json#L94) +Original Snake environment: [here](https://github.com/jidiai/ai_lib/blob/master/env/snakes.py) diff --git a/examples/snake/selfplay.yaml b/examples/snake/selfplay.yaml new file mode 100644 index 00000000..74de97a0 --- /dev/null +++ b/examples/snake/selfplay.yaml @@ -0,0 +1,3 @@ +seed: 0 +callbacks: + - id: "ProgressBarCallback" diff --git a/examples/snake/submissions/random_agent/submission.py b/examples/snake/submissions/random_agent/submission.py index c599945b..b1f468df 100644 --- a/examples/snake/submissions/random_agent/submission.py +++ b/examples/snake/submissions/random_agent/submission.py @@ -27,4 +27,3 @@ def my_controller(observation, action_space, is_act_continuous): player = sample_single_dim(action_space[i], is_act_continuous) joint_action.append(player) return joint_action - diff --git a/examples/snake/test_env.py b/examples/snake/test_env.py index d4a83839..b3a6bbee 100644 --- a/examples/snake/test_env.py +++ b/examples/snake/test_env.py @@ -15,18 +15,93 @@ # limitations under the License. 
"""""" +import time + import numpy as np +from wrappers import ConvertObs + from openrl.envs.snake.snake import SnakeEatBeans +from openrl.envs.snake.snake_pettingzoo import SnakeEatBeansAECEnv +from openrl.selfplay.wrappers.random_opponent_wrapper import RandomOpponentWrapper + + +def test_raw_env(): + env = SnakeEatBeans() + + obs, info = env.reset() + + done = False + while not np.any(done): + a1 = np.zeros(4) + a1[env.action_space.sample()] = 1 + a2 = np.zeros(4) + a2[env.action_space.sample()] = 1 + obs, reward, done, info = env.step([a1, a2]) + print("obs:", obs) + print("reward:", reward) + print("done:", done) + print("info:", info) + + +def test_aec_env(): + from PIL import Image + + img_list = [] + env = SnakeEatBeansAECEnv(render_mode="rgb_array") + env.reset(seed=0) + # time.sleep(1) + img = env.render() + img_list.append(img) + step = 0 + for player_name in env.agent_iter(): + if step > 20: + break + observation, reward, termination, truncation, info = env.last() + if termination or truncation: + break + action = env.action_space(player_name).sample() + # if player_name == "player_0": + # action = 2 + # elif player_name == "player_1": + # action = 3 + # else: + # raise ValueError("Unknown player name: {}".format(player_name)) + env.step(action) + img = env.render() + if player_name == "player_0": + img_list.append(img) + # time.sleep(1) + + step += 1 + print("Total steps: {}".format(step)) + + save_path = "test.gif" + img_list = [Image.fromarray(img) for img in img_list] + img_list[0].save(save_path, save_all=True, append_images=img_list[1:], duration=500) + + +def test_vec_env(): + from openrl.envs.common import make -env = SnakeEatBeans() + env = make( + "snakes_1v1", + opponent_wrappers=[ + RandomOpponentWrapper, + ], + env_wrappers=[ConvertObs], + render_mode="group_human", + env_num=2, + ) + obs, info = env.reset() + step = 0 + done = False + while not np.any(done): + action = env.random_action() + obs, reward, done, info = env.step(action) + time.sleep(0.3) + step += 1 + print("Total steps: {}".format(step)) -obs, info = env.reset() -done = False -while not np.any(done): - a1 = np.zeros(4) - a1[env.action_space.sample()] = 1 - a2 = np.zeros(4) - a2[env.action_space.sample()] = 1 - obs, reward, done, info = env.step([a1, a2]) - print("obs:", obs, reward, "\ndone:", done, info) +if __name__ == "__main__": + test_vec_env() diff --git a/examples/snake/train_selfplay.py b/examples/snake/train_selfplay.py new file mode 100644 index 00000000..d466abbe --- /dev/null +++ b/examples/snake/train_selfplay.py @@ -0,0 +1,87 @@ +import numpy as np +import torch +from wrappers import ConvertObs + +from openrl.configs.config import create_config_parser +from openrl.envs.common import make +from openrl.modules.common import PPONet as Net +from openrl.runners.common import PPOAgent as Agent +from openrl.selfplay.wrappers.random_opponent_wrapper import RandomOpponentWrapper + + +def train(): + cfg_parser = create_config_parser() + cfg = cfg_parser.parse_args(["--config", "selfplay.yaml"]) + + # Create environment + env_num = 10 + render_model = None + env = make( + "snakes_1v1", + render_mode=render_model, + env_num=env_num, + asynchronous=True, + opponent_wrappers=[RandomOpponentWrapper], + env_wrappers=[ConvertObs], + cfg=cfg, + ) + # Create neural network + + net = Net(env, cfg=cfg, device="cuda" if torch.cuda.is_available() else "cpu") + # Create agent + agent = Agent(net) + # Begin training + agent.train(total_time_steps=100000) + env.close() + agent.save("./selfplay_agent/") + return 
agent + + +def evaluation(): + from examples.selfplay.tictactoe_utils.tictactoe_render import TictactoeRender + + print("Evaluation...") + env_num = 1 + env = make( + "snakes_1v1", + env_num=env_num, + asynchronous=True, + opponent_wrappers=[RandomOpponentWrapper], + env_wrappers=[ConvertObs], + auto_reset=False, + ) + + cfg_parser = create_config_parser() + cfg = cfg_parser.parse_args() + net = Net(env, cfg=cfg, device="cuda" if torch.cuda.is_available() else "cpu") + + agent = Agent(net) + + agent.load("./selfplay_agent/") + agent.set_env(env) + env.reset(seed=0) + + total_reward = 0.0 + ep_num = 5 + for ep_now in range(ep_num): + obs, info = env.reset() + done = False + step = 0 + + while not np.any(done): + # predict next action based on the observation + action, _ = agent.act(obs, info, deterministic=True) + obs, r, done, info = env.step(action) + step += 1 + + if np.any(done): + total_reward += np.mean(r) > 0 + print(f"{ep_now}/{ep_num}: reward: {np.mean(r)}") + print(f"win rate: {total_reward/ep_num}") + env.close() + print("Evaluation finished.") + + +if __name__ == "__main__": + train() + evaluation() diff --git a/examples/snake/wrappers.py b/examples/snake/wrappers.py new file mode 100644 index 00000000..52f3958d --- /dev/null +++ b/examples/snake/wrappers.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2023 The OpenRL Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""""" +import gymnasium as gym +import numpy as np +from gymnasium import spaces + +from openrl.envs.wrappers.base_wrapper import BaseObservationWrapper + + +def raw2vec(raw_obs, n_player=2): + control_index = raw_obs["controlled_snake_index"][0] + + width = raw_obs["board_width"][0] + height = raw_obs["board_height"][0] + beans = raw_obs[1][0] + + ally_pos = raw_obs[control_index][0] + enemy_pos = raw_obs[5 - control_index][0] + + obs = np.zeros(width * height * n_player, dtype=int) + + ally_head_h, ally_head_w = ally_pos[0] + enemy_head_h, enemy_head_w = enemy_pos[0] + obs[ally_head_h * width + ally_head_w] = 2 + obs[height * width + ally_head_h * width + ally_head_w] = 4 + obs[enemy_head_h * width + enemy_head_w] = 4 + obs[height * width + enemy_head_h * width + enemy_head_w] = 2 + + for bean in beans: + h, w = bean + obs[h * width + w] = 1 + obs[height * width + h * width + w] = 1 + + for p in ally_pos[1:]: + h, w = p + obs[h * width + w] = 3 + obs[height * width + h * width + w] = 5 + + for p in enemy_pos[1:]: + h, w = p + obs[h * width + w] = 5 + obs[height * width + h * width + w] = 3 + + obs_ = np.array([]) + for i in obs: + obs_ = np.concatenate([obs_, np.eye(6)[i]]) + obs_ = obs_.reshape(-1, width * height * n_player * 6) + + return obs_ + + +class ConvertObs(BaseObservationWrapper): + def __init__(self, env: gym.Env): + """Flattens the observations of an environment. 
+
+        Args:
+            env: The environment to apply the wrapper
+        """
+        BaseObservationWrapper.__init__(self, env)
+
+        self.observation_space = spaces.Box(
+            low=-np.inf, high=np.inf, shape=(576,), dtype=np.float32
+        )
+
+    def observation(self, observation):
+        """Flattens an observation.
+
+        Args:
+            observation: The observation to flatten
+
+        Returns:
+            The flattened observation
+        """
+
+        return raw2vec(observation)
diff --git a/openrl/algorithms/dqn.py b/openrl/algorithms/dqn.py
index ebd8d727..bbca547b 100644
--- a/openrl/algorithms/dqn.py
+++ b/openrl/algorithms/dqn.py
@@ -167,9 +167,7 @@ def prepare_loss(
         )
 
         q_targets = rewards_batch + self.gamma * max_next_q_values * next_masks_batch
-        q_loss = torch.mean(
-            F.mse_loss(q_values, q_targets.detach())
-        )  # mean squared error loss
+        q_loss = torch.mean(F.mse_loss(q_values, q_targets.detach()))  # mean squared error loss
 
         loss_list.append(q_loss)
 
diff --git a/openrl/algorithms/vdn.py b/openrl/algorithms/vdn.py
index 83bdb5ed..f1215c03 100644
--- a/openrl/algorithms/vdn.py
+++ b/openrl/algorithms/vdn.py
@@ -211,9 +211,7 @@ def prepare_loss(
         rewards_batch = rewards_batch.reshape(-1, self.n_agent, 1)
         rewards_batch = torch.sum(rewards_batch, dim=1, keepdim=True).view(-1, 1)
         q_targets = rewards_batch + self.gamma * max_next_q_values * next_masks_batch
-        q_loss = torch.mean(
-            F.mse_loss(q_values, q_targets.detach())
-        )  # mean squared error loss
+        q_loss = torch.mean(F.mse_loss(q_values, q_targets.detach()))  # mean squared error loss
 
         loss_list.append(q_loss)
         return loss_list
diff --git a/openrl/envs/PettingZoo/__init__.py b/openrl/envs/PettingZoo/__init__.py
index e5111afc..fa9e66ca 100644
--- a/openrl/envs/PettingZoo/__init__.py
+++ b/openrl/envs/PettingZoo/__init__.py
@@ -63,7 +63,8 @@ def make_PettingZoo_envs(
         Single2MultiAgentWrapper,
     )
 
-    env_wrappers = copy.copy(kwargs.pop("opponent_wrappers", [SeedEnv]))
+    env_wrappers = [SeedEnv]
+    env_wrappers += copy.copy(kwargs.pop("opponent_wrappers", []))
     env_wrappers += [
         Single2MultiAgentWrapper,
         RemoveTruncated,
diff --git a/openrl/envs/common/registration.py b/openrl/envs/common/registration.py
index 90f54e82..3a274c2c 100644
--- a/openrl/envs/common/registration.py
+++ b/openrl/envs/common/registration.py
@@ -65,7 +65,14 @@ def make(
             id=id, env_num=env_num, render_mode=convert_render_mode, **kwargs
         )
     else:
-        if id.startswith("dm_control/"):
+        if id.startswith("snakes_"):
+            from openrl.envs.snake import make_snake_envs
+
+            env_fns = make_snake_envs(
+                id=id, env_num=env_num, render_mode=convert_render_mode, **kwargs
+            )
+
+        elif id.startswith("dm_control/"):
             from openrl.envs.dmc import make_dmc_envs
 
             env_fns = make_dmc_envs(
diff --git a/openrl/envs/dmc/__init__.py b/openrl/envs/dmc/__init__.py
index ad2b113d..4f6ff39e 100644
--- a/openrl/envs/dmc/__init__.py
+++ b/openrl/envs/dmc/__init__.py
@@ -13,10 +13,7 @@ def make_dmc_envs(
     render_mode: Optional[Union[str, List[str]]] = None,
     **kwargs,
 ):
-    from openrl.envs.wrappers import (
-        RemoveTruncated,
-        Single2MultiAgentWrapper,
-    )
+    from openrl.envs.wrappers import RemoveTruncated, Single2MultiAgentWrapper
     from openrl.envs.wrappers.extra_wrappers import ConvertEmptyBoxWrapper
 
     env_wrappers = copy.copy(kwargs.pop("env_wrappers", []))
diff --git a/openrl/envs/mpe/rendering.py b/openrl/envs/mpe/rendering.py
index 65ca66b0..ab1a47db 100644
--- a/openrl/envs/mpe/rendering.py
+++ b/openrl/envs/mpe/rendering.py
@@ -29,10 +29,12 @@
 except ImportError:
     print(
         "Error occured while running `from pyglet.gl import *`",
-        "HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get"
-        " install python-opengl'. 
If you're running on a server, you may need a" - " virtual frame buffer; something like this should work: 'xvfb-run -s" - ' "-screen 0 1400x900x24" python \'', + ( + "HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get" + " install python-opengl'. If you're running on a server, you may need a" + " virtual frame buffer; something like this should work: 'xvfb-run -s" + ' "-screen 0 1400x900x24" python \'' + ), ) import math diff --git a/openrl/envs/snake/__init__.py b/openrl/envs/snake/__init__.py index 663cfed7..7d049e8f 100644 --- a/openrl/envs/snake/__init__.py +++ b/openrl/envs/snake/__init__.py @@ -15,3 +15,47 @@ # limitations under the License. """""" +import copy +from typing import List, Optional, Union + +from pettingzoo.utils.wrappers import AssertOutOfBoundsWrapper, OrderEnforcingWrapper + +from openrl.envs.common import build_envs +from openrl.envs.snake.snake_pettingzoo import SnakeEatBeansAECEnv +from openrl.envs.wrappers.pettingzoo_wrappers import SeedEnv + + +def snake_env_make(id, render_mode, disable_env_checker, **kwargs): + if id == "snakes_1v1": + env = SnakeEatBeansAECEnv(render_mode=render_mode) + else: + raise ValueError("Unknown env {}".format(id)) + return env + + +def make_snake_envs( + id: str, + env_num: int = 1, + render_mode: Optional[Union[str, List[str]]] = None, + **kwargs, +): + from openrl.envs.wrappers import RemoveTruncated, Single2MultiAgentWrapper + + env_wrappers = [AssertOutOfBoundsWrapper, OrderEnforcingWrapper, SeedEnv] + env_wrappers += copy.copy(kwargs.pop("opponent_wrappers", [])) + env_wrappers += [ + Single2MultiAgentWrapper, + RemoveTruncated, + ] + env_wrappers += copy.copy(kwargs.pop("env_wrappers", [])) + + env_fns = build_envs( + make=snake_env_make, + id=id, + env_num=env_num, + render_mode=render_mode, + wrappers=env_wrappers, + **kwargs, + ) + + return env_fns diff --git a/openrl/envs/snake/common.py b/openrl/envs/snake/common.py index eb67e9dc..6a67a0a3 100644 --- a/openrl/envs/snake/common.py +++ b/openrl/envs/snake/common.py @@ -1,16 +1,19 @@ -import numpy as np -import sys import os +import sys + +import numpy as np + class HiddenPrints: def __enter__(self): self._original_stdout = sys.stdout - sys.stdout = open(os.devnull, 'w') + sys.stdout = open(os.devnull, "w") def __exit__(self, exc_type, exc_val, exc_tb): sys.stdout.close() sys.stdout = self._original_stdout + class Board: def __init__(self, board_height, board_width, snakes, beans_positions, teams): # print('create board, beans_position: ', beans_positions) @@ -48,20 +51,33 @@ def step(self): # delay: prevent rear-end collision # = self.blank_sign for key, snake in self.snakes.items(): if snake.len >= self.state: - self.board[snake.pos[-self.state][0]][snake.pos[-self.state][1]] = self.blank_sign # drop tail + self.board[snake.pos[-self.state][0]][ + snake.pos[-self.state][1] + ] = self.blank_sign # drop tail for key, value in self.open.items(): # value: e.g. 
[[8, 3], [6, 3], [7, 4]] - others_tail_pos = [self.snakes[_].pos[-self.state] - if self.snakes[_].len >= self.state else [] - for _ in set(range(self.snakes_count)) - {key}] + others_tail_pos = [ + ( + self.snakes[_].pos[-self.state] + if self.snakes[_].len >= self.state + else [] + ) + for _ in set(range(self.snakes_count)) - {key} + ] for x, y in value: # print('start to spread snake {} on grid ({}, {})'.format(key, x, y)) - for x_, y_ in [((x + 1) % self.height, y), # down - ((x - 1) % self.height, y), # up - (x, (y + 1) % self.width), # right - (x, (y - 1) % self.width)]: # left + for x_, y_ in [ + ((x + 1) % self.height, y), # down + ((x - 1) % self.height, y), # up + (x, (y + 1) % self.width), # right + (x, (y - 1) % self.width), + ]: # left sign = self.board[x_][y_] - idx = sign % self.snakes_count # which snake, e.g. 0, 1, 2, 3, 4, 5 / number of claims - state = sign // self.snakes_count # manhattan distance to snake who claim the point or its negative + idx = ( + sign % self.snakes_count + ) # which snake, e.g. 0, 1, 2, 3, 4, 5 / number of claims + state = ( + sign // self.snakes_count + ) # manhattan distance to snake who claim the point or its negative if sign == self.blank_sign: # grid in initial state if [x_, y_] in others_tail_pos: # print('do not spread other snakes tail, in case of rear-end collision') @@ -76,46 +92,64 @@ def step(self): # delay: prevent rear-end collision # '\tgird ({}, {}) in the same state claimed by different snakes ' # 'with sign {}, idx {} and state {}'.format( # x_, y_, sign, idx, state)) - if self.snakes[idx].len > self.snakes[key].len: # shorter snake claim the controversial grid + if ( + self.snakes[idx].len > self.snakes[key].len + ): # shorter snake claim the controversial grid # print('\t\tsnake {} is shorter than snake {}'.format(key, idx)) self.snakes[idx].claimed_count -= 1 new_open[idx].remove([x_, y_]) self.board[x_][y_] = self.state * self.snakes_count + key self.snakes[key].claimed_count += 1 new_open[key].append([x_, y_]) - elif self.snakes[idx].len == self.snakes[key].len: # controversial claim + elif ( + self.snakes[idx].len == self.snakes[key].len + ): # controversial claim # print( # '\t\tcontroversy! first claimed by snake {}, then claimed by snake {}'.format(idx, key)) - self.controversy[(x_, y_)] = {'state': self.state, - 'length': self.snakes[idx].len, - 'indexes': [idx, key]} + self.controversy[(x_, y_)] = { + "state": self.state, + "length": self.snakes[idx].len, + "indexes": [idx, key], + } # first claim by snake idx, then claim by snake key self.board[x_][y_] = -self.state * self.snakes_count + 1 # if + 2, not enough for all snakes claim one grid!! - self.snakes[idx].claimed_count -= 1 # controversy, no snake claim this grid!! + self.snakes[ + idx + ].claimed_count -= ( + 1 # controversy, no snake claim this grid!! 
+ ) new_open[key].append([x_, y_]) else: # (self.snakes[idx].len < self.snakes[key].len) pass # longer snake do not claim the controversial grid - elif (x_, y_) in self.controversy \ - and key not in self.controversy[(x_, y_)]['indexes'] \ - and self.state + state == 0: # third claim or more + elif ( + (x_, y_) in self.controversy + and key not in self.controversy[(x_, y_)]["indexes"] + and self.state + state == 0 + ): # third claim or more # print('snake {} meets third or more claim in grid ({}, {})'.format(key, x_, y_)) controversy = self.controversy[(x_, y_)] # pprint.pprint(controversy) - if controversy['length'] > self.snakes[key].len: # shortest snake claim grid, do 4 things + if ( + controversy["length"] > self.snakes[key].len + ): # shortest snake claim grid, do 4 things # print('\t\tsnake {} is shortest'.format(key)) - indexes_count = len(controversy['indexes']) - for i in controversy['indexes']: - self.snakes[i].claimed_count -= 1 / indexes_count # update claimed_count ! + indexes_count = len(controversy["indexes"]) + for i in controversy["indexes"]: + self.snakes[i].claimed_count -= ( + 1 / indexes_count + ) # update claimed_count ! new_open[i].remove([x_, y_]) del self.controversy[(x_, y_)] self.board[x_][y_] = self.state * self.snakes_count + key self.snakes[key].claimed_count += 1 new_open[key].append([x_, y_]) - elif controversy['length'] == self.snakes[key].len: # controversial claim + elif ( + controversy["length"] == self.snakes[key].len + ): # controversial claim # print('\t\tcontroversy! multi claimed by snake {}'.format(key)) - self.controversy[(x_, y_)]['indexes'].append(key) + self.controversy[(x_, y_)]["indexes"].append(key) self.board[x_][y_] += 1 new_open[key].append([x_, y_]) else: # (controversy['length'] < self.snakes[key].len) @@ -126,8 +160,10 @@ def step(self): # delay: prevent rear-end collision self.open = new_open # update open # update controversial snakes' claimed_count (in fraction) in the end for _, d in self.controversy.items(): - controversial_snake_count = len(d['indexes']) # number of controversial snakes - for idx in d['indexes']: + controversial_snake_count = len( + d["indexes"] + ) # number of controversial snakes + for idx in d["indexes"]: self.snakes[idx].claimed_count += 1 / controversial_snake_count @@ -139,10 +175,15 @@ def __init__(self, snake_positions, board_height, board_width, beans_positions): self.beans_positions = beans_positions self.claimed_count = 0 - displace = [(self.head[0] - snake_positions[1][0]) % board_height, - (self.head[1] - snake_positions[1][1]) % board_width] + displace = [ + (self.head[0] - snake_positions[1][0]) % board_height, + (self.head[1] - snake_positions[1][1]) % board_width, + ] # print('creat snake, pos: ', self.pos, 'displace:', displace) - if displace == [board_height - 1, 0]: # all action are ordered by left, up, right, relative to the body + if displace == [ + board_height - 1, + 0, + ]: # all action are ordered by left, up, right, relative to the body self.dir = 0 # up self.legal_action = [2, 0, 3] elif displace == [1, 0]: @@ -155,16 +196,18 @@ def __init__(self, snake_positions, board_height, board_width, beans_positions): self.dir = 3 # right self.legal_action = [0, 3, 1] else: - assert False, 'snake positions error' - positions = [[(self.head[0] - 1) % board_height, self.head[1]], - [(self.head[0] + 1) % board_height, self.head[1]], - [self.head[0], (self.head[1] - 1) % board_width], - [self.head[0], (self.head[1] + 1) % board_width]] + assert False, "snake positions error" + positions = [ + 
[(self.head[0] - 1) % board_height, self.head[1]], + [(self.head[0] + 1) % board_height, self.head[1]], + [self.head[0], (self.head[1] - 1) % board_width], + [self.head[0], (self.head[1] + 1) % board_width], + ] self.legal_position = [positions[_] for _ in self.legal_action] def get_action(self, position): if position not in self.legal_position: - assert False, 'the start and end points do not match' + assert False, "the start and end points do not match" idx = self.legal_position.index(position) return self.legal_action[idx] # 0, 1, 2, 3: up, down, left, right @@ -175,10 +218,10 @@ def step(self, legal_input): idx = self.legal_action.index(legal_input) position = self.legal_position[idx] else: - assert False, 'illegal snake move' + assert False, "illegal snake move" self.head = position self.pos.insert(0, position) if position in self.beans_positions: # eat a bean self.len += 1 else: # do not eat a bean - self.pos.pop() \ No newline at end of file + self.pos.pop() diff --git a/openrl/envs/snake/discrete.py b/openrl/envs/snake/discrete.py index 20867064..7d6d318d 100644 --- a/openrl/envs/snake/discrete.py +++ b/openrl/envs/snake/discrete.py @@ -1,12 +1,14 @@ import numpy as np + from .space import Space class Discrete(Space): - r"""A discrete space in :math:`\{ 0, 1, \\dots, n-1 \}`. + r"""A discrete space in :math:`\{ 0, 1, \\dots, n-1 \}`. Example:: >>> Discrete(2) """ + def __init__(self, n): assert n >= 0 self.n = n @@ -18,7 +20,9 @@ def sample(self): def contains(self, x): if isinstance(x, int): as_int = x - elif isinstance(x, (np.generic, np.ndarray)) and (x.dtype.char in np.typecodes['AllInteger'] and x.shape == ()): + elif isinstance(x, (np.generic, np.ndarray)) and ( + x.dtype.char in np.typecodes["AllInteger"] and x.shape == () + ): as_int = int(x) else: return False @@ -28,4 +32,4 @@ def __repr__(self): return "Discrete(%d)" % self.n def __eq__(self, other): - return isinstance(other, Discrete) and self.n == other.n \ No newline at end of file + return isinstance(other, Discrete) and self.n == other.n diff --git a/openrl/envs/snake/game.py b/openrl/envs/snake/game.py index f3be6166..c0e35d39 100644 --- a/openrl/envs/snake/game.py +++ b/openrl/envs/snake/game.py @@ -1,12 +1,20 @@ # -*- coding:utf-8 -*- -# 作者:zruizhi -# 创建时间: 2020/7/10 10:24 上午 +# 作者:zruizhi +# 创建时间: 2020/7/10 10:24 上午 # 描述: from abc import ABC, abstractmethod class Game(ABC): - def __init__(self, n_player, is_obs_continuous, is_act_continuous, game_name, agent_nums, obs_type): + def __init__( + self, + n_player, + is_obs_continuous, + is_act_continuous, + game_name, + agent_nums, + obs_type, + ): self.n_player = n_player self.current_state = None self.all_observes = None @@ -44,4 +52,4 @@ def reset(self): raise NotImplementedError def set_action_space(self): - raise NotImplementedError \ No newline at end of file + raise NotImplementedError diff --git a/openrl/envs/snake/gridgame.py b/openrl/envs/snake/gridgame.py index 996e58b1..b75aa44d 100644 --- a/openrl/envs/snake/gridgame.py +++ b/openrl/envs/snake/gridgame.py @@ -1,11 +1,13 @@ # -*- coding:utf-8 -*- -# 作者:zruizhi -# 创建时间: 2020/7/10 10:24 上午 +# 作者:zruizhi +# 创建时间: 2020/7/10 10:24 上午 # 描述: -from PIL import Image, ImageDraw from itertools import count + import numpy as np +from PIL import Image, ImageDraw + from .game import Game UNIT = 40 @@ -14,25 +16,58 @@ class GridGame(Game): def __init__(self, conf, colors=None, unit_size=UNIT, fix=FIX): - super().__init__(conf['n_player'], conf['is_obs_continuous'], conf['is_act_continuous'], - conf['game_name'], 
conf['agent_nums'], conf['obs_type']) + super().__init__( + conf["n_player"], + conf["is_obs_continuous"], + conf["is_act_continuous"], + conf["game_name"], + conf["agent_nums"], + conf["obs_type"], + ) # grid game conf - self.game_name = conf['game_name'] - self.max_step = int(conf['max_step']) - self.board_width = int(conf['board_width']) - self.board_height = int(conf['board_height']) - self.cell_range = conf['cell_range'] if isinstance(eval(str(conf['cell_range'])), tuple) else (int(conf['cell_range']),) + self.game_name = conf["game_name"] + self.max_step = int(conf["max_step"]) + self.board_width = int(conf["board_width"]) + self.board_height = int(conf["board_height"]) + self.cell_range = ( + conf["cell_range"] + if isinstance(eval(str(conf["cell_range"])), tuple) + else (int(conf["cell_range"]),) + ) self.cell_dim = len(self.cell_range) self.cell_size = np.prod(self.cell_range) # grid observation conf - self.ob_board_width = conf['ob_board_width'] if not conf.get('ob_board_width') is None else [self.board_width for _ in range(self.n_player)] - self.ob_board_height = conf['ob_board_height'] if not conf.get('ob_board_height') is None else [self.board_height for _ in range(self.n_player)] - self.ob_cell_range = conf['ob_cell_range'] if not conf.get('ob_cell_range') is None else [self.cell_range for _ in range(self.n_player)] + self.ob_board_width = ( + conf["ob_board_width"] + if conf.get("ob_board_width") is not None + else [self.board_width for _ in range(self.n_player)] + ) + self.ob_board_height = ( + conf["ob_board_height"] + if conf.get("ob_board_height") is not None + else [self.board_height for _ in range(self.n_player)] + ) + self.ob_cell_range = ( + conf["ob_cell_range"] + if conf.get("ob_cell_range") is not None + else [self.cell_range for _ in range(self.n_player)] + ) # vector observation conf - self.ob_vector_shape = conf['ob_vector_shape'] if not conf.get('ob_vector_shape') is None else [self.board_width*self.board_height*self.cell_dim for _ in range(self.n_player)] - self.ob_vector_range = conf['ob_vector_range'] if not conf.get('ob_vector_range') is None else [self.cell_range for _ in range(self.n_player)] + self.ob_vector_shape = ( + conf["ob_vector_shape"] + if conf.get("ob_vector_shape") is not None + else [ + self.board_width * self.board_height * self.cell_dim + for _ in range(self.n_player) + ] + ) + self.ob_vector_range = ( + conf["ob_vector_range"] + if conf.get("ob_vector_range") is not None + else [self.cell_range for _ in range(self.n_player)] + ) # 每个玩家的 action space list, 可以根据player_id获取对应的single_action_space self.joint_action_space = self.set_action_space() @@ -42,18 +77,25 @@ def __init__(self, conf, colors=None, unit_size=UNIT, fix=FIX): # 记录对局结果信息 self.n_return = [0] * self.n_player - self.won = '' + self.won = "" # render 相关 self.grid_unit = unit_size self.grid = GridGame.init_board(self.board_width, self.board_height, unit_size) self.grid_unit_fix = fix - self.colors = colors + generate_color(self.cell_size - len(colors) + 1) if not colors is None else generate_color( - self.cell_size) + self.colors = ( + colors + generate_color(self.cell_size - len(colors) + 1) + if colors is not None + else generate_color(self.cell_size) + ) self.init_info = None - + def get_grid_obs_config(self, player_id): - return self.ob_board_width[player_id], self.ob_board_height[player_id], self.ob_cell_range[player_id] + return ( + self.ob_board_width[player_id], + self.ob_board_height[player_id], + self.ob_cell_range[player_id], + ) def get_grid_many_obs_space(self, 
player_id_list): all_obs_space = {} @@ -69,7 +111,7 @@ def get_vector_many_obs_space(self, player_id_list): all_obs_space = {} for i in player_id_list: m = self.ob_vector_shape[i] - all_obs_space[i] = (m) + all_obs_space[i] = m return all_obs_space def get_single_action_space(self, player_id): @@ -80,14 +122,16 @@ def set_action_space(self): def check_win(self): raise NotImplementedError - + def get_render_data(self, current_state): grid_map = [[0] * self.board_width for _ in range(self.board_height)] for i in range(self.board_height): for j in range(self.board_width): grid_map[i][j] = 0 for k in range(self.cell_dim): - grid_map[i][j] = grid_map[i][j] * self.cell_range[k] + current_state[i][j][k] + grid_map[i][j] = ( + grid_map[i][j] * self.cell_range[k] + current_state[i][j][k] + ) return grid_map def set_current_state(self, current_state): @@ -132,7 +176,7 @@ def step(self, joint_action): reward = self.get_reward(joint_action) return all_observes, reward, done, info_before, info_after - def step_before_info(self, info=''): + def step_before_info(self, info=""): return info def init_action_space(self): @@ -147,39 +191,58 @@ def init_action_space(self): def draw_board(self): cols = [chr(i) for i in range(65, 65 + self.board_width)] - s = ', '.join(cols) - print(' ', s) + s = ", ".join(cols) + print(" ", s) for i in range(self.board_height): print(chr(i + 65), self.current_state[i]) def render_board(self): im_data = np.array( - GridGame._render_board(self.get_render_data(self.current_state), self.grid, self.colors, self.grid_unit, self.grid_unit_fix)) + GridGame._render_board( + self.get_render_data(self.current_state), + self.grid, + self.colors, + self.grid_unit, + self.grid_unit_fix, + ) + ) return im_data @staticmethod def init_board(width, height, grid_unit, color=(250, 235, 215)): - im = Image.new(mode="RGB", size=(width * grid_unit, height * grid_unit), color=color) + im = Image.new( + mode="RGB", size=(width * grid_unit, height * grid_unit), color=color + ) draw = ImageDraw.Draw(im) for x in range(0, width): - draw.line(((x * grid_unit, 0), (x * grid_unit, height * grid_unit)), fill=(105, 105, 105)) + draw.line( + ((x * grid_unit, 0), (x * grid_unit, height * grid_unit)), + fill=(105, 105, 105), + ) for y in range(0, height): - draw.line(((0, y * grid_unit), (width * grid_unit, y * grid_unit)), fill=(105, 105, 105)) + draw.line( + ((0, y * grid_unit), (width * grid_unit, y * grid_unit)), + fill=(105, 105, 105), + ) return im @staticmethod def _render_board(state, board, colors, unit, fix, extra_info=None): - ''' - 完成基本渲染棋盘操作 - 设置extra_info参数仅为了保持子类方法签名的一致 - ''' + """ + 完成基本渲染棋盘操作 + 设置extra_info参数仅为了保持子类方法签名的一致 + """ im = board.copy() draw = ImageDraw.Draw(im) for x, row in zip(count(0), state): for y, state in zip(count(0), row): if state == 0: continue - draw.rectangle(build_rectangle(y, x, unit, fix), fill=tuple(colors[state]), outline=(192, 192, 192)) + draw.rectangle( + build_rectangle(y, x, unit, fix), + fill=tuple(colors[state]), + outline=(192, 192, 192), + ) return im @staticmethod @@ -188,9 +251,16 @@ def parse_extra_info(data): def build_rectangle(x, y, unit_size=UNIT, fix=FIX): - return x * unit_size + unit_size // fix, y * unit_size + unit_size // fix, (x + 1) * unit_size - unit_size // fix, ( - y + 1) * unit_size - unit_size // fix + return ( + x * unit_size + unit_size // fix, + y * unit_size + unit_size // fix, + (x + 1) * unit_size - unit_size // fix, + (y + 1) * unit_size - unit_size // fix, + ) def generate_color(n): - return [tuple(map(lambda n: int(n), 
np.random.choice(range(256), size=3))) for _ in range(n)] \ No newline at end of file + return [ + tuple(map(lambda n: int(n), np.random.choice(range(256), size=3))) + for _ in range(n) + ] diff --git a/openrl/envs/snake/observation.py b/openrl/envs/snake/observation.py index 4e1c65b1..6e28b37f 100644 --- a/openrl/envs/snake/observation.py +++ b/openrl/envs/snake/observation.py @@ -1,6 +1,6 @@ # -*- coding:utf-8 -*- -# 作者:zruizhi -# 创建时间: 2020/11/13 3:51 下午 +# 作者:zruizhi +# 创建时间: 2020/11/13 3:51 下午 # 描述:observation的各种接口类 obs_type = ["grid", "vector", "dict"] @@ -9,7 +9,7 @@ class GridObservation(object): def get_grid_observation(self, current_state, player_id, info_before): raise NotImplementedError - def get_grid_many_observation(self, current_state, player_id_list, info_before=''): + def get_grid_many_observation(self, current_state, player_id_list, info_before=""): all_obs = [] for i in player_id_list: all_obs.append(self.get_grid_observation(current_state, i, info_before)) @@ -20,7 +20,9 @@ class VectorObservation(object): def get_vector_observation(self, current_state, player_id, info_before): raise NotImplementedError - def get_vector_many_observation(self, current_state, player_id_list, info_before=''): + def get_vector_many_observation( + self, current_state, player_id_list, info_before="" + ): all_obs = [] for i in player_id_list: all_obs.append(self.get_vector_observation(current_state, i, info_before)) @@ -31,7 +33,7 @@ class DictObservation(object): def get_dict_observation(self, current_state, player_id, info_before): raise NotImplementedError - def get_dict_many_observation(self, current_state, player_id_list, info_before=''): + def get_dict_many_observation(self, current_state, player_id_list, info_before=""): all_obs = [] for i in player_id_list: all_obs.append(self.get_dict_observation(current_state, i, info_before)) @@ -56,4 +58,4 @@ def get_custom_many_obs_space(self, player_id_list): all_obs_space = [] for i in player_id_list: all_obs_space.append(self.get_custom_obs_space(i)) - return all_obs_space \ No newline at end of file + return all_obs_space diff --git a/openrl/envs/snake/snake.py b/openrl/envs/snake/snake.py index f9eb3f54..84e09f8b 100644 --- a/openrl/envs/snake/snake.py +++ b/openrl/envs/snake/snake.py @@ -2,20 +2,32 @@ # 作者:zruizhi # 创建时间: 2020/7/30 17:24 下午 # 描述: -from .gridgame import GridGame +import itertools import random from itertools import count +from typing import Optional + +import matplotlib.pyplot as plt import numpy as np -from PIL import ImageDraw, ImageFont -from .observation import * -from .discrete import Discrete -import itertools from gym import Env, spaces -from PIL import Image +from PIL import Image, ImageDraw, ImageFont + +from .discrete import Discrete +from .gridgame import GridGame, generate_color +from .observation import * + + +def convert_to_onehot(joint_action): + new_joint_action = [] + for action in joint_action: + onehot_action = np.zeros(4) + onehot_action[action] = 1 + new_joint_action.append(onehot_action) + return new_joint_action class SnakeEatBeans(GridGame, GridObservation, DictObservation): - def __init__(self, env_id: int = 0, render: bool = False): + def __init__(self, render_mode: Optional[str] = None): conf = { "class_literal": "SnakeEatBeans", "n_player": 2, @@ -73,15 +85,28 @@ def __init__(self, env_id: int = 0, render: bool = False): self.save_internal = conf["save_interval"] self.save_path = conf["save_path"] self.episode = 0 - self.render = render - self.img_list = [] - self.env_id = env_id + self.fig, 
self.ax = None, None + if render_mode in ["human", "rgb_array"]: + self.need_render = True + if render_mode == "human": + plt.ion() + self.fig, self.ax = plt.subplots() + else: + self.need_render = False + self.render_mode = render_mode + self.img_list = None + self.render_img = None + + self.init_colors = colors + self.colors = None def seed(self, seed=None): if seed is None: - np.random.seed(1) + np.random.seed(0) + random.seed(0) else: np.random.seed(seed) + random.seed(seed) def check_win(self): flg = self.won.index(max(self.won)) + 2 @@ -110,6 +135,16 @@ def set_action_space(self): return action_space def reset(self): + if self.need_render: + self.img_list = [] + self.render_img = None + if self.colors is None: + self.colors = ( + self.init_colors + + generate_color(self.cell_size - len(self.init_colors) + 1) + if self.init_colors is not None + else generate_color(self.cell_size) + ) self.step_cnt = 1 self.snakes_position = ( {} @@ -120,7 +155,7 @@ def reset(self): self.current_state = self.init_state() self.all_observes = self.get_all_observes() self.terminate_flg = False - self.img_list = [] + self.episode += 1 # available actions @@ -128,16 +163,15 @@ def reset(self): right_avail_actions = np.ones([self.num_enemys, self.action_dim]) avail_actions = np.concatenate([left_avail_actions, right_avail_actions], 0) # process obs - raw_obs = self.all_observes[0] - obs = self.raw2vec(raw_obs) - share_obs = obs.copy() - info = {"action_mask": avail_actions} - return raw_obs, info # obs:(n_player, 288) - # return self.all_observes + info = {"action_mask": avail_actions} + self.inner_render() + return self.all_observes, info def step(self, joint_action): - info_before = self.step_before_info() + if np.array(joint_action).shape == (2,): + joint_action = convert_to_onehot(joint_action) + joint_action = np.expand_dims(joint_action, 1) all_observes, info_after = self.get_next_state(joint_action) done = self.is_terminal() @@ -149,34 +183,31 @@ def step(self, joint_action): raw_obs = all_observes[0] obs = self.raw2vec(raw_obs) - share_obs = obs.copy() rewards = np.expand_dims(np.array(reward), axis=1) dones = [done] * self.n_player infos = info_after - if self.render: - img = self.render_board() - img_pil = Image.fromarray(img) - - self.img_list.append(img_pil) - - if done and self.episode % self.save_internal == 0 and self.env_id == 0: - self.img_list[0].save( - self.save_path.format(self.episode), - save_all=True, - append_images=self.img_list[1:], - duration=500, - ) - print("save replay gif to" + self.save_path.format(self.episode)) - infos.update({"action_mask": avail_actions}) - return raw_obs, rewards, dones, infos - # return all_observes, reward, done, info_before, info_after + self.inner_render() + return self.all_observes, rewards, dones, infos # obs: 0-空白 1-豆子 2-我方蛇头 3-我方蛇身 4-敌方蛇头 5-敌方蛇身 + def inner_render(self): + if not self.need_render: + return + img = self.render_board() + self.render_img = img + if self.render_mode == "human": + self.ax.imshow(img, cmap="gray") + plt.draw() + plt.pause(0.1) + + def render(self): + return self.render_img + def raw2vec(self, raw_obs): control_index = raw_obs["controlled_snake_index"] width = raw_obs["board_width"] @@ -618,6 +649,10 @@ def parse_extra_info(data): return [i[0] for i in d] + def close(self): + if self.render_mode == "human": + plt.close(self.fig) + class Snake: def __init__(self, player_id, board_width, board_height, init_len): diff --git a/openrl/envs/snake/snake_3v3.py b/openrl/envs/snake/snake_3v3.py index 80364b3d..78d787ef 100644 
--- a/openrl/envs/snake/snake_3v3.py +++ b/openrl/envs/snake/snake_3v3.py @@ -1,51 +1,50 @@ # -*- coding:utf-8 -*- -# 作者:zruizhi +# 作者:zruizhi # 创建时间: 2020/7/30 17:24 下午 # 描述: -from .gridgame import GridGame +import copy +import itertools import random +import time from itertools import count + import numpy as np -from PIL import ImageDraw, ImageFont -from .observation import * -from .discrete import Discrete -from .common import Board, HiddenPrints, SnakePos #TODO: Snake类的重名问题 -import itertools from gym import Env, spaces -from PIL import Image - -import time -import copy +from PIL import Image, ImageDraw, ImageFont +from .common import Board, HiddenPrints, SnakePos # TODO: Snake类的重名问题 +from .discrete import Discrete +from .gridgame import GridGame +from .observation import * class SnakeEatBeans(GridGame, GridObservation, DictObservation): def __init__(self, all_args, env_id): self.all_args = all_args conf = { - "class_literal": "SnakeEatBeans", - "n_player": 6, - "board_width": 20, - "board_height": 10, - "channels": 15, - "cell_range": 8, - "n_beans": 5, - "max_step": 200, - "game_name": "snakes", - "is_obs_continuous": False, - "is_act_continuous": False, - "agent_nums": [3,3], - "obs_type": ["dict","dict"], - "save_interval": 100, - "save_path": "../../replay/snake_3v3/replay_{}.gif" + "class_literal": "SnakeEatBeans", + "n_player": 6, + "board_width": 20, + "board_height": 10, + "channels": 15, + "cell_range": 8, + "n_beans": 5, + "max_step": 200, + "game_name": "snakes", + "is_obs_continuous": False, + "is_act_continuous": False, + "agent_nums": [3, 3], + "obs_type": ["dict", "dict"], + "save_interval": 100, + "save_path": "../../replay/snake_3v3/replay_{}.gif", } self.terminate_flg = False - colors = conf.get('colors', [(255, 255, 255), (255, 140, 0)]) + colors = conf.get("colors", [(255, 255, 255), (255, 140, 0)]) super(SnakeEatBeans, self).__init__(conf, colors) # 0: 没有 1:食物 2-n_player+1:各玩家蛇身 self.n_cell_type = self.n_player + 2 self.step_cnt = 1 - self.n_beans = int(conf['n_beans']) + self.n_beans = int(conf["n_beans"]) # 方向[-2,2,-1,1]分别表示[上,下,左,右] self.actions = [-2, 2, -1, 1] self.actions_name = {-2: "up", 2: "down", -1: "left", 1: "right"} @@ -58,7 +57,10 @@ def __init__(self, all_args, env_id): self.current_state = self.init_state() self.all_observes = self.get_all_observes() if self.n_player * self.init_len > self.board_height * self.board_width: - raise Exception("玩家数量过多:%d,超出board范围:%d,%d" % (self.n_player, self.board_width, self.board_height)) + raise Exception( + "玩家数量过多:%d,超出board范围:%d,%d" + % (self.n_player, self.board_width, self.board_height) + ) self.input_dimension = self.board_width * self.board_height self.action_dim = self.get_action_dim() @@ -67,10 +69,23 @@ def __init__(self, all_args, env_id): self.num_agents = conf["agent_nums"][0] self.num_enemys = conf["agent_nums"][1] - self.observation_space = [spaces.Box(low=-np.inf, high=-np.inf, shape=(self.channels, self.board_width, self.board_height), dtype=np.float32)] + self.observation_space = [ + spaces.Box( + low=-np.inf, + high=-np.inf, + shape=(self.channels, self.board_width, self.board_height), + dtype=np.float32, + ) + ] self.share_observation_space = [] - self.share_observation_space = [spaces.Box( - low=-np.inf, high=+np.inf, shape=(self.channels, self.board_width, self.board_height), dtype=np.float32)] + self.share_observation_space = [ + spaces.Box( + low=-np.inf, + high=+np.inf, + shape=(self.channels, self.board_width, self.board_height), + dtype=np.float32, + ) + ] self.action_space = 
[Discrete(4) for _ in range(self.n_player)] self.save_interval = conf["save_interval"] self.save_path = conf["save_path"] @@ -98,10 +113,12 @@ def get_dict_observation(self, current_state, player_id, info_before): snake = self.players[i] key_info[snake.player_id] = snake.segments # key_info['state_map'] = current_state - key_info['board_width'] = self.board_width - key_info['board_height'] = self.board_height - key_info['last_direction'] = info_before.get('directions') if isinstance(info_before, dict) else None - key_info['controlled_snake_index'] = player_id + key_info["board_width"] = self.board_width + key_info["board_height"] = self.board_height + key_info["last_direction"] = ( + info_before.get("directions") if isinstance(info_before, dict) else None + ) + key_info["controlled_snake_index"] = player_id return key_info @@ -111,7 +128,9 @@ def set_action_space(self): def reset(self): self.step_cnt = 1 - self.snakes_position = {} # 格式类似于{1: [[3, 1], [4, 3], [1, 2], [0, 6], [3, 3]], 2: [[3, 0], [3, 7], [3, 6]], 3: [[2, 7], [1, 7], [0, 7]]} + self.snakes_position = ( + {} + ) # 格式类似于{1: [[3, 1], [4, 3], [1, 2], [0, 6], [3, 3]], 2: [[3, 0], [3, 7], [3, 6]], 3: [[2, 7], [1, 7], [0, 7]]} self.players = [] self.cur_bean_num = 0 self.beans_position = [] @@ -129,7 +148,7 @@ def reset(self): board = [] for i in range(self.n_player): board.append([self.get_board(self.all_observes[i])]) - + board_ = np.concatenate(board) obs = [] for raw_obs in self.all_observes: @@ -137,9 +156,9 @@ def reset(self): obs_ = np.concatenate(obs) obs_ = np.concatenate((obs_, board_), axis=1) - share_obs = np.repeat(np.expand_dims(obs_[0], axis = 0), 6, 0) + share_obs = np.repeat(np.expand_dims(obs_[0], axis=0), 6, 0) - return obs_, share_obs, avail_actions #obs:(n_player, 288) + return obs_, share_obs, avail_actions # obs:(n_player, 288) # return self.all_observes @@ -147,7 +166,7 @@ def step(self, joint_action): info_before = self.step_before_info() joint_action = np.expand_dims(joint_action, 1) all_observes, info_after = self.get_next_state(joint_action) - done = self.is_terminal() + done = self.is_terminal() reward = self.get_reward(joint_action) left_avail_actions = np.ones([self.num_agents, self.action_dim]) right_avail_actions = np.ones([self.num_enemys, self.action_dim]) @@ -156,36 +175,39 @@ def step(self, joint_action): board = [] for i in range(self.n_player): board.append([self.get_board(all_observes[i])]) - + board_ = np.concatenate(board) obs = [] - + for raw_obs in all_observes: - obs.append([self.raw2vec(raw_obs)]) # obs:[[(14, 20, 10)], [], ..., []] + obs.append([self.raw2vec(raw_obs)]) # obs:[[(14, 20, 10)], [], ..., []] - obs_ = np.concatenate(obs) #(n_player, channels, width, height) + obs_ = np.concatenate(obs) # (n_player, channels, width, height) obs_ = np.concatenate((obs_, board_), axis=1) - share_obs = np.repeat(np.expand_dims(obs_[0], axis = 0), 6, 0) - + share_obs = np.repeat(np.expand_dims(obs_[0], axis=0), 6, 0) + if done: reward = self.get_final_reward(reward) - + rewards = np.expand_dims(np.array(reward), axis=1) - + dones = [done] * self.n_player infos = [info_after] * self.n_player if self.render and self.episode % self.save_interval == 0 and self.env_id == 0: - img = self.render_board() img_pil = Image.fromarray(img) self.img_list.append(img_pil) if done: - self.img_list[0].save(self.save_path.format(self.episode), - save_all = True, append_images = self.img_list[1:], duration = 400) + self.img_list[0].save( + self.save_path.format(self.episode), + save_all=True, + 
append_images=self.img_list[1:], + duration=400, + ) print("save replay gif to" + self.save_path.format(self.episode)) return obs_, share_obs, rewards, dones, infos, avail_actions @@ -193,24 +215,24 @@ def step(self, joint_action): # obs: 0 空白 1 豆子 2 我方蛇头 3 我方蛇身 4-5 友方蛇头 6-7 友方蛇身 8-10 敌方蛇头 11-13 敌方蛇身 def raw2vec(self, raw_obs): - control_index = raw_obs['controlled_snake_index'] - width = raw_obs['board_width'] - height = raw_obs['board_height'] + control_index = raw_obs["controlled_snake_index"] + width = raw_obs["board_width"] + height = raw_obs["board_height"] beans = raw_obs[1] pos = raw_obs[control_index] - obs = np.zeros(width * height, dtype = int) + obs = np.zeros(width * height, dtype=int) head_h, head_w = pos[0] obs[head_h * width + head_w] = 2 for bean in beans: h, w = bean obs[h * width + w] = 1 - + for p in pos[1:]: h, w = p obs[h * width + w] = 3 - + if control_index == 2: h1, w1 = raw_obs[3][0] h2, w2 = raw_obs[4][0] @@ -314,10 +336,14 @@ def raw2vec(self, raw_obs): h, w = p obs[h * width + w] = i + 9 - obs_ = np.zeros(width * height * (self.channels - 1), dtype = int) + obs_ = np.zeros(width * height * (self.channels - 1), dtype=int) for i in range(width * height): - obs_[i * (self.channels - 1) + obs[i]] = 1 # channels的最后一维是territory matrix, 此处不生成, 要减去 - obs_ = obs_.reshape(height, width, (self.channels - 1)) # (height, width, channels-1 ) + obs_[i * (self.channels - 1) + obs[i]] = ( + 1 # channels的最后一维是territory matrix, 此处不生成, 要减去 + ) + obs_ = obs_.reshape( + height, width, (self.channels - 1) + ) # (height, width, channels-1 ) obs_ = obs_.transpose((2, 1, 0)) return obs_ @@ -331,28 +357,31 @@ def get_board(self, observation_list): # read observation obs = observation_list.copy() - board_height = obs['board_height'] # 10 - board_width = obs['board_width'] # 20 + board_height = obs["board_height"] # 10 + board_width = obs["board_width"] # 20 # print("obs['controlled_snake_index'] is ", obs['controlled_snake_index']) - ctrl_agent_index = obs['controlled_snake_index'] - 2 # 0, 1, 2, 3, 4, 5 + ctrl_agent_index = obs["controlled_snake_index"] - 2 # 0, 1, 2, 3, 4, 5 # last_directions = obs['last_direction'] # ['up', 'left', 'down', 'left', 'left', 'left'] beans_positions = obs[1] # e.g.[[7, 15], [4, 14], [5, 12], [4, 12], [5, 7]] - snakes = {key - 2: SnakePos(obs[key], board_height, board_width, beans_positions) - for key in obs.keys() & {_ + 2 for _ in range(snakes_count)}} # &: intersection + snakes = { + key - 2: SnakePos(obs[key], board_height, board_width, beans_positions) + for key in obs.keys() & {_ + 2 for _ in range(snakes_count)} + } # &: intersection team_indexes = [_ for _ in teams if ctrl_agent_index in _][0] init_board = Board(board_height, board_width, snakes, beans_positions, teams) bd = copy.deepcopy(init_board) with HiddenPrints(): - while not all(_ == [] for _ in bd.open.values()): # loop until all values in open are empty list + while not all( + _ == [] for _ in bd.open.values() + ): # loop until all values in open are empty list bd.step() board = np.array(bd.board).transpose() board = np.expand_dims(board, axis=0) return board - def init_state(self): for i in range(self.n_player): s = Snake(i + 2, self.board_width, self.board_height, self.init_len) @@ -363,7 +392,9 @@ def init_state(self): else: origin_hit = 0 cur_head = s.move_and_add(self.snakes_position) - cur_hit = self.is_hit(cur_head, self.snakes_position) or self.is_hit(cur_head, {i:s.segments[1:]}) + cur_hit = self.is_hit(cur_head, self.snakes_position) or self.is_hit( + cur_head, {i: 
s.segments[1:]} + ) if origin_hit or cur_hit: x = random.randrange(0, self.board_height) y = random.randrange(0, self.board_width) @@ -378,8 +409,14 @@ def init_state(self): self.generate_beans() self.init_info = { - "snakes_position": [list(v) for k, v in sorted(self.snakes_position.items(), key=lambda item: item[0])], - "beans_position": list(self.beans_position)} + "snakes_position": [ + list(v) + for k, v in sorted( + self.snakes_position.items(), key=lambda item: item[0] + ) + ], + "beans_position": list(self.beans_position), + } directs = [] for i in range(len(self.players)): s = self.players[i] @@ -389,7 +426,10 @@ def init_state(self): return self.update_state() def update_state(self): - next_state = [[[0] * self.cell_dim for _ in range(self.board_width)] for _ in range(self.board_height)] + next_state = [ + [[0] * self.cell_dim for _ in range(self.board_width)] + for _ in range(self.board_height) + ] for i in range(self.n_player): snake = self.players[i] for pos in snake.segments: @@ -400,7 +440,7 @@ def update_state(self): return next_state - def step_before_info(self, info=''): + def step_before_info(self, info=""): directs = [] for i in range(len(self.players)): s = self.players[i] @@ -423,7 +463,9 @@ def is_hit(self, cur_head, snakes_position): return is_hit def generate_beans(self): - all_valid_positions = set(itertools.product(range(0, self.board_height), range(0, self.board_width))) + all_valid_positions = set( + itertools.product(range(0, self.board_height), range(0, self.board_width)) + ) all_valid_positions = all_valid_positions - set(map(tuple, self.beans_position)) for positions in self.snakes_position.values(): all_valid_positions = all_valid_positions - set(map(tuple, positions)) @@ -432,10 +474,16 @@ def generate_beans(self): all_valid_positions = np.array(list(all_valid_positions)) left_valid_positions = len(all_valid_positions) - new_bean_num = left_bean_num if left_valid_positions > left_bean_num else left_valid_positions + new_bean_num = ( + left_bean_num + if left_valid_positions > left_bean_num + else left_valid_positions + ) if left_valid_positions > 0: - new_bean_positions_idx = np.random.choice(left_valid_positions, size=new_bean_num, replace=False) + new_bean_positions_idx = np.random.choice( + left_valid_positions, size=new_bean_num, replace=False + ) new_bean_positions = all_valid_positions[new_bean_positions_idx] else: new_bean_positions = [] @@ -444,10 +492,10 @@ def generate_beans(self): self.beans_position.append(list(new_bean_pos)) self.cur_bean_num += 1 - def get_all_observes(self, before_info=''): + def get_all_observes(self, before_info=""): self.all_observes = [] for i in range(self.n_player): - each_obs = self.get_dict_observation(self.current_state, i+2, before_info) + each_obs = self.get_dict_observation(self.current_state, i + 2, before_info) self.all_observes.append(each_obs) return self.all_observes @@ -461,12 +509,12 @@ def get_next_state(self, all_action): eat_snakes = [0] * self.n_player ally_reward = 0 enemy_reward = 0 - for i in range(self.n_player): # 判断是否吃到了豆子 + for i in range(self.n_player): # 判断是否吃到了豆子 snake = self.players[i] act = self.actions[np.argmax(all_action[i][0])] # print(snake.player_id, "此轮的动作为:", self.actions_name[act]) snake.change_direction(act) - snake.move_and_add(self.snakes_position) # 更新snake.segment + snake.move_and_add(self.snakes_position) # 更新snake.segment if self.be_eaten(snake.headPos): # @yanxue snake.snake_reward = 1 eat_snakes[i] = 1 @@ -476,7 +524,7 @@ def get_next_state(self, all_action): # 
print(snake.player_id, snake.segments) # @yanxue snake_position = [[-1] * self.board_width for _ in range(self.board_height)] re_generatelist = [0] * self.n_player - for i in range(self.n_player): #判断是否相撞 + for i in range(self.n_player): # 判断是否相撞 snake = self.players[i] segment = snake.segments for j in range(len(segment)): @@ -494,7 +542,9 @@ def get_next_state(self, all_action): snake = self.players[i] if re_generatelist[i] == 1: if eat_snakes[i] == 1: - snake.snake_reward = self.init_len - len(snake.segments) + 1 #身体越长,惩罚越大 + snake.snake_reward = ( + self.init_len - len(snake.segments) + 1 + ) # 身体越长,惩罚越大 else: snake.snake_reward = self.init_len - len(snake.segments) snake.segments = [] @@ -505,9 +555,13 @@ def get_next_state(self, all_action): enemy_reward += self.players[i + self.num_agents].snake_reward alpha = 0.8 for i in range(self.num_agents): - self.players[i].snake_reward = (self.players[i].snake_reward - enemy_reward / 3) * alpha + ally_reward / 3 * (1 - alpha) - for i in range(self.num_agents,self.n_player): - self.players[i].snake_reward = (self.players[i].snake_reward - ally_reward / 3) * alpha + enemy_reward / 3 * (1 - alpha) + self.players[i].snake_reward = ( + self.players[i].snake_reward - enemy_reward / 3 + ) * alpha + ally_reward / 3 * (1 - alpha) + for i in range(self.num_agents, self.n_player): + self.players[i].snake_reward = ( + self.players[i].snake_reward - ally_reward / 3 + ) * alpha + enemy_reward / 3 * (1 - alpha) for i in range(self.n_player): snake = self.players[i] @@ -529,7 +583,12 @@ def get_next_state(self, all_action): s = self.players[i] self.won[i] = s.score info_after = {} - info_after["snakes_position"] = [list(v) for k, v in sorted(self.snakes_position.items(), key=lambda item: item[0])] + info_after["snakes_position"] = [ + list(v) + for k, v in sorted( + self.snakes_position.items(), key=lambda item: item[0] + ) + ] info_after["beans_position"] = list(self.beans_position) info_after["hit"] = re_generatelist info_after["score"] = self.won @@ -568,7 +627,10 @@ def can_regenerate(): if len(seg) < 3: snake.direction = random.choice(self.actions) elif len(seg) == 3: - mid = ([seg[1][0], seg[2][1]], [seg[2][0], seg[1][1]]) + mid = ( + [seg[1][0], seg[2][1]], + [seg[2][0], seg[1][1]], + ) if seg[0] in mid: seg[0], seg[1] = seg[1], seg[0] snake.segments = seg @@ -641,7 +703,10 @@ def is_terminal(self): # all_member = len(self.beans_position) for s in self.players: all_member += len(s.segments) - is_done = self.step_cnt > self.max_step or all_member > self.board_height * self.board_width + is_done = ( + self.step_cnt > self.max_step + or all_member > self.board_height * self.board_width + ) return is_done or self.terminate_flg @@ -676,8 +741,8 @@ def get_action_dim(self): def draw_board(self): cols = [chr(i) for i in range(65, 65 + self.board_width)] - s = ', '.join(cols) - print(' ', s) + s = ", ".join(cols) + print(" ", s) for i in range(self.board_height): # print(i) print(chr(i + 65), self.current_state[i]) @@ -690,18 +755,27 @@ def _render_board(state, board, colors, unit, fix, extra_info): fnt = ImageFont.load_default() for i, pos in zip(count(1), extra_info): x, y = pos - draw.text(((y + 1 / 4) * unit, (x + 1 / 4) * unit), - "#{}".format(i), - font=fnt, - fill=(0, 0, 0)) + draw.text( + ((y + 1 / 4) * unit, (x + 1 / 4) * unit), + "#{}".format(i), + font=fnt, + fill=(0, 0, 0), + ) return im def render_board(self): extra_info = [tuple(x.headPos) for x in self.players] im_data = np.array( - 
SnakeEatBeans._render_board(self.get_render_data(self.current_state), self.grid, self.colors, self.grid_unit, self.grid_unit_fix, - extra_info)) + SnakeEatBeans._render_board( + self.get_render_data(self.current_state), + self.grid, + self.colors, + self.grid_unit, + self.grid_unit_fix, + extra_info, + ) + ) return im_data @staticmethod @@ -709,14 +783,14 @@ def parse_extra_info(data): # return eval(re.search(r'({.*})', data['info_after']).group(1)).values() # d = (eval(eval(data)['snakes_position']).values()) if isinstance(data, str): - d = eval(data)['snakes_position'] + d = eval(data)["snakes_position"] else: - d = data['snakes_position'] + d = data["snakes_position"] return [i[0] for i in d] -class Snake(): +class Snake: def __init__(self, player_id, board_width, board_height, init_len): self.actions = [-2, 2, -1, 1] self.actions_name = {-2: "up", 2: "down", -1: "left", 1: "right"} @@ -777,4 +851,4 @@ def move_and_add(self, snakes_position): return cur_head def pop(self): - self.segments.pop() # 在蛇尾减去一格 \ No newline at end of file + self.segments.pop() # 在蛇尾减去一格 diff --git a/openrl/envs/snake/snake_pettingzoo.py b/openrl/envs/snake/snake_pettingzoo.py new file mode 100644 index 00000000..a9c18c76 --- /dev/null +++ b/openrl/envs/snake/snake_pettingzoo.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2023 The OpenRL Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""""" +import functools +from copy import deepcopy +from typing import Optional + +import numpy as np +from gymnasium import spaces +from pettingzoo import AECEnv +from pettingzoo.utils import agent_selector + +from openrl.envs.snake.snake import SnakeEatBeans + +NONE = 4 + + +class SnakeEatBeansAECEnv(AECEnv): + metadata = {"render.modes": ["human"], "name": "SnakeEatBeans"} + + def __init__(self, render_mode: Optional[str] = None): + self.env = SnakeEatBeans(render_mode) + + self.agent_name_mapping = dict( + zip(self.possible_agents, list(range(len(self.possible_agents)))) + ) + self._action_spaces = { + agent: spaces.Discrete(4) for agent in self.possible_agents + } + self._observation_spaces = { + agent: spaces.Box(low=-np.inf, high=np.inf, shape=(288,), dtype=np.float32) + for agent in self.possible_agents + } + + self.agents = self.possible_agents[:] + + self.observations = {agent: NONE for agent in self.agents} + self.raw_obs, self.raw_reward, self.raw_done, self.raw_info = ( + None, + None, + None, + None, + ) + + @functools.lru_cache(maxsize=None) + def observation_space(self, agent): + return deepcopy(self._observation_spaces[agent]) + + @functools.lru_cache(maxsize=None) + def action_space(self, agent): + return deepcopy(self._action_spaces[agent]) + + def observe(self, agent): + return self.raw_obs[self.agent_name_mapping[agent]] + + def reset( + self, + seed: Optional[int] = None, + options: Optional[dict] = None, + ): + if seed is not None: + self.env.seed(seed) + self.agents = self.possible_agents[:] + self.rewards = {agent: 0 for agent in self.agents} + self._cumulative_rewards = {agent: 0 for agent in self.agents} + self.terminations = {agent: False for agent in self.agents} + self.truncations = {agent: False for agent in self.agents} + self.infos = {agent: {} for agent in self.agents} + self.state = {agent: NONE for agent in self.agents} + self.observations = {agent: NONE for agent in self.agents} + + self.raw_obs, self.raw_info = self.env.reset() + + self._agent_selector = agent_selector(self.agents) + self.agent_selection = self._agent_selector.next() + + def step(self, action): + agent = self.agent_selection + self._cumulative_rewards[agent] = 0 + self.state[self.agent_selection] = action + if self._agent_selector.is_last(): + joint_action = [self.state[agent] for agent in self.agents] + self.raw_obs, self.raw_reward, self.raw_done, self.raw_info = self.env.step( + joint_action + ) + + self.rewards = { + agent: self.raw_reward[i] for i, agent in enumerate(self.agents) + } + + if np.any(self.raw_done): + for key in self.terminations: + self.terminations[key] = True + else: + self.state[self.agents[1 - self.agent_name_mapping[agent]]] = NONE + self._clear_rewards() + + # selects the next agent. + self.agent_selection = self._agent_selector.next() + self._accumulate_rewards() + + def render(self): + img = self.env.render() + return img + + def close(self): + self.env.close() + + @property + def possible_agents(self): + return ["player_" + str(i) for i in range(self.env.n_player)] + + @property + def num_agents(self): + return len(self.possible_agents) diff --git a/openrl/envs/snake/space.py b/openrl/envs/snake/space.py index 918dcf2e..672e2367 100644 --- a/openrl/envs/snake/space.py +++ b/openrl/envs/snake/space.py @@ -14,8 +14,10 @@ class Space(object): Moreover, some implementations of Reinforcement Learning algorithms might not handle custom spaces properly. Use custom spaces with care. 
""" + def __init__(self, shape=None, dtype=None): import numpy as np # takes about 300-400ms to import, so we load lazily + self.shape = None if shape is None else tuple(shape) self.dtype = None if dtype is None else np.dtype(dtype) self._np_random = None @@ -31,12 +33,12 @@ def np_random(self): return self._np_random def sample(self): - """Randomly sample an element of this space. Can be + """Randomly sample an element of this space. Can be uniform or non-uniform sampling based on boundedness of space.""" raise NotImplementedError def seed(self, seed=None): - """Seed the PRNG of this space. """ + """Seed the PRNG of this space.""" self._np_random, seed = seeding.np_random(seed) return [seed] @@ -58,4 +60,4 @@ def to_jsonable(self, sample_n): def from_jsonable(self, sample_n): """Convert a JSONable data type to a batch of samples from this space.""" # By default, assume identity is JSONable - return \ No newline at end of file + return diff --git a/openrl/envs/vec_env/async_venv.py b/openrl/envs/vec_env/async_venv.py index 1ca95674..7c620aee 100644 --- a/openrl/envs/vec_env/async_venv.py +++ b/openrl/envs/vec_env/async_venv.py @@ -233,8 +233,10 @@ def reset_send( if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( - "Calling `reset_send` while waiting for a pending call to" - f" `{self._state.value}` to complete", + ( + "Calling `reset_send` while waiting for a pending call to" + f" `{self._state.value}` to complete" + ), self._state.value, ) @@ -326,8 +328,10 @@ def step_send(self, actions: np.ndarray): self._assert_is_running() if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( - "Calling `step_send` while waiting for a pending call to" - f" `{self._state.value}` to complete.", + ( + "Calling `step_send` while waiting for a pending call to" + f" `{self._state.value}` to complete." + ), self._state.value, ) @@ -571,8 +575,10 @@ def call_send(self, name: str, *args, **kwargs): self._assert_is_running() if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( - "Calling `call_send` while waiting " - f"for a pending call to `{self._state.value}` to complete.", + ( + "Calling `call_send` while waiting " + f"for a pending call to `{self._state.value}` to complete." + ), str(self._state.value), ) @@ -629,8 +635,10 @@ def exec_func_send(self, func: Callable, indices, *args, **kwargs): self._assert_is_running() if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( - "Calling `exec_func_send` while waiting " - f"for a pending call to `{self._state.value}` to complete.", + ( + "Calling `exec_func_send` while waiting " + f"for a pending call to `{self._state.value}` to complete." + ), str(self._state.value), ) @@ -707,8 +715,10 @@ def set_attr(self, name: str, values: Union[List[Any], Tuple[Any], object]): if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( - "Calling `set_attr` while waiting " - f"for a pending call to `{self._state.value}` to complete.", + ( + "Calling `set_attr` while waiting " + f"for a pending call to `{self._state.value}` to complete." 
+ ), str(self._state.value), ) diff --git a/openrl/envs/wrappers/pettingzoo_wrappers.py b/openrl/envs/wrappers/pettingzoo_wrappers.py index 687384a4..647c13be 100644 --- a/openrl/envs/wrappers/pettingzoo_wrappers.py +++ b/openrl/envs/wrappers/pettingzoo_wrappers.py @@ -24,11 +24,12 @@ class SeedEnv(BaseWrapper): def reset(self, seed: Optional[int] = None, options: Optional[dict] = None): super().reset(seed=seed, options=options) - - for i, space in enumerate( - list(self.action_spaces.values()) + list(self.observation_spaces.values()) - ): - space.seed(seed + i * 7891) + if seed is not None: + for i, space in enumerate( + list(self.action_spaces.values()) + + list(self.observation_spaces.values()) + ): + space.seed(seed + i * 7891) class RecordWinner(BaseWrapper): diff --git a/openrl/envs/wrappers/util.py b/openrl/envs/wrappers/util.py index a7bf6379..a0a97576 100644 --- a/openrl/envs/wrappers/util.py +++ b/openrl/envs/wrappers/util.py @@ -38,6 +38,8 @@ def nest_expand_dim(input: Any) -> Any: return [input] elif isinstance(input, np.int64): return [input] + elif input is None: + return [input] else: raise NotImplementedError("Not support type: {}".format(type(input))) diff --git a/openrl/modules/networks/utils/nlp/hf_generation_utils.py b/openrl/modules/networks/utils/nlp/hf_generation_utils.py index 8a44d8c7..37d80875 100644 --- a/openrl/modules/networks/utils/nlp/hf_generation_utils.py +++ b/openrl/modules/networks/utils/nlp/hf_generation_utils.py @@ -1359,9 +1359,11 @@ def generate( elif max_length is not None and max_new_tokens is not None: # Both are set, this is odd, raise a warning warnings.warn( - "Both `max_length` and `max_new_tokens` have been set " - f"but they serve the same purpose. `max_length` {max_length} " - f"will take priority over `max_new_tokens` {max_new_tokens}.", + ( + "Both `max_length` and `max_new_tokens` have been set " + f"but they serve the same purpose. `max_length` {max_length} " + f"will take priority over `max_new_tokens` {max_new_tokens}." + ), UserWarning, ) # default to config if still None @@ -1847,9 +1849,11 @@ def greedy_search( ) if max_length is not None: warnings.warn( - "`max_length` is deprecated in this function, use" - " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])`" - " instead.", + ( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])`" + " instead." + ), UserWarning, ) stopping_criteria = validate_stopping_criteria( @@ -2147,9 +2151,11 @@ def sample( ) if max_length is not None: warnings.warn( - "`max_length` is deprecated in this function, use" - " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" - " instead.", + ( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" + " instead." + ), UserWarning, ) stopping_criteria = validate_stopping_criteria( @@ -2453,9 +2459,11 @@ def beam_search( ) if max_length is not None: warnings.warn( - "`max_length` is deprecated in this function, use" - " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" - " instead.", + ( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" + " instead." 
+ ), UserWarning, ) stopping_criteria = validate_stopping_criteria( @@ -2463,8 +2471,10 @@ def beam_search( ) if len(stopping_criteria) == 0: warnings.warn( - "You don't have defined any stopping_criteria, this will likely" - " loop forever", + ( + "You don't have defined any stopping_criteria, this will likely" + " loop forever" + ), UserWarning, ) pad_token_id = ( @@ -2857,9 +2867,11 @@ def beam_sample( ) if max_length is not None: warnings.warn( - "`max_length` is deprecated in this function, use" - " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" - " instead.", + ( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" + " instead." + ), UserWarning, ) stopping_criteria = validate_stopping_criteria( @@ -3240,9 +3252,11 @@ def group_beam_search( ) if max_length is not None: warnings.warn( - "`max_length` is deprecated in this function, use" - " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" - " instead.", + ( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" + " instead." + ), UserWarning, ) stopping_criteria = validate_stopping_criteria( @@ -3686,9 +3700,11 @@ def constrained_beam_search( ) if max_length is not None: warnings.warn( - "`max_length` is deprecated in this function, use" - " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" - " instead.", + ( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))`" + " instead." + ), UserWarning, ) stopping_criteria = validate_stopping_criteria( @@ -3696,8 +3712,10 @@ def constrained_beam_search( ) if len(stopping_criteria) == 0: warnings.warn( - "You don't have defined any stopping_criteria, this will likely" - " loop forever", + ( + "You don't have defined any stopping_criteria, this will likely" + " loop forever" + ), UserWarning, ) pad_token_id = ( diff --git a/openrl/selfplay/wrappers/base_multiplayer_wrapper.py b/openrl/selfplay/wrappers/base_multiplayer_wrapper.py index cc23c1ff..38050cc7 100644 --- a/openrl/selfplay/wrappers/base_multiplayer_wrapper.py +++ b/openrl/selfplay/wrappers/base_multiplayer_wrapper.py @@ -58,7 +58,7 @@ def action_space( if self.self_player is None: self.env.reset() self.self_player = self.np_random.choice(self.env.agents) - return self.env.action_spaces[self.self_player] + return self.env.action_space(self.self_player) return self._action_space @property @@ -70,7 +70,9 @@ def observation_space( if self.self_player is None: self.env.reset() self.self_player = self.np_random.choice(self.env.agents) + return self.env.observation_spaces[self.self_player] + return self._observation_space @abstractmethod diff --git a/openrl/selfplay/wrappers/random_opponent_wrapper.py b/openrl/selfplay/wrappers/random_opponent_wrapper.py index 96d562e0..e429b605 100644 --- a/openrl/selfplay/wrappers/random_opponent_wrapper.py +++ b/openrl/selfplay/wrappers/random_opponent_wrapper.py @@ -29,6 +29,8 @@ class RandomOpponentWrapper(BaseMultiPlayerWrapper): def get_opponent_action( self, player_name, observation, reward, termination, truncation, info ): - mask = observation["action_mask"] + mask = None + if "action_mask" in observation: + mask = observation["action_mask"] action = self.env.action_space(player_name).sample(mask) return action diff --git 
a/openrl/utils/callbacks/checkpoint_callback.py b/openrl/utils/callbacks/checkpoint_callback.py index 56bf31b8..a4b3f5b6 100644 --- a/openrl/utils/callbacks/checkpoint_callback.py +++ b/openrl/utils/callbacks/checkpoint_callback.py @@ -72,7 +72,9 @@ def _checkpoint_path(self, checkpoint_type: str = "", extension: str = "") -> st """ return os.path.join( self.save_path, - f"{self.name_prefix}_{checkpoint_type}{self.num_time_steps}_steps{'.' if extension else ''}{extension}", + ( + f"{self.name_prefix}_{checkpoint_type}{self.num_time_steps}_steps{'.' if extension else ''}{extension}" + ), ) def _on_step(self) -> bool: diff --git a/openrl/utils/evaluation.py b/openrl/utils/evaluation.py index 391ba10f..d603daa5 100644 --- a/openrl/utils/evaluation.py +++ b/openrl/utils/evaluation.py @@ -68,10 +68,12 @@ def evaluate_policy( if not is_monitor_wrapped and warn: warnings.warn( - "Evaluation environment is not wrapped with a ``Monitor`` wrapper. This" - " may result in reporting modified episode lengths and rewards, if" - " other wrappers happen to modify these. Consider wrapping environment" - " first with ``Monitor`` wrapper.", + ( + "Evaluation environment is not wrapped with a ``Monitor`` wrapper. This" + " may result in reporting modified episode lengths and rewards, if" + " other wrappers happen to modify these. Consider wrapping environment" + " first with ``Monitor`` wrapper." + ), UserWarning, )
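
Note (illustrative sketch, not part of the patch): the `ConvertObs` wrapper added in `examples/snake/wrappers.py` flattens the raw dict observation of `snakes_1v1` into a one-hot vector with 6 cell classes per cell (0 blank, 1 bean, 2 own head, 3 own body, 4 opponent head, 5 opponent body, translating the comment in `snake.py`), plus a second plane that encodes the same board from the opponent's perspective. With the 8-wide, 6-high board used in the example below, 8 * 6 * 2 * 6 = 576, which matches the `Box(shape=(576,))` hard-coded in the wrapper. The hand-built `raw_obs` dict and the assumption that the repository root is on `sys.path` (so `examples.snake.wrappers` resolves) are mine, not from the patch.

```python
# Illustrative sketch: sanity-check the one-hot encoding produced by
# raw2vec() from examples/snake/wrappers.py.
# Assumption: run from the repository root so the `examples` package imports.
from examples.snake.wrappers import raw2vec

# Hand-built raw observation mimicking the dict layout the wrapper reads:
# key 1 holds bean positions, keys 2/3 hold the two snakes (head first),
# and every value is wrapped in an outer list as in the vectorized env.
raw_obs = {
    "controlled_snake_index": [2],   # we control snake 2; the opponent is 5 - 2 = 3
    "board_width": [8],
    "board_height": [6],
    1: [[[1, 1], [3, 5]]],           # bean positions as [row, col]
    2: [[[2, 2], [2, 3], [2, 4]]],   # our snake: head, then body segments
    3: [[[4, 6], [4, 7], [4, 0]]],   # opponent snake: head, then body segments
}

vec = raw2vec(raw_obs)
# 96 cells x 2 planes x 6 one-hot classes = 576 features.
print(vec.shape)  # (1, 576)
print(vec.sum())  # 96.0: each of the 2 * 48 plane cells contributes exactly one 1
```

The end-to-end usage of the newly registered environment (building it through `make("snakes_1v1", ...)` with `RandomOpponentWrapper` and `ConvertObs`, then evaluating an agent) is already shown by the `evaluation()` function earlier in this patch.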