From af26b5dca9d390214c2eb88fde3ac6a86aca87ce Mon Sep 17 00:00:00 2001 From: Byung Suk Yoo Date: Wed, 28 Nov 2018 14:18:00 -0800 Subject: [PATCH 001/443] Merging initial changes to following files to support multihead Siamese model. --- include/lbann/data_readers/CMakeLists.txt | 2 + .../data_readers/offline_patches_npz.hpp | 7 +++ include/lbann/lbann.hpp | 1 + src/data_readers/CMakeLists.txt | 1 + src/data_readers/offline_patches_npz.cpp | 58 ++++++++++++++++++- src/data_store/CMakeLists.txt | 1 + src/proto/init_image_data_readers.cpp | 16 ++++- src/proto/lbann.proto | 4 ++ src/proto/proto_common.cpp | 5 +- 9 files changed, 90 insertions(+), 5 deletions(-) diff --git a/include/lbann/data_readers/CMakeLists.txt b/include/lbann/data_readers/CMakeLists.txt index 425f7e3672a..c66f49b6763 100644 --- a/include/lbann/data_readers/CMakeLists.txt +++ b/include/lbann/data_readers/CMakeLists.txt @@ -31,6 +31,8 @@ set_full_path(THIS_DIR_HEADERS lbann_data_generator.hpp opencv.hpp opencv_extensions.hpp + data_reader_triplet.hpp + data_reader_quadruplet.hpp ) # Add the subdirectories diff --git a/include/lbann/data_readers/offline_patches_npz.hpp b/include/lbann/data_readers/offline_patches_npz.hpp index e3c60bfcc79..6d4c9e9065d 100644 --- a/include/lbann/data_readers/offline_patches_npz.hpp +++ b/include/lbann/data_readers/offline_patches_npz.hpp @@ -57,6 +57,9 @@ class offline_patches_npz { using sample_t = std::pair, label_t>; offline_patches_npz(); + offline_patches_npz(size_t npatches); + offline_patches_npz(std::string divider); + offline_patches_npz(size_t npatches, std::string divider); // TODO: copy constructor and assignment operator for deep-copying if needed // The cnpy structure relies on shared_ptr @@ -80,6 +83,10 @@ class offline_patches_npz { size_t get_num_patches() const { return m_num_patches; } + /// Set the number of patches per sample (the number of image data sources) + void set_num_patches(size_t npatches) { + m_num_patches = npatches; + } /// 
Reconsturct and return the meta-data (patch file names and the label) of idx-th sample sample_t get_sample(const size_t idx) const; /// Return the label of idx-th sample diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index ffd7be7768a..abfe8538517 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -122,6 +122,7 @@ #include "lbann/data_readers/data_reader_multi_images.hpp" #include "lbann/data_readers/data_reader_mnist_siamese.hpp" #include "lbann/data_readers/data_reader_triplet.hpp" +#include "lbann/data_readers/data_reader_quadruplet.hpp" #include "lbann/data_readers/data_reader_synthetic.hpp" #include "lbann/data_readers/data_reader_jag.hpp" #include "lbann/data_readers/data_reader_jag_conduit.hpp" diff --git a/src/data_readers/CMakeLists.txt b/src/data_readers/CMakeLists.txt index 5d9d67f0395..151165091cf 100644 --- a/src/data_readers/CMakeLists.txt +++ b/src/data_readers/CMakeLists.txt @@ -34,6 +34,7 @@ set_full_path(THIS_DIR_SOURCES data_reader_multi_images.cpp data_reader_mnist_siamese.cpp data_reader_triplet.cpp + data_reader_quadruplet.cpp offline_patches_npz.cpp image_preprocessor.cpp image_utils.cpp diff --git a/src/data_readers/offline_patches_npz.cpp b/src/data_readers/offline_patches_npz.cpp index 42d4c03f023..ce8c702d0ef 100644 --- a/src/data_readers/offline_patches_npz.cpp +++ b/src/data_readers/offline_patches_npz.cpp @@ -31,14 +31,36 @@ #include #include +#include + namespace lbann { +offline_patches_npz::offline_patches_npz(size_t npatches, std::string divider) + : m_checked_ok(false), m_lbann_format(false) +{ + m_num_patches = npatches; + m_variant_divider = divider; +} + +offline_patches_npz::offline_patches_npz(size_t npatches) + : m_checked_ok(false), m_lbann_format(false) +{ + m_num_patches = npatches; + m_variant_divider = ".JPEG."; +} + +offline_patches_npz::offline_patches_npz(std::string divider) + : m_checked_ok(false), m_lbann_format(false) +{ + m_num_patches = 3u; + m_variant_divider = divider; +} + 
offline_patches_npz::offline_patches_npz() : m_checked_ok(false), m_num_patches(3u), m_variant_divider(".JPEG."), m_lbann_format(false) {} - bool offline_patches_npz::load(const std::string filename, size_t first_n, bool keep_file_lists) { m_item_class_list.clear(); @@ -88,6 +110,8 @@ bool offline_patches_npz::load(const std::string filename, size_t first_n, { // load the label array into a vector of label_t (uint8_t) cnpy::NpyArray d_item_class_list = dataset["item_class_list"]; m_checked_ok = (d_item_class_list.shape.size() == 1u); + +std::cout << "d_item_class_list.shape.size()= " << d_item_class_list.shape.size() << " m_checked_ok 1= " << m_checked_ok << "\n"; if (m_checked_ok) { // In case of shrinking to first_n, make sure the size is consistent const size_t num_samples = m_item_root_list.shape[0]; @@ -111,6 +135,7 @@ bool offline_patches_npz::load(const std::string filename, size_t first_n, m_checked_ok = m_checked_ok && ( (d_file_root_list.shape.size() == 1u) || ((d_file_root_list.shape.size() == 2u) && m_lbann_format)); +std::cout << "m_checked_ok 2= " << m_checked_ok << "\n"; if (m_checked_ok) { const size_t num_roots = d_file_root_list.shape[0]; m_file_root_list.resize(num_roots); @@ -137,6 +162,7 @@ bool offline_patches_npz::load(const std::string filename, size_t first_n, m_checked_ok = m_checked_ok && ( (d_file_variant_list.shape.size() == 1u) || ((d_file_variant_list.shape.size() == 2u) && m_lbann_format)); +std::cout << "m_checked_ok 3= " << m_checked_ok << "\n"; if (m_checked_ok) { const size_t num_variants = d_file_variant_list.shape[0]; m_file_variant_list.resize(num_variants); @@ -159,6 +185,8 @@ bool offline_patches_npz::load(const std::string filename, size_t first_n, //for (const auto& fl: m_file_variant_list) std::cout << fl << std::endl; m_checked_ok = m_checked_ok && check_data(); +std::cout << "m_checked_ok 4= " << m_checked_ok << "\n"; +std::cout << "check_data()= " << check_data() << "\n"; if (!m_checked_ok) { //std::cout << 
get_description(); @@ -184,6 +212,20 @@ bool offline_patches_npz::check_data() const { (m_item_variant_list.shape[2] > 0u) && (m_item_root_list.word_size == sizeof(size_t)) && (m_item_variant_list.word_size == sizeof(size_t)); + +std::cout << "m_num_patches= " << m_num_patches << "\n"; +std::cout << "m_item_root_list.shape.size()= " << m_item_root_list.shape.size() <<" should be 2u\n"; +std::cout << "m_item_variant_list.shape.size() = " << m_item_variant_list.shape.size() <<" should be 3u\n"; +std::cout << "m_file_root_list.size() = " << m_file_root_list.size() << " s/b >0u \n"; +std::cout << "m_file_variant_list.size() = " << m_file_variant_list.size() << " s/b >0u \n"; +std::cout << "m_item_root_list.shape[0] = " << m_item_root_list.shape[0] << " s/b " << m_item_class_list.size() << "\n"; +std::cout << "m_item_variant_list.shape[0] = " << m_item_variant_list.shape[0] << " s/b " << m_item_class_list.size() << "\n"; +std::cout << "m_item_root_list.shape[1] = " << m_item_root_list.shape[1] << " s/b " << m_num_patches << "\n"; +std::cout << "m_item_variant_list.shape[1] = " << m_item_variant_list.shape[1] << " s/b " << m_num_patches << "\n"; +std::cout << "m_item_variant_list.shape[2] = " << m_item_variant_list.shape[2] << " s/b >0u \n"; +std::cout << "m_item_root_list.word_size = " << m_item_root_list.word_size<< " s/b " << sizeof(size_t) << "\n"; +std::cout << "m_item_variant_list.word_size = " << m_item_variant_list.word_size << " s/b " << sizeof(size_t) <<" \n"; + return ok; } @@ -211,24 +253,38 @@ offline_patches_npz::sample_t offline_patches_npz::get_sample(const size_t idx) std::vector file_names; +std::cout << "-- Process for " << m_num_patches << " patches\n"; for (size_t p = 0u; p < m_num_patches; ++p) { const size_t root = cnpy_utils::data(m_item_root_list, {idx, p}); + +std::cout << "\troot == " << root << "\n"; + if (root >= m_file_root_list.size()) { using std::to_string; throw lbann_exception("offline_patches_npz: invalid file_root_list index: " + 
to_string(root) + " >= " + to_string(m_file_root_list.size())); } std::string file_name = m_file_root_list.at(root); +std::cout << "\troot file_name : " << file_name << "\n"; const size_t* variant = &(cnpy_utils::data(m_item_variant_list, {idx, p, 0u})); const int ve = m_item_variant_list.shape.back()-1; +std::cout << "\t\tm_item_variant_list.shape.back()= " << m_item_variant_list.shape.back() << "\n"; +std::cout << "\tve == " << ve << "\n"; for (int i = 0; i < ve; ++i) { + std::cout << "}}} "<< file_name << " += " << m_file_variant_list.at(variant[i]) <<" + " << m_variant_divider << "\n"; file_name += m_file_variant_list.at(variant[i]) + m_variant_divider; } +std::cout << "\t]] "<< file_name << " += " << m_file_variant_list.at(variant[ve]) << "\n"; file_name += m_file_variant_list.at(variant[ve]); file_names.push_back(file_name); } +std::cout << "== A Sample\n"; +for(size_t i=0; i pp; // set up the image preprocessor - if ((name == "imagenet") || (name == "jag_conduit") || (name == "jag_conduit_hdf5") || - (name == "triplet") || (name == "mnist_siamese") || (name == "multi_images") || - (name == "moving_mnist")) { + if ((name == "imagenet") || (name == "jag_conduit") || (name == "jag_conduit_hdf5") || + (name == "quadruplet") || (name == "triplet") || (name == "mnist_siamese") || + (name == "multi_images") || (name == "moving_mnist")) { pp = std::make_shared(); } else if (name == "imagenet_patches") { pp = std::make_shared(); @@ -347,6 +347,16 @@ void init_image_data_reader(const lbann_data::Reader& pb_readme, const bool mast reader = new imagenet_reader(pp, shuffle); } else if (name == "triplet") { reader = new data_reader_triplet(pp, shuffle); + } else if (name == "quadruplet") { + //int n_img_srcs = pb_readme.num_image_srcs(); + const lbann_data::DataReader& d_reader = pb_readme.data_reader(); + const lbann_data::Reader& rme = d_reader.reader(); + + int n_img_srcs = rme.num_heads(); + //int n_img_srcs = pb_readme.num_heads(); + std::cout << ">>> n_img_srcs= " 
<< n_img_srcs << "\n"; + reader = new data_reader_quadruplet(pp, n_img_srcs, shuffle); + //reader = new data_reader_quadruplet(pp, 4u, shuffle); } else if (name == "mnist_siamese") { reader = new data_reader_mnist_siamese(pp, shuffle); } else if (name == "multi_images") { diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 10ae0e8dd7f..c345d4f6ef8 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -29,6 +29,10 @@ message Reader { int64 absolute_sample_count = 11; int64 first_n = 200; double percent_of_data_to_use = 12; + + // for multihead Siamese model + int32 num_heads = 211; + //for GAN model bool gan_labelling = 201; int32 gan_label_value = 202; diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 219089763b1..3b70349354c 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -82,7 +82,8 @@ void init_data_readers(lbann::lbann_comm *comm, const lbann_data::LbannPB& p, st init_org_image_data_reader(readme, master, reader); set_up_generic_preprocessor = false; } else if ((name == "imagenet") || (name == "imagenet_patches") || - (name == "triplet") || (name == "mnist_siamese") || (name == "multi_images")) { + (name == "triplet") || (name == "quadruplet") || + (name == "mnist_siamese") || (name == "multi_images")) { init_image_data_reader(readme, master, reader); set_up_generic_preprocessor = false; } else if (name == "jag") { @@ -374,6 +375,8 @@ void init_data_readers(lbann::lbann_comm *comm, const lbann_data::LbannPB& p, st reader_validation = new imagenet_reader_patches(*dynamic_cast(reader)); } else if (name == "triplet") { reader_validation = new data_reader_triplet(*dynamic_cast(reader)); + } else if (name == "quadruplet") { + reader_validation = new data_reader_quadruplet(*dynamic_cast(reader)); } else if (name == "mnist_siamese") { reader_validation = new data_reader_mnist_siamese(*dynamic_cast(reader)); } else if (name == "multi_images") { From e220466cd6be4d3d4067d70a0bfd15bac39ec34c 
Mon Sep 17 00:00:00 2001 From: Byung Suk Yoo Date: Fri, 7 Dec 2018 12:40:56 -0800 Subject: [PATCH 002/443] Changes are made to extend triplet Siamese model to more general m-head Siamese counterpart. Current triplet codes are removed from the build tree, but the sources are kept just for reference for now. --- include/lbann/data_readers/CMakeLists.txt | 3 +- .../data_reader_multihead_siamese.hpp | 97 +++++++++ .../data_readers/data_reader_triplet.hpp | 3 + .../data_store_multihead_siamese.hpp | 68 ++++++ .../lbann/data_store/data_store_triplet.hpp | 2 + include/lbann/lbann.hpp | 3 +- src/data_readers/CMakeLists.txt | 3 +- .../data_reader_multihead_siamese.cpp | 197 ++++++++++++++++++ src/data_readers/data_reader_triplet.cpp | 2 + src/data_readers/offline_patches_npz.cpp | 33 --- src/data_store/CMakeLists.txt | 3 +- src/data_store/data_store_image.cpp | 2 +- .../data_store_multihead_siamese.cpp | 66 ++++++ src/proto/init_image_data_readers.cpp | 21 +- src/proto/lbann.proto | 3 - src/proto/proto_common.cpp | 8 +- 16 files changed, 451 insertions(+), 63 deletions(-) create mode 100644 include/lbann/data_readers/data_reader_multihead_siamese.hpp create mode 100644 include/lbann/data_store/data_store_multihead_siamese.hpp create mode 100644 src/data_readers/data_reader_multihead_siamese.cpp create mode 100644 src/data_store/data_store_multihead_siamese.cpp diff --git a/include/lbann/data_readers/CMakeLists.txt b/include/lbann/data_readers/CMakeLists.txt index c66f49b6763..156b91bc636 100644 --- a/include/lbann/data_readers/CMakeLists.txt +++ b/include/lbann/data_readers/CMakeLists.txt @@ -31,8 +31,7 @@ set_full_path(THIS_DIR_HEADERS lbann_data_generator.hpp opencv.hpp opencv_extensions.hpp - data_reader_triplet.hpp - data_reader_quadruplet.hpp + data_reader_multihead_siamese.hpp ) # Add the subdirectories diff --git a/include/lbann/data_readers/data_reader_multihead_siamese.hpp b/include/lbann/data_readers/data_reader_multihead_siamese.hpp new file mode 100644 index 
00000000000..a0c9557c144 --- /dev/null +++ b/include/lbann/data_readers/data_reader_multihead_siamese.hpp @@ -0,0 +1,97 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +// +// data_reader_multihead_siamese .hpp .cpp - data reader to use m patches +// generated offline. 
+//////////////////////////////////////////////////////////////////////////////// + +#ifndef DATA_READER_MULTIHEAD_SIAMESE_HPP +#define DATA_READER_MULTIHEAD_SIAMESE_HPP + +#include "data_reader_multi_images.hpp" +#include "cv_process.hpp" +#include "offline_patches_npz.hpp" +#include +#include +#include +#include + +namespace lbann { +class data_reader_multihead_siamese : public data_reader_multi_images { + public: + using label_t = offline_patches_npz::label_t; + using sample_t = offline_patches_npz::sample_t; + + data_reader_multihead_siamese(const std::shared_ptr& pp, unsigned int nimages, bool shuffle = true); + data_reader_multihead_siamese(const std::shared_ptr& pp, bool shuffle = true); + + data_reader_multihead_siamese(const data_reader_multihead_siamese&); + data_reader_multihead_siamese& operator=(const data_reader_multihead_siamese&); + ~data_reader_multihead_siamese() override; + + data_reader_multihead_siamese* copy() const override { + return new data_reader_multihead_siamese(*this); + } + + std::string get_type() const override { + return "data_reader_multihead_siamese"; + } + + /** Set up imagenet specific input parameters + * If argument is set to 0, then this method does not change the value of + * the corresponding parameter. However, width and height can only be both + * zero or both non-zero. + */ + void set_input_params(const int width, const int height, const int num_ch, + const int num_labels) override; + + // dataset specific functions + void load() override; + + /// Return the sample list of current minibatch + std::vector get_image_list_of_current_mb() const; + + /// Allow read-only access to the entire sample list + std::vector get_image_list() const; + + sample_t get_sample(size_t idx) const { + return m_samples.get_sample(idx); + } + + /// sets up a data_store. 
+ void setup_data_store(model *m) override; + + protected: + void set_defaults() override; + bool fetch_datum(CPUMat& X, int data_id, int mb_idx, int tid) override; + bool fetch_label(CPUMat& Y, int data_id, int mb_idx, int tid) override; + + protected: + offline_patches_npz m_samples; +}; + +} // namespace lbann + +#endif // DATA_READER_MULTIHEAD_SIAMESE_HPP diff --git a/include/lbann/data_readers/data_reader_triplet.hpp b/include/lbann/data_readers/data_reader_triplet.hpp index 0af87319f84..f9ed1fe0879 100644 --- a/include/lbann/data_readers/data_reader_triplet.hpp +++ b/include/lbann/data_readers/data_reader_triplet.hpp @@ -25,6 +25,9 @@ // // data_reader_triplet .hpp .cpp - data reader to use triplet patches // generated offline. +// +// Depreciated and replaced by data_reader_multihead_siamese .hpp .cpp. +// Kept here just for reference. //////////////////////////////////////////////////////////////////////////////// #ifndef DATA_READER_TRIPLET_HPP diff --git a/include/lbann/data_store/data_store_multihead_siamese.hpp b/include/lbann/data_store/data_store_multihead_siamese.hpp new file mode 100644 index 00000000000..c06c15d4bed --- /dev/null +++ b/include/lbann/data_store/data_store_multihead_siamese.hpp @@ -0,0 +1,68 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. 
You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef __DATA_STORE_MULTIHEAD_SIAMESE_HPP__ +#define __DATA_STORE_MULTIHEAD_SIAMESE_HPP__ + +#include "lbann/data_store/data_store_multi_images.hpp" + +namespace lbann { + +/** + * todo + */ + +class data_store_multihead_siamese : public data_store_multi_images { + public: + + //! ctor + data_store_multihead_siamese(generic_data_reader *reader, model *m) : + data_store_multi_images(reader, m) { + set_name("data_store_multihead_siamese"); + } + + //! copy ctor + data_store_multihead_siamese(const data_store_multihead_siamese&) = default; + + //! operator= + data_store_multihead_siamese& operator=(const data_store_multihead_siamese&) = default; + + data_store_multihead_siamese * copy() const override { return new data_store_multihead_siamese(*this); } + + //! dtor + ~data_store_multihead_siamese() override {}; + + void setup() override; + + protected : + + std::vector get_sample(size_t idx) const override; +}; + +} // namespace lbann + +#endif // __DATA_STORE_MULTIHEAD_SIAMESE_HPP__ diff --git a/include/lbann/data_store/data_store_triplet.hpp b/include/lbann/data_store/data_store_triplet.hpp index 5c004f06722..d55e183bce9 100644 --- a/include/lbann/data_store/data_store_triplet.hpp +++ b/include/lbann/data_store/data_store_triplet.hpp @@ -23,6 +23,8 @@ // implied. See the License for the specific language governing // permissions and limitations under the license. // +// Depreciated and replaced by data_store_multihead_siamese .hpp .cpp. 
+// Kept here just for reference. //////////////////////////////////////////////////////////////////////////////// #ifndef __DATA_STORE_TRIPLET_HPP__ diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index abfe8538517..d18235e74d6 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -121,8 +121,7 @@ #include "lbann/data_readers/data_reader_mnist.hpp" #include "lbann/data_readers/data_reader_multi_images.hpp" #include "lbann/data_readers/data_reader_mnist_siamese.hpp" -#include "lbann/data_readers/data_reader_triplet.hpp" -#include "lbann/data_readers/data_reader_quadruplet.hpp" +#include "lbann/data_readers/data_reader_multihead_siamese.hpp" #include "lbann/data_readers/data_reader_synthetic.hpp" #include "lbann/data_readers/data_reader_jag.hpp" #include "lbann/data_readers/data_reader_jag_conduit.hpp" diff --git a/src/data_readers/CMakeLists.txt b/src/data_readers/CMakeLists.txt index 151165091cf..738abce54bd 100644 --- a/src/data_readers/CMakeLists.txt +++ b/src/data_readers/CMakeLists.txt @@ -33,8 +33,7 @@ set_full_path(THIS_DIR_SOURCES data_reader_synthetic.cpp data_reader_multi_images.cpp data_reader_mnist_siamese.cpp - data_reader_triplet.cpp - data_reader_quadruplet.cpp + data_reader_multihead_siamese.cpp offline_patches_npz.cpp image_preprocessor.cpp image_utils.cpp diff --git a/src/data_readers/data_reader_multihead_siamese.cpp b/src/data_readers/data_reader_multihead_siamese.cpp new file mode 100644 index 00000000000..5f1812b44b4 --- /dev/null +++ b/src/data_readers/data_reader_multihead_siamese.cpp @@ -0,0 +1,197 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. 
+// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +// +// data_reader_multihead_siamese .hpp .cpp - data reader to use m patches +// generated offline. +//////////////////////////////////////////////////////////////////////////////// + +#include "lbann/data_readers/data_reader_multihead_siamese.hpp" +#include "lbann/data_readers/image_utils.hpp" +#include "lbann/data_store/data_store_multihead_siamese.hpp" +#include "lbann/utils/file_utils.hpp" +#include +#include +#include + +#include + +namespace lbann { + +data_reader_multihead_siamese::data_reader_multihead_siamese(const std::shared_ptr& pp, unsigned int nimages, bool shuffle) : data_reader_multi_images(pp, shuffle) { + set_defaults(); + m_num_img_srcs = nimages; + m_samples = offline_patches_npz (m_num_img_srcs); +} + +data_reader_multihead_siamese::data_reader_multihead_siamese(const std::shared_ptr& pp, bool shuffle) + : data_reader_multi_images(pp, shuffle) { + set_defaults(); +} + +data_reader_multihead_siamese::data_reader_multihead_siamese(const data_reader_multihead_siamese& rhs) + : data_reader_multi_images(rhs), + m_samples(rhs.m_samples) +{} + +data_reader_multihead_siamese& data_reader_multihead_siamese::operator=(const data_reader_multihead_siamese& rhs) { + // check for self-assignment + if (this == &rhs) { + return 
(*this); + } + + data_reader_multi_images::operator=(rhs); + m_samples = rhs.m_samples; + + return (*this); +} + +data_reader_multihead_siamese::~data_reader_multihead_siamese() { +} + +void data_reader_multihead_siamese::set_defaults() { + m_image_width = 110; + m_image_height = 110; + m_image_num_channels = 3; + set_linearized_image_size(); + m_num_labels = 20; + m_num_img_srcs = 3; +} + +/** + * Same as the parent class method except the default value of the last argument, + * num_img_srcs, which is 4 here. + */ +void data_reader_multihead_siamese::set_input_params(const int width, const int height, const int num_ch, const int num_labels) { + data_reader_multi_images::set_input_params(width, height, num_ch, num_labels, 4); +} + + +bool data_reader_multihead_siamese::fetch_datum(Mat& X, int data_id, int mb_idx, int tid) { + + std::vector<::Mat> X_v = create_datum_views(X, mb_idx); + + sample_t sample = m_samples.get_sample(data_id); + for(size_t i=0u; i < m_num_img_srcs; ++i) { + int width=0, height=0, img_type=0; + const std::string imagepath = get_file_dir() + sample.first[i]; + bool ret = true; + if (m_data_store != nullptr) { + std::vector *image_buf; + m_data_store->get_data_buf(data_id, image_buf, i); + // This could probably have used image_utils::import_image() + ret = lbann::image_utils::load_image(*image_buf, width, height, img_type, *(m_pps[tid]), X_v[i]); + } else { + ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v[i], m_thread_buffer[tid], &m_thread_cv_buffer[tid]); + } + + if(!ret) { + throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " " + + get_type() + ": image_utils::load_image failed to load - " + + imagepath); + } + if((width * height * CV_MAT_CN(img_type)) != m_image_linearized_size) { + throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " " + + get_type() + ": mismatch data size -- either width, height or channel - " + + imagepath + " 
[w,h,c]=[" + std::to_string(width) + "x" + std::to_string(height) + + "x" + std::to_string(CV_MAT_CN(img_type)) + "] != " + std::to_string(m_image_linearized_size)); + } + } + + return true; +} + + +bool data_reader_multihead_siamese::fetch_label(Mat& Y, int data_id, int mb_idx, int tid) { + const label_t label = m_samples.get_label(data_id); + Y.Set(label, mb_idx, 1); + return true; +} + + +std::vector data_reader_multihead_siamese::get_image_list_of_current_mb() const { + std::vector ret; + ret.reserve(m_mini_batch_size); + + for (El::Int i = 0; i < m_indices_fetched_per_mb.Height(); ++i) { + El::Int index = m_indices_fetched_per_mb.Get(i, 0); + ret.emplace_back(m_samples.get_sample(index)); + } + return ret; +} + + +std::vector data_reader_multihead_siamese::get_image_list() const { + const size_t num_samples = m_samples.get_num_samples(); + std::vector ret; + ret.reserve(num_samples); + + for (size_t i=0; i < num_samples; ++i) { + ret.emplace_back(m_samples.get_sample(i)); + } + return ret; +} + + +void data_reader_multihead_siamese::load() { + const std::string data_filename = get_data_filename(); + + // To support m_first_n semantic, m_samples.load() takes m_first_n + // as an argument and attempt to shrink the CNPY arrays loaded as needed + if (!m_samples.load(data_filename, m_first_n)) { + throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " " + + get_type() + ": failed to load the file " + data_filename); + } + + size_t num_samples = m_samples.get_num_samples(); + + if (m_first_n > 0) { + num_samples = (static_cast(m_first_n) <= num_samples)? 
+ static_cast(m_first_n) : num_samples; + + m_first_n = num_samples; + set_use_percent(1.0); + set_absolute_sample_count(0u); + } + + // reset indices + m_shuffled_indices.clear(); + + m_shuffled_indices.resize(num_samples); + std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0); + + select_subset_of_data(); +} + +void data_reader_multihead_siamese::setup_data_store(model *m) { + if (m_data_store != nullptr) { + delete m_data_store; + } + m_data_store = new data_store_multihead_siamese(this, m); + if (m_data_store != nullptr) { + m_data_store->setup(); + } +} + +} // namespace lbann diff --git a/src/data_readers/data_reader_triplet.cpp b/src/data_readers/data_reader_triplet.cpp index 4396ba0f4a2..cd93ee96c99 100644 --- a/src/data_readers/data_reader_triplet.cpp +++ b/src/data_readers/data_reader_triplet.cpp @@ -25,6 +25,8 @@ // // data_reader_triplet .hpp .cpp - data reader to use triplet patches // generated offline. +// Depreciated and replaced by data_reader_multihead_siamese .hpp .cpp. +// Kept here just for reference. 
//////////////////////////////////////////////////////////////////////////////// #include "lbann/data_readers/data_reader_triplet.hpp" diff --git a/src/data_readers/offline_patches_npz.cpp b/src/data_readers/offline_patches_npz.cpp index ce8c702d0ef..2734eb115ab 100644 --- a/src/data_readers/offline_patches_npz.cpp +++ b/src/data_readers/offline_patches_npz.cpp @@ -111,7 +111,6 @@ bool offline_patches_npz::load(const std::string filename, size_t first_n, cnpy::NpyArray d_item_class_list = dataset["item_class_list"]; m_checked_ok = (d_item_class_list.shape.size() == 1u); -std::cout << "d_item_class_list.shape.size()= " << d_item_class_list.shape.size() << " m_checked_ok 1= " << m_checked_ok << "\n"; if (m_checked_ok) { // In case of shrinking to first_n, make sure the size is consistent const size_t num_samples = m_item_root_list.shape[0]; @@ -135,7 +134,6 @@ std::cout << "d_item_class_list.shape.size()= " << d_item_class_list.shape.size( m_checked_ok = m_checked_ok && ( (d_file_root_list.shape.size() == 1u) || ((d_file_root_list.shape.size() == 2u) && m_lbann_format)); -std::cout << "m_checked_ok 2= " << m_checked_ok << "\n"; if (m_checked_ok) { const size_t num_roots = d_file_root_list.shape[0]; m_file_root_list.resize(num_roots); @@ -155,14 +153,12 @@ std::cout << "m_checked_ok 2= " << m_checked_ok << "\n"; dataset.erase(it); // to keep memory footprint as low as possible } } - //for (const auto& fl: m_file_root_list) std::cout << fl << std::endl; { // load the array of dictionary substrings of variant type cnpy::NpyArray d_file_variant_list = dataset["file_variant_list"]; m_checked_ok = m_checked_ok && ( (d_file_variant_list.shape.size() == 1u) || ((d_file_variant_list.shape.size() == 2u) && m_lbann_format)); -std::cout << "m_checked_ok 3= " << m_checked_ok << "\n"; if (m_checked_ok) { const size_t num_variants = d_file_variant_list.shape[0]; m_file_variant_list.resize(num_variants); @@ -182,14 +178,10 @@ std::cout << "m_checked_ok 3= " << m_checked_ok << "\n"; 
dataset.erase(it); // to keep memory footprint as low as possible } } - //for (const auto& fl: m_file_variant_list) std::cout << fl << std::endl; m_checked_ok = m_checked_ok && check_data(); -std::cout << "m_checked_ok 4= " << m_checked_ok << "\n"; -std::cout << "check_data()= " << check_data() << "\n"; if (!m_checked_ok) { - //std::cout << get_description(); m_item_class_list.clear(); m_file_root_list.clear(); m_file_variant_list.clear(); @@ -213,19 +205,6 @@ bool offline_patches_npz::check_data() const { (m_item_root_list.word_size == sizeof(size_t)) && (m_item_variant_list.word_size == sizeof(size_t)); -std::cout << "m_num_patches= " << m_num_patches << "\n"; -std::cout << "m_item_root_list.shape.size()= " << m_item_root_list.shape.size() <<" should be 2u\n"; -std::cout << "m_item_variant_list.shape.size() = " << m_item_variant_list.shape.size() <<" should be 3u\n"; -std::cout << "m_file_root_list.size() = " << m_file_root_list.size() << " s/b >0u \n"; -std::cout << "m_file_variant_list.size() = " << m_file_variant_list.size() << " s/b >0u \n"; -std::cout << "m_item_root_list.shape[0] = " << m_item_root_list.shape[0] << " s/b " << m_item_class_list.size() << "\n"; -std::cout << "m_item_variant_list.shape[0] = " << m_item_variant_list.shape[0] << " s/b " << m_item_class_list.size() << "\n"; -std::cout << "m_item_root_list.shape[1] = " << m_item_root_list.shape[1] << " s/b " << m_num_patches << "\n"; -std::cout << "m_item_variant_list.shape[1] = " << m_item_variant_list.shape[1] << " s/b " << m_num_patches << "\n"; -std::cout << "m_item_variant_list.shape[2] = " << m_item_variant_list.shape[2] << " s/b >0u \n"; -std::cout << "m_item_root_list.word_size = " << m_item_root_list.word_size<< " s/b " << sizeof(size_t) << "\n"; -std::cout << "m_item_variant_list.word_size = " << m_item_variant_list.word_size << " s/b " << sizeof(size_t) <<" \n"; - return ok; } @@ -253,37 +232,25 @@ offline_patches_npz::sample_t offline_patches_npz::get_sample(const size_t idx) 
std::vector file_names; -std::cout << "-- Process for " << m_num_patches << " patches\n"; for (size_t p = 0u; p < m_num_patches; ++p) { const size_t root = cnpy_utils::data(m_item_root_list, {idx, p}); -std::cout << "\troot == " << root << "\n"; - if (root >= m_file_root_list.size()) { using std::to_string; throw lbann_exception("offline_patches_npz: invalid file_root_list index: " + to_string(root) + " >= " + to_string(m_file_root_list.size())); } std::string file_name = m_file_root_list.at(root); -std::cout << "\troot file_name : " << file_name << "\n"; const size_t* variant = &(cnpy_utils::data(m_item_variant_list, {idx, p, 0u})); const int ve = m_item_variant_list.shape.back()-1; -std::cout << "\t\tm_item_variant_list.shape.back()= " << m_item_variant_list.shape.back() << "\n"; -std::cout << "\tve == " << ve << "\n"; for (int i = 0; i < ve; ++i) { - std::cout << "}}} "<< file_name << " += " << m_file_variant_list.at(variant[i]) <<" + " << m_variant_divider << "\n"; file_name += m_file_variant_list.at(variant[i]) + m_variant_divider; } -std::cout << "\t]] "<< file_name << " += " << m_file_variant_list.at(variant[ve]) << "\n"; file_name += m_file_variant_list.at(variant[ve]); file_names.push_back(file_name); } -std::cout << "== A Sample\n"; -for(size_t i=0; i &s = t.second; for (auto idx : s) { //note: for imagenet_reader, m_num_img_srcs = 1; - // for other readers (multi, triplet) it is larger, probably three + // for other readers (multi, siamese) it is larger, probably three for (size_t k=0; k +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. 
You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +// +//////////////////////////////////////////////////////////////////////////////// + +#include "lbann/data_store/data_store_multihead_siamese.hpp" +#include "lbann/data_readers/data_reader_multihead_siamese.hpp" +#include "lbann/utils/exception.hpp" +#include "lbann/utils/options.hpp" +#include "lbann/utils/timer.hpp" + +namespace lbann { + +std::vector data_store_multihead_siamese::get_sample(size_t idx) const { + const data_reader_multihead_siamese *reader = dynamic_cast(m_reader); + data_reader_multihead_siamese::sample_t sample = reader->get_sample(idx); + return sample.first; +} + +void data_store_multihead_siamese::setup() { + double tm1 = get_time(); + if (m_rank == 0) { + std::cerr << "starting data_store_multihead_siamese::setup() for data reader with role: " << m_reader->get_role() << std::endl; + } + + set_name("data_store_multihead_siamese"); + + //sanity check + data_reader_multihead_siamese *reader = dynamic_cast(m_reader); + if (reader == nullptr) { + std::stringstream err; + err << __FILE__ << " " << __LINE__ << " :: " + << "dynamic_cast(m_reader) failed"; + throw lbann_exception(err.str()); + } + + data_store_multi_images::setup(); + + if (m_rank == 0) { + std::cerr << "TIME for data_store_multihead_siamese setup: " << get_time() - tm1 << std::endl; + } +} + +} // namespace lbann diff --git a/src/proto/init_image_data_readers.cpp b/src/proto/init_image_data_readers.cpp index c05cb403d65..4bcf170ea08 100644 --- a/src/proto/init_image_data_readers.cpp +++ b/src/proto/init_image_data_readers.cpp @@ -319,7 +319,7 @@ void 
init_image_data_reader(const lbann_data::Reader& pb_readme, const bool mast std::shared_ptr pp; // set up the image preprocessor if ((name == "imagenet") || (name == "jag_conduit") || (name == "jag_conduit_hdf5") || - (name == "quadruplet") || (name == "triplet") || (name == "mnist_siamese") || + (name == "multihead_siamese") || (name == "mnist_siamese") || (name == "multi_images") || (name == "moving_mnist")) { pp = std::make_shared(); } else if (name == "imagenet_patches") { @@ -345,18 +345,8 @@ void init_image_data_reader(const lbann_data::Reader& pb_readme, const bool mast reader = new imagenet_reader_patches(ppp, shuffle); } else if (name == "imagenet") { reader = new imagenet_reader(pp, shuffle); - } else if (name == "triplet") { - reader = new data_reader_triplet(pp, shuffle); - } else if (name == "quadruplet") { - //int n_img_srcs = pb_readme.num_image_srcs(); - const lbann_data::DataReader& d_reader = pb_readme.data_reader(); - const lbann_data::Reader& rme = d_reader.reader(); - - int n_img_srcs = rme.num_heads(); - //int n_img_srcs = pb_readme.num_heads(); - std::cout << ">>> n_img_srcs= " << n_img_srcs << "\n"; - reader = new data_reader_quadruplet(pp, n_img_srcs, shuffle); - //reader = new data_reader_quadruplet(pp, 4u, shuffle); + } else if (name == "multihead_siamese") { + reader = new data_reader_multihead_siamese(pp, pb_readme.num_image_srcs(), shuffle); } else if (name == "mnist_siamese") { reader = new data_reader_mnist_siamese(pp, shuffle); } else if (name == "multi_images") { @@ -539,6 +529,11 @@ void init_image_data_reader(const lbann_data::Reader& pb_readme, const bool mast throw lbann_exception(err.str()); } multi_image_dr_ptr->set_input_params(width, height, channels, n_labels, n_img_srcs); + } else if(name == "multihead_siamese") { + const int n_img_srcs = pb_readme.num_image_srcs(); + data_reader_multi_images* multi_image_dr_ptr + = dynamic_cast(image_data_reader_ptr); + multi_image_dr_ptr->set_input_params(width, height, channels, 
n_labels, n_img_srcs); } else { image_data_reader_ptr->set_input_params(width, height, channels, n_labels); } diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index c345d4f6ef8..95dfcf27eda 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -30,9 +30,6 @@ message Reader { int64 first_n = 200; double percent_of_data_to_use = 12; - // for multihead Siamese model - int32 num_heads = 211; - //for GAN model bool gan_labelling = 201; int32 gan_label_value = 202; diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 3b70349354c..f8da5c6d8a9 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -82,7 +82,7 @@ void init_data_readers(lbann::lbann_comm *comm, const lbann_data::LbannPB& p, st init_org_image_data_reader(readme, master, reader); set_up_generic_preprocessor = false; } else if ((name == "imagenet") || (name == "imagenet_patches") || - (name == "triplet") || (name == "quadruplet") || + (name == "multihead_siamese") || (name == "mnist_siamese") || (name == "multi_images")) { init_image_data_reader(readme, master, reader); set_up_generic_preprocessor = false; @@ -373,10 +373,8 @@ void init_data_readers(lbann::lbann_comm *comm, const lbann_data::LbannPB& p, st reader_validation = new imagenet_reader(*dynamic_cast(reader)); } else if (name == "imagenet_patches") { reader_validation = new imagenet_reader_patches(*dynamic_cast(reader)); - } else if (name == "triplet") { - reader_validation = new data_reader_triplet(*dynamic_cast(reader)); - } else if (name == "quadruplet") { - reader_validation = new data_reader_quadruplet(*dynamic_cast(reader)); + } else if (name == "multihead_siamese") { + reader_validation = new data_reader_multihead_siamese(*dynamic_cast(reader)); } else if (name == "mnist_siamese") { reader_validation = new data_reader_mnist_siamese(*dynamic_cast(reader)); } else if (name == "multi_images") { From 1ed91b078761be8deb89709b4395639346d4d58a Mon Sep 17 00:00:00 2001 From: Tim Moon Date: 
Thu, 24 Jan 2019 15:05:42 -0800 Subject: [PATCH 003/443] Models store layers in unique_ptrs. The layer "factory" (and related functions) return unique_ptrs rather than raw pointers. --- .../lbann/models/directed_acyclic_graph.hpp | 32 +- include/lbann/models/model.hpp | 124 ++--- include/lbann/proto/factories.hpp | 22 +- include/lbann/utils/graph.hpp | 63 +-- src/models/directed_acyclic_graph.cpp | 23 +- src/models/model.cpp | 453 +++++++++--------- src/proto/factories/layer_factory.cpp | 305 ++++++------ src/proto/factories/layer_graph_factory.cpp | 29 +- src/proto/factories/model_factory.cpp | 15 +- src/utils/graph.cpp | 123 +++-- 10 files changed, 605 insertions(+), 584 deletions(-) diff --git a/include/lbann/models/directed_acyclic_graph.hpp b/include/lbann/models/directed_acyclic_graph.hpp index 56d8f8fc44b..671cf66b100 100644 --- a/include/lbann/models/directed_acyclic_graph.hpp +++ b/include/lbann/models/directed_acyclic_graph.hpp @@ -22,59 +22,47 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. -// -// directed_acyclic_graph .hpp .cpp - Directed acyclic graph neural network models //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_MODEL_DIRECTED_ACYCLIC_GRAPH_HPP -#define LBANN_MODEL_DIRECTED_ACYCLIC_GRAPH_HPP +#ifndef LBANN_MODELS_DIRECTED_ACYCLIC_GRAPH_HPP_INCLUDED +#define LBANN_MODELS_DIRECTED_ACYCLIC_GRAPH_HPP_INCLUDED #include "lbann/models/model.hpp" #include "lbann/layers/layer.hpp" namespace lbann { -/** Directed acyclic graph neural network model. */ +/** Neural network model with a DAG layer graph. */ class directed_acyclic_graph_model : public model { public: - /** Constructor. */ directed_acyclic_graph_model(lbann_comm *comm, - int max_mini_batch_size, + El::Int max_mini_batch_size, objective_function *obj_fn, optimizer *default_optimizer); - - /** Copy constructor. 
*/ directed_acyclic_graph_model(const directed_acyclic_graph_model& other) = default; - - /** Copy assignment operator. */ directed_acyclic_graph_model& operator=(const directed_acyclic_graph_model& other) = default; - - /** Destructor. */ ~directed_acyclic_graph_model() override = default; - - /** Create copy. */ directed_acyclic_graph_model* copy() const override { return new directed_acyclic_graph_model(*this); } - - /** Get model type. */ std::string get_type() const override { return "directed acyclic graph"; } protected: - /** For general DAG models, users need to manually specify each layer to - * freeze in the model description prototext. + /** @details For general DAG models, users need to manually specify + * each layer to freeze in the model description prototext. */ void freeze_layers_under_frozen_surface() override {} /** Set up layer execution order. + * * Called in setup function. A topological sort applied is to the - * layer list so that we can traverse a directed acyclic graph + * layer list so that we can traverse the directed acyclic graph * without violating dependencies. */ void setup_layer_execution_order() override; }; -} // namespace lbann +} // namespace lbann -#endif // LBANN_MODEL_DIRECTED_ACYCLIC_GRAPH_HPP +#endif // LBANN_MODELS_DIRECTED_ACYCLIC_GRAPH_HPP_INCLUDED diff --git a/include/lbann/models/model.hpp b/include/lbann/models/model.hpp index df0146fed30..5694d22abb0 100644 --- a/include/lbann/models/model.hpp +++ b/include/lbann/models/model.hpp @@ -22,12 +22,10 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. 
-// -// lbann_model .hpp .cpp - Abstract class for neural network training models //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_MODEL_HPP -#define LBANN_MODEL_HPP +#ifndef LBANN_MODELS_MODEL_HPP_INCLUDED +#define LBANN_MODELS_MODEL_HPP_INCLUDED #include "lbann/base.hpp" #include "lbann/comm.hpp" @@ -48,44 +46,38 @@ namespace lbann { -// Forward-declare this. +// Forward declarations class lbann_callback; -/** Base class for LBANN models. */ +/** Base class for neural network models. */ class model { public: - /** Constructor. */ model(lbann_comm *comm, - int mini_batch_size, + El::Int mini_batch_size, objective_function *obj_fn, optimizer* default_optimizer = nullptr); - - /** Copy constructor. */ model(const model& other); - /** Copy assignment operator. */ model& operator=(const model& other); - /** Destructor. */ virtual ~model(); - /** Copy model. */ virtual model* copy() const = 0; - /** Return the model's type. */ + /** Return model type's name. + * + * The model type name should be a brief, human-readable + * description. + */ virtual std::string get_type() const = 0; - /** Set the model's name; this is an arbitrary string - * that may be useful in multi-model scenarios, e.g, - * LTFB, jag + /** Set model instance's name. + * + * Each model should have a unique, preferably human-readable, + * name. */ void set_name(std::string name); - /** Return the model's name; this is an arbitrary string - * that may be useful in multi-model scenarios, e.g, - * LTFB, jag - */ - std::string get_name() const { - return m_name; - } + /** Return model instance's name. */ + std::string get_name() const { return m_name; } /** Human-readable description. */ virtual description get_description() const; @@ -94,7 +86,7 @@ class model { virtual void setup(std::shared_ptr io_thread_pool); /** Add layer to model. */ - virtual void add_layer(Layer *layer); + virtual void add_layer(std::unique_ptr l); /** Add weights to model. 
*/ void add_weights(weights *w); @@ -125,11 +117,14 @@ class model { return m_metrics; } - /** Set the model's layers. */ - void set_layers(std::vector& layers); - - /** Return the model's layers. */ - virtual const std::vector& get_layers() const { return m_layers; } + /** @brief Return list of layers in model. + * @details The list is in execution order for forward propagation. + */ + std::vector get_layers(); + /** @brief Return list of layers in model. + * @details The list is in execution order for forward propagation. + */ + const std::vector get_layers() const; const std::vector get_weights() const; @@ -280,18 +275,17 @@ class model { int m_current_step; int m_current_validation_step; int m_current_testing_step; - /** - * Maximum possible minibatch size supported by layers in this model. - * Note that this is local to the particular model, not across multiple - * models. + /** @details Maximum possible minibatch size supported by layers in + * this model. Note that this is local to the particular model, + * not across multiple models. */ int m_max_mini_batch_size; /** Size of the current mini-batch in the model. */ int m_current_mini_batch_size; - /** - * The "effective" size of a minibatch. - * This is the size of the minibatch across all models and used for e.g. - * correctly averaging gradients from multiple models. + /** The "effective" size of a minibatch. + * + * This is the size of the minibatch across all models and used for + * e.g. correctly averaging gradients from multiple models. */ int m_effective_mini_batch_size; /** current phase (multiple of epoch counts) in training a model */ @@ -302,21 +296,19 @@ class model { std::vector m_callbacks; /** Default optimizer. + * * If a layer needs to construct an optimizer during setup, it will * make a copy of the default optimizer. */ optimizer *m_default_optimizer; /** List of model metrics. + * * A metric can be used to evaluate the performance of the model * without affecting the training process. 
*/ std::vector m_metrics; - /** List of layers in model. - * The list is in execution order for forward propagation. - */ - std::vector m_layers; /** List of weights in model. */ std::vector m_weights; @@ -329,39 +321,53 @@ class model { /** Check if the model execution mode is valid. */ virtual bool is_execution_mode_valid(execution_mode mode) const; - /** Reorder layers. */ - virtual void permute_layers(const std::vector& permutation); + /** Reorder layer list with a gather. + * + * The new layer list is the same length as @c gather_indices and + * its entries are given by + * @f[ \text{new\_list}[i] = \text{old\_list}[\text{gather\_indices}[i]] @f] + * + * Since entries in the layer list must be unique, this will fail + * if @c gather_indices has any repeated entries. + */ + void reorder_layers(const std::vector& gather_indices); /** Remap pointers. + * * Layer and weights pointers are remapped using the provided * maps. If a pointer is not a key in the corresponding map, the * pointer is not changed. */ - virtual void remap_pointers(const std::unordered_map& layer_map, - const std::unordered_map& weights_map); - - /** In case that a layer is frozen, also freeze layers that precede it if that - * makes senses for the particular model, such as sequential or siamese. - * For othe models, users can manually control the behaivor by indicating - * whether to freeze each layer in the model description prototext. + virtual void remap_pointers(const std::unordered_map& layer_map, + const std::unordered_map& weights_map); + + /** @details In case that a layer is frozen, also freeze layers that + * precede it if that makes senses for the particular model, such + * as sequential or siamese. For othe models, users can manually + * control the behaivor by indicating whether to freeze each layer + * in the model description prototext. */ virtual void freeze_layers_under_frozen_surface(); /** Set up topology of layer graph. + * * Called in setup function. 
All layers in connected component of * layer graph are added to the model and all parent/child * relationships between layers are reciprocated. */ virtual void setup_layer_topology(); /** Set up layer execution order. + * * Called in setup function. */ virtual void setup_layer_execution_order(); /** Set up layers. + * * Called in setup function. */ virtual void setup_layers(); /** Set up weights. + * * Called in setup function. All weights being used by layers or * the objective function are added to the model and all unused * weights are deleted. @@ -382,6 +388,7 @@ class model { /** Backward propagation step. */ virtual void backward_prop(); /** Clear each optimizer's gradient. + * * This must be called before training forward prop since layers * set an optimizer flag during forward prop. */ @@ -391,6 +398,7 @@ class model { /** Update layers step. */ virtual bool update_layers(); /** Reconcile weight values. + * * If weight values are duplicated across multiple processes, they * are set to the average across the processes. */ @@ -443,9 +451,13 @@ class model { private: - /** Search layer graph and add all connected layers. */ - void add_connected_layers(); + /** @brief List of layers in model. + * @details The list is in execution order for forward propagation. + */ + std::vector> m_layers; + /** Insert evaluation layers where needed. + * * If an objective function layer term or a layer metric * corresponds to a layer that is not an evaluation layer, an * evaluation layer is added as a child of the original layer and @@ -454,11 +466,13 @@ class model { */ void add_evaluation_layers(); /** Insert dummy layers after layers with too few children. + * * If a layer expects more child layers than it has, add dummy * layers until it has enough children. */ void add_dummy_layers(); /** Insert split layers after layers with too many children. + * * If a layer expects one child layer but has multiple, add a split * layer. 
The split layer will be the original layer's child and * the split layer's children will be the original children. @@ -466,6 +480,6 @@ class model { void add_split_layers(); }; -} // namespace lbann +} // namespace lbann -#endif // LBANN_MODEL_HPP +#endif // LBANN_MODELS_MODEL_HPP_INCLUDED diff --git a/include/lbann/proto/factories.hpp b/include/lbann/proto/factories.hpp index d8bc94bb272..f92721b7b28 100644 --- a/include/lbann/proto/factories.hpp +++ b/include/lbann/proto/factories.hpp @@ -24,8 +24,8 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_PROTO_FACTORIES_HPP -#define LBANN_PROTO_FACTORIES_HPP +#ifndef LBANN_PROTO_FACTORIES_HPP_INCLUDED +#define LBANN_PROTO_FACTORIES_HPP_INCLUDED #include "lbann/proto/proto_common.hpp" #include "lbann/data_readers/data_reader.hpp" @@ -40,16 +40,18 @@ model* construct_model(lbann_comm* comm, const lbann_data::Model& proto_model); /** Construct a layer graph specified with a prototext. */ -std::vector construct_layer_graph(lbann_comm* comm, - const std::map& data_readers, - const lbann_data::Model& proto_model); +std::vector> construct_layer_graph( + lbann_comm* comm, + const std::map& data_readers, + const lbann_data::Model& proto_model); /** Construct a layer specified with prototext. */ template -Layer* construct_layer(lbann_comm* comm, - const std::map& data_readers, - int num_parallel_readers, - const lbann_data::Layer& proto_layer); +std::unique_ptr construct_layer( + lbann_comm* comm, + const std::map& data_readers, + int num_parallel_readers, + const lbann_data::Layer& proto_layer); /** Construct weights specified with prototext. 
*/ weights* construct_weights(lbann_comm* comm, @@ -104,4 +106,4 @@ std::set parse_set(std::string str) { } // namespace proto } // namespace lbann -#endif // LBANN_PROTO_FACTORIES_HPP +#endif // LBANN_PROTO_FACTORIES_HPP_INCLUDED diff --git a/include/lbann/utils/graph.hpp b/include/lbann/utils/graph.hpp index 4282f129501..052b6af7119 100644 --- a/include/lbann/utils/graph.hpp +++ b/include/lbann/utils/graph.hpp @@ -28,84 +28,91 @@ #include #include #include +#include "lbann/base.hpp" namespace lbann { namespace graph { /** Print the nodes and edges of a graph to an output stream. */ -void print(const std::set& nodes, - const std::map>& edges, +void print(const std::set& nodes, + const std::map>& edges, std::ostream& os = std::cout); /** Get nodes adjacent to a given node. */ -std::set get_neighbors(int node, - const std::map>& edges); +std::set get_neighbors(El::Int node, + const std::map>& edges); -/** Check whether a graph is a closure. - * A closure is a set of nodes with no edges to nodes outside the - * set. +/** @details A closure is a set of nodes with no edges to nodes + * outside the set. */ -bool is_closure(const std::set& nodes, - const std::map>& edges); +bool is_closure(const std::set& nodes, + const std::map>& edges); /** Check whether a graph is topologically sorted. + * * A topologically sorted graph has no edges going from a node to an * earlier node. The graph must be a directed acyclic graph. */ -bool is_topologically_sorted(const std::set& nodes, - const std::map>& edges); +bool is_topologically_sorted(const std::set& nodes, + const std::map>& edges); /** Check whether a directed graph is cyclic. */ -bool is_cyclic(const std::set& nodes, - const std::map>& edges); +bool is_cyclic(const std::set& nodes, + const std::map>& edges); /** Construct the transpose of a graph. + * * Reverses the direction of edges in the graph and returns the new * set of edges. 
*/ -std::map> transpose(const std::set& nodes, - const std::map>& edges); +std::map> transpose(const std::set& nodes, + const std::map>& edges); /** Construct an induced subgraph. + * * Removes edges to nodes outside the set of nodes and returns the * new set of edges. */ -std::map> induce_subgraph(const std::set& nodes, - const std::map>& edges); +std::map> induce_subgraph(const std::set& nodes, + const std::map>& edges); /** Perform a breadth-first search starting from a given root node. + * * The search order is deterministic. */ -std::vector breadth_first_search(int root, - const std::map>& edges); +std::vector breadth_first_search(El::Int root, + const std::map>& edges); /** Perform a depth-first search starting from a given root node. + * * A depth-first search post-order is returned. The search order is * deterministic. */ -std::vector depth_first_search(int root, - const std::map>& edges); +std::vector depth_first_search(El::Int root, + const std::map>& edges); /** Topologically sort a graph. + * * A topologically sorted graph has no edges going from a node to an * earlier node. The sort is deterministic and does not affect graphs * that are already topologically sorted. */ -std::vector topological_sort(const std::set& nodes, - const std::map>& edges); +std::vector topological_sort(const std::set& nodes, + const std::map>& edges); /** Construct the condensation of a graph. + * * The condensation of a graph is constructed by determining the * strongly connected components, i.e. sets of nodes that are * reachable from all nodes in the set, and coalescing them into * single nodes. The condensation is a DAG and will be topologically * sorted. 
*/ -void condensation(const std::set& nodes, - const std::map>& edges, - std::map>& components, - std::set& condensation_nodes, - std::map>& condensation_edges); +void condensation(const std::set& nodes, + const std::map>& edges, + std::map>& components, + std::set& condensation_nodes, + std::map>& condensation_edges); } } diff --git a/src/models/directed_acyclic_graph.cpp b/src/models/directed_acyclic_graph.cpp index 79984549e49..3fb25f2cd52 100644 --- a/src/models/directed_acyclic_graph.cpp +++ b/src/models/directed_acyclic_graph.cpp @@ -30,7 +30,7 @@ namespace lbann { directed_acyclic_graph_model::directed_acyclic_graph_model(lbann_comm *comm, - int mini_batch_size, + El::Int mini_batch_size, objective_function *obj_fn, optimizer* default_optimizer) : model(comm, mini_batch_size, obj_fn, default_optimizer) {} @@ -39,16 +39,17 @@ void directed_acyclic_graph_model::setup_layer_execution_order() { // Construct layer graph // Note: Each layer depends on its parent layers and its hint layer. 
- std::set nodes; - std::map> edges; - const int num_layers = m_layers.size(); - std::unordered_map layer_indices; - for (int node = 0; node < num_layers; ++node) { + const auto& layers = this->get_layers(); + const El::Int num_layers = layers.size(); + std::set nodes; + std::map> edges; + std::unordered_map layer_indices; + for (El::Int node = 0; node < num_layers; ++node) { nodes.insert(node); - layer_indices[m_layers[node]] = node; + layer_indices[layers[node]] = node; } - for (int node = 0; node < num_layers; ++node) { - const auto& l = m_layers[node]; + for (El::Int node = 0; node < num_layers; ++node) { + const auto& l = layers[node]; for (const auto& child : l->get_child_layers()) { edges[node].insert(layer_indices[child]); } @@ -59,9 +60,9 @@ void directed_acyclic_graph_model::setup_layer_execution_order() { // Topologically sort layers const auto& sorted_order = graph::topological_sort(nodes, edges); - permute_layers(sorted_order); + reorder_layers(sorted_order); model::setup_layer_execution_order(); } -} // namespace lbann +} // namespace lbann diff --git a/src/models/model.cpp b/src/models/model.cpp index 4f6785adb5e..c1dc6e48d09 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -50,7 +50,8 @@ namespace lbann { namespace { -bool layer_has_name(std::string name, const std::vector& layers) { +bool layer_has_name(std::string name, + const std::vector>& layers) { for (const auto& l : layers) { if (l->get_name() == name) { return true; @@ -66,7 +67,7 @@ bool layer_has_name(std::string name, const std::vector& layers) { //////////////////////////////////////////////////////////// model::model(lbann_comm *comm, - int mini_batch_size, + El::Int mini_batch_size, objective_function *obj_fn, optimizer* default_optimizer) : m_objective_function(obj_fn), @@ -110,7 +111,6 @@ model::model(const model& other) : m_objective_function = other.m_objective_function; m_metrics = other.m_metrics; m_callbacks = other.m_callbacks; - m_layers = other.m_layers; 
m_weights = other.m_weights; if (m_objective_function != nullptr) { m_objective_function = m_objective_function->copy(); @@ -121,12 +121,15 @@ model::model(const model& other) : for (auto& cb : m_callbacks) { cb = cb->copy(); } - std::unordered_map layer_map; - for (auto& l : m_layers) { - auto&& l_copy = l->copy(); - layer_map[l] = l_copy; - l_copy->set_model(this); - l = l_copy; + std::unordered_map layer_map; + m_layers.reserve(other.m_layers.size()); + for (const auto& ptr : other.m_layers) { + if (!ptr) { LBANN_ERROR("unexpected null pointer"); } + auto* old_layer = ptr.get(); + auto* new_layer = old_layer->copy(); + new_layer->set_model(this); + m_layers.emplace_back(new_layer); + layer_map[old_layer] = new_layer; } std::unordered_map weights_map; for (auto& w : m_weights) { @@ -144,7 +147,6 @@ model& model::operator=(const model& other) { if (m_objective_function != nullptr) { delete m_objective_function; } for (const auto& m : m_metrics) { delete m; } for (const auto& cb : m_callbacks) { delete cb; } - for (const auto& l : m_layers) { delete l; } for (const auto& w : m_weights) { delete w; } // Shallow copies @@ -165,7 +167,6 @@ model& model::operator=(const model& other) { m_objective_function = other.m_objective_function; m_metrics = other.m_metrics; m_callbacks = other.m_callbacks; - m_layers = other.m_layers; m_weights = other.m_weights; if (m_objective_function != nullptr) { m_objective_function = m_objective_function->copy(); @@ -176,10 +177,16 @@ model& model::operator=(const model& other) { for (auto& cb : m_callbacks) { cb = cb->copy(); } - std::unordered_map layer_map; - for (auto& l : m_layers) { - l = layer_map[l] = l->copy(); - l->set_model(this); + std::unordered_map layer_map; + m_layers.clear(); + m_layers.reserve(other.m_layers.size()); + for (const auto& ptr : other.m_layers) { + if (!ptr) { LBANN_ERROR("unexpected null pointer"); } + auto* old_layer = ptr.get(); + auto* new_layer = old_layer->copy(); + new_layer->set_model(this); + 
m_layers.emplace_back(new_layer); + layer_map[old_layer] = new_layer; } std::unordered_map weights_map; for (auto& w : m_weights) { @@ -193,7 +200,6 @@ model& model::operator=(const model& other) { model::~model() { if (m_objective_function) { delete m_objective_function; } if (m_default_optimizer != nullptr) { delete m_default_optimizer; } - for (const auto& l : m_layers) { delete l; } for (const auto& w : m_weights) { delete w; } for (const auto& m : m_metrics) { delete m; } for (const auto& cb : m_callbacks) { delete cb; } @@ -203,7 +209,7 @@ model::~model() { // Model specification //////////////////////////////////////////////////////////// -void model::add_layer(Layer* l) { +void model::add_layer(std::unique_ptr l) { std::stringstream err; // Check for null pointer @@ -226,7 +232,8 @@ void model::add_layer(Layer* l) { } // Add layer to model - m_layers.push_back(l); + m_layers.emplace_back(std::move(l)); + m_layers.back()->set_model(this); } @@ -274,25 +281,6 @@ void model::add_metric(metric *m) { m_metrics.push_back(m); } -void model::set_name(std::string name) { - m_name = name; -} - -void model::set_layers(std::vector& layers) { - - // Delete old layers - for (const auto& layer : m_layers) { - delete layer; - } - m_layers.clear(); - - // Add new layers - for (const auto& layer : layers) { - add_layer(layer); - } - -} - std::vector model::get_weights() { std::vector weights_list; for (const auto& w : m_weights) { @@ -309,6 +297,23 @@ const std::vector model::get_weights() const { return weights_list; } +std::vector model::get_layers() { + std::vector layer_list; + layer_list.reserve(m_layers.size()); + for (const auto& ptr : m_layers) { + layer_list.push_back(ptr.get()); + } + return layer_list; +} +const std::vector model::get_layers() const { + std::vector layer_list; + layer_list.reserve(m_layers.size()); + for (const auto& ptr : m_layers) { + layer_list.push_back(ptr.get()); + } + return layer_list; +} + void model::replace_weights(std::vector& 
new_weights) { // Check that number of weights is valid @@ -323,8 +328,8 @@ void model::replace_weights(std::vector& new_weights) { // Replace weights in list std::vector old_weights(m_weights.begin(), m_weights.begin() + new_weights.size()); - std::unordered_map weights_map; - std::unordered_map layer_map; + std::unordered_map weights_map; + std::unordered_map layer_map; for (size_t i = 0; i < new_weights.size(); ++i) { m_weights[i] = weights_map[old_weights[i]] = new_weights[i]; } @@ -364,24 +369,59 @@ optimizer* model::create_optimizer() const { } bool model::is_execution_mode_valid(execution_mode mode) const { - for (const auto& layer : m_layers) { - const auto *input = dynamic_cast(layer); - if (input != nullptr - && !input->is_execution_mode_valid(mode)) { + for (const auto& l : m_layers) { + const auto* input = dynamic_cast(l.get()); + if (input != nullptr && !input->is_execution_mode_valid(mode)) { return false; } } return true; } -void model::permute_layers(const std::vector& permutation) { - std::vector reordered_layers(permutation.size()); - for (size_t i = 0; i < permutation.size(); ++i) { - reordered_layers[i] = m_layers[permutation[i]]; +void model::reorder_layers(const std::vector& gather_indices) { + std::stringstream err; + + // Check that gather indices are in valid range + const El::Int num_layers = m_layers.size(); + if (std::any_of(gather_indices.begin(), gather_indices.end(), + [num_layers](El::Int index) { + return index < 0 || index >= num_layers; + })) { + err << "attempted to reorder layer list for " + << "model \"" << get_name() << "\" " + << "with invalid gather index"; + LBANN_ERROR(err.str()); + } + + // Reorder layers + std::vector> reordered_layers(gather_indices.size()); + for (size_t i = 0; i < gather_indices.size(); ++i) { + reordered_layers[i] = std::move(m_layers[gather_indices[i]]); } m_layers = std::move(reordered_layers); + + // Check that layer list has no null pointers + for (const auto& l : m_layers) { + if (l == nullptr) 
{ + err << "found a null pointer in the layer list for " + << "model \"" << get_name() << "\" after reordering"; + LBANN_ERROR(err.str()); + } + } + } +void model::set_name(std::string name) { + if (name.empty()) { + std::stringstream err; + err << "attempted to rename model \"" << get_name() << "\" " + << "with empty string"; + LBANN_ERROR(err.str()); + } + m_name = std::move(name); +} + + description model::get_description() const { // Construct description object @@ -390,7 +430,7 @@ description model::get_description() const { // Layer topology description layer_topology_desc("Layer topology:"); - for (const auto* l : m_layers) { + for (const auto& l : m_layers) { std::stringstream ss; if (l == nullptr) { ss << "unknown layer: {} -> {}"; @@ -434,7 +474,7 @@ description model::get_description() const { // Layer details description layer_details_desc("Layer details:"); - for (const auto* l : m_layers) { + for (const auto& l : m_layers) { if (l == nullptr) { layer_details_desc.add("unknown layer"); } else { @@ -518,7 +558,7 @@ void model::remap_pointers(const std::unordered_map& layer_map, void model::freeze_layers_under_frozen_surface() { bool freezing = false; for (size_t i = m_layers.size(); i-- > 0u; ) { - auto& l = m_layers[i]; + auto* l = m_layers[i].get(); if (dynamic_cast(l) != nullptr) { if (l->is_frozen()) { throw lbann_exception("Frozen io_layer!"); @@ -565,17 +605,51 @@ void model::setup(std::shared_ptr io_thread_pool) { } void model::setup_layer_topology() { + std::stringstream err; - // Search layer graph and add all connected layers - add_connected_layers(); + // Check that layer list is valid + // Note: Throws an exception if the layer list contains a null + // pointer, if it contains two layers with the same name, or if a + // layer has a pointer to a layer in a different model. 
+ std::unordered_set layer_set; + std::unordered_set name_set; + for (const auto& l : m_layers) { + if (l == nullptr) { + err << "model \"" << get_name() << "\" " + << "has a null pointer in its layer list"; + LBANN_ERROR(err.str()); + } else if (name_set.count(l->get_name()) > 0) { + err << "model \"" << get_name() << "\" " + << "has multiple layers named \"" << l->get_name() << "\""; + LBANN_ERROR(err.str()); + } + name_set.insert(l->get_name()); + layer_set.insert(l.get()); + } + for (const auto& l : m_layers) { + for (const auto& ptr : l->get_layer_pointers()) { + if (ptr != nullptr && layer_set.count(ptr) < 1) { + err << "layer \"" << l->get_name() << "\" " + << "(in model \"" << get_name() << "\") " + << "has a pointer to layer " << ptr->get_name() << "\" "; + const auto* other_model = ptr->get_model(); + if (other_model == nullptr) { + err << "(not in a model)"; + } else { + err << "(in model \"" << other_model->get_name() << "\")"; + } + LBANN_ERROR(err.str()); + } + } + } // Make sure parent/child relationships are reciprocated - for (const auto& layer : m_layers) { - for (const auto& parent : layer->get_parent_layers()) { - const_cast(parent)->add_child_layer(layer); + for (const auto& l : m_layers) { + for (const auto& parent : l->get_parent_layers()) { + const_cast(parent)->add_child_layer(l.get()); } - for (const auto& child : layer->get_child_layers()) { - const_cast(child)->add_parent_layer(layer); + for (const auto& child : l->get_child_layers()) { + const_cast(child)->add_parent_layer(l.get()); } } @@ -584,61 +658,49 @@ void model::setup_layer_topology() { add_dummy_layers(); add_split_layers(); - // Check that layer names are unique - std::unordered_set names; - for (const auto& l : m_layers) { - if (names.count(l->get_name()) > 0) { - std::stringstream err; - err << "model \"" << get_name() << "\" " - << "has multiple layers named \"" << l->get_name() << "\""; - LBANN_ERROR(err.str()); - } - names.insert(l->get_name()); - } - } void 
model::setup_layer_execution_order() { // Find input layers - const int num_layers = m_layers.size(); - std::vector input_layers, other_layers; - for (int i = 0; i < num_layers; ++i) { - if (dynamic_cast(m_layers[i]) != nullptr) { + const El::Int num_layers = m_layers.size(); + std::vector input_layers, other_layers; + for (El::Int i = 0; i < num_layers; ++i) { + if (dynamic_cast(m_layers[i].get()) != nullptr) { input_layers.push_back(i); } else { other_layers.push_back(i); } } - // Permute layers so input layers are executed first - std::vector permutation; - permutation.insert(permutation.end(), - input_layers.begin(), input_layers.end()); - permutation.insert(permutation.end(), - other_layers.begin(), other_layers.end()); - permute_layers(permutation); + // Reorder layers so input layers are executed first + std::vector gather_indices; + gather_indices.insert(gather_indices.end(), + input_layers.begin(), input_layers.end()); + gather_indices.insert(gather_indices.end(), + other_layers.begin(), other_layers.end()); + reorder_layers(gather_indices); } void model::setup_layers() { - for (const auto& layer : m_layers) { - layer->set_model(this); - layer->setup(); - layer->check_setup(); + for (const auto& l : m_layers) { + l->set_model(this); + l->setup(); + l->check_setup(); } } void model::setup_weights() { // List of used and unused weights - std::unordered_set weights_set(m_weights.begin(), - m_weights.end()); - std::set unused_weights(m_weights.begin(), - m_weights.end()); + std::unordered_set weights_set(m_weights.begin(), + m_weights.end()); + std::set unused_weights(m_weights.begin(), + m_weights.end()); // Find weights used by layers - for (const auto* l : m_layers) { + for (const auto& l : m_layers) { for (const auto& w : l->get_weights()) { if (weights_set.count(w) == 0) { m_weights.push_back(w); @@ -668,82 +730,45 @@ void model::setup_weights() { } -void model::add_connected_layers() { - - // Initialize breadth-first search queue with layer list - 
std::queue layer_queue; - std::unordered_set layer_set; - for (const auto& layer : m_layers) { - layer_queue.push(layer); - layer_set.insert(layer); - } - - // Visit nodes in search queue until it is exhausted - while (!layer_queue.empty()) { - const Layer *layer = layer_queue.front(); - layer_queue.pop(); - - // Find neighbors of current node - std::vector relatives; - for (const auto& parent : layer->get_parent_layers()) { - relatives.push_back(parent); - } - for (const auto& child : layer->get_child_layers()) { - relatives.push_back(child); - } - - // Add neighbors to search queue if they aren't in the layer list - for (const auto& relative : relatives) { - if (layer_set.count(relative) == 0) { - add_layer(const_cast(relative)); - layer_queue.push(relative); - layer_set.insert(relative); - } - } - - } - -} - void model::add_evaluation_layers() { + std::stringstream err; + const auto& layer_pointers = get_layers(); // Add evaluation layers corresponding to objective function layer terms for (auto* t : m_objective_function->get_terms()) { auto* term = dynamic_cast(t); if (term != nullptr) { - auto* l = &term->get_layer(); - const size_t pos = (std::find(m_layers.begin(), m_layers.end(), l) - - m_layers.begin()); - if (pos >= m_layers.size()) { - std::stringstream err; - err << "an objective function layer term in " - << "model \"" << get_name() << "\" corresponds to " - << "layer \"" << l->get_name() << "\", " - << "which isn't in the model"; + auto& l = term->get_layer(); + if (std::find(layer_pointers.begin(), layer_pointers.end(), &l) + == layer_pointers.end()) { + err << "model \"" << get_name() << "\" " + << "has an objective function layer term corresponding to " + << "layer \"" << l.get_name() << "\", " + << "which isn't in the model's list of layers"; LBANN_ERROR(err.str()); } - if (dynamic_cast(l) == nullptr) { + if (dynamic_cast(&l) == nullptr) { // Create evaluation layer - auto* eval = abstract_evaluation_layer::construct( - l->get_comm(), - 
l->get_data_layout(), - l->get_device_allocation()); + std::unique_ptr eval(abstract_evaluation_layer::construct( + l.get_comm(), + l.get_data_layout(), + l.get_device_allocation())); // Set evaluation layer name El::Int name_index = 1; - std::string name = l->get_name() + "_eval"; + std::string name = l.get_name() + "_eval"; while (layer_has_name(name, m_layers)) { name_index++; - name = l->get_name() + "_eval" + std::to_string(name_index); + name = l.get_name() + "_eval" + std::to_string(name_index); } eval->set_name(name); // Add evaluation layer to model - l->add_child_layer(eval); - eval->add_parent_layer(l); + l.add_child_layer(eval.get()); + eval->add_parent_layer(&l); term->set_layer(*eval); - add_layer(eval); + add_layer(std::move(eval)); } } @@ -753,38 +778,36 @@ void model::add_evaluation_layers() { for (auto* m : m_metrics) { auto* met = dynamic_cast(m); if (met != nullptr) { - auto* l = &met->get_layer(); - const size_t pos = (std::find(m_layers.begin(), m_layers.end(), l) - - m_layers.begin()); - if (pos >= m_layers.size()) { - std::stringstream err; + auto& l = met->get_layer(); + if (std::find(layer_pointers.begin(), layer_pointers.end(), &l) + == layer_pointers.end()) { err << "layer metric \"" << met->name() << "\" " - << "corresponds to layer \"" << l->get_name() << "\", " + << "corresponds to layer \"" << l.get_name() << "\", " << "which is not in model \"" << get_name() << "\""; LBANN_ERROR(err.str()); } - if (dynamic_cast(l) == nullptr) { + if (dynamic_cast(&l) == nullptr) { // Create evaluation layer - auto* eval = abstract_evaluation_layer::construct( - l->get_comm(), - l->get_data_layout(), - l->get_device_allocation()); + std::unique_ptr eval(abstract_evaluation_layer::construct( + l.get_comm(), + l.get_data_layout(), + l.get_device_allocation())); // Set evaluation layer name El::Int name_index = 1; - std::string name = l->get_name() + "_eval"; + std::string name = l.get_name() + "_eval"; while (layer_has_name(name, m_layers)) { 
name_index++; - name = l->get_name() + "_eval" + std::to_string(name_index); + name = l.get_name() + "_eval" + std::to_string(name_index); } eval->set_name(name); // Add evaluation layer to model - l->add_child_layer(eval); - eval->add_parent_layer(l); + l.add_child_layer(eval.get()); + eval->add_parent_layer(&l); met->set_layer(*eval); - add_layer(eval); + add_layer(std::move(eval)); } } @@ -796,48 +819,48 @@ void model::add_dummy_layers() { // Add dummy layers until all layers have enough children for (size_t i = 0; i < m_layers.size(); ++i) { - auto layer = m_layers[i]; - while (layer->get_num_children() < layer->get_expected_num_child_layers()) { + auto& l = *m_layers[i]; + while (l.get_num_children() < l.get_expected_num_child_layers()) { // Create dummy layer - Layer *dummy = nullptr; + std::unique_ptr dummy; using args_tuple = std::tuple; - args_tuple args(layer->get_data_layout(), layer->get_device_allocation()); + args_tuple args(l.get_data_layout(), l.get_device_allocation()); if (args == args_tuple(data_layout::DATA_PARALLEL, El::Device::CPU)) { - dummy = new dummy_layer(m_comm); + dummy.reset(new dummy_layer(m_comm)); } if (args == args_tuple(data_layout::MODEL_PARALLEL, El::Device::CPU)) { - dummy = new dummy_layer(m_comm); + dummy.reset(new dummy_layer(m_comm)); } #ifdef LBANN_HAS_GPU if (args == args_tuple(data_layout::DATA_PARALLEL, El::Device::GPU)) { - dummy = new dummy_layer(m_comm); + dummy.reset(new dummy_layer(m_comm)); } if (args == args_tuple(data_layout::MODEL_PARALLEL, El::Device::GPU)) { - dummy = new dummy_layer(m_comm); + dummy.reset(new dummy_layer(m_comm)); } #endif // LBANN_HAS_GPU if (dummy == nullptr) { std::stringstream err; err << "could not construct dummy layer corresponding to " - << "layer \"" << layer->get_name() << "\" " + << "layer \"" << l.get_name() << "\" " << "in model \"" << get_name() << "\""; LBANN_ERROR(err.str()); } // Set dummy layer name El::Int name_index = 1; - std::string name = layer->get_name() + "_dummy"; 
+ std::string name = l.get_name() + "_dummy"; while (layer_has_name(name, m_layers)) { name_index++; - name = layer->get_name() + "_dummy" + std::to_string(name_index); + name = l.get_name() + "_dummy" + std::to_string(name_index); } dummy->set_name(name); // Add dummy layer to model - layer->add_child_layer(dummy); - dummy->add_parent_layer(layer); - add_layer(dummy); + l.add_child_layer(dummy.get()); + dummy->add_parent_layer(&l); + add_layer(std::move(dummy)); } } @@ -846,45 +869,45 @@ void model::add_dummy_layers() { void model::add_split_layers() { for (size_t i = 0; i < m_layers.size(); ++i) { - auto layer = m_layers[i]; + auto& layer = *m_layers[i]; // Add split layer if layer expects one child but has multiple - auto& children = layer->get_child_layers(); - if (layer->get_expected_num_child_layers() == 1 + auto& children = layer.get_child_layers(); + if (layer.get_expected_num_child_layers() == 1 && children.size() != 1) { // Create split layer - Layer *split = nullptr; + std::unique_ptr split; using args_tuple = std::tuple; - args_tuple args(layer->get_data_layout(), layer->get_device_allocation()); + args_tuple args(layer.get_data_layout(), layer.get_device_allocation()); if (args == args_tuple(data_layout::DATA_PARALLEL, El::Device::CPU)) { - split = new split_layer(m_comm); + split.reset(new split_layer(m_comm)); } if (args == args_tuple(data_layout::MODEL_PARALLEL, El::Device::CPU)) { - split = new split_layer(m_comm); + split.reset(new split_layer(m_comm)); } #ifdef LBANN_HAS_GPU if (args == args_tuple(data_layout::DATA_PARALLEL, El::Device::GPU)) { - split = new split_layer(m_comm); + split.reset(new split_layer(m_comm)); } if (args == args_tuple(data_layout::MODEL_PARALLEL, El::Device::GPU)) { - split = new split_layer(m_comm); + split.reset(new split_layer(m_comm)); } #endif // LBANN_HAS_GPU if (split == nullptr) { std::stringstream err; err << "could not construct split layer corresponding to " - << "layer \"" << layer->get_name() << "\" " + << 
"layer \"" << layer.get_name() << "\" " << "in model \"" << get_name() << "\""; LBANN_ERROR(err.str()); } // Set split layer name El::Int name_index = 1; - std::string name = layer->get_name() + "_split"; + std::string name = layer.get_name() + "_split"; while (layer_has_name(name, m_layers)) { name_index++; - name = layer->get_name() + "_split" + std::to_string(name_index); + name = layer.get_name() + "_split" + std::to_string(name_index); } split->set_name(name); @@ -894,16 +917,16 @@ void model::add_split_layers() { split->add_child_layer(child); auto& child_parents = child->get_parent_layers(); std::replace(child_parents.begin(), child_parents.end(), - layer, split); + &layer, split.get()); } // Setup relationship between current layer and split layer children.clear(); - layer->add_child_layer(split); - split->add_parent_layer(layer); + layer.add_child_layer(split.get()); + split->add_parent_layer(&layer); // Add split layer to layer list - add_layer(split); + add_layer(std::move(split)); } @@ -913,7 +936,7 @@ void model::add_split_layers() { int model::get_num_iterations_per_epoch(execution_mode mode) const { generic_input_layer* input = nullptr; for (auto&& l : m_layers) { - input = dynamic_cast(l); + input = dynamic_cast(l.get()); if (input != nullptr) { break; } } if (input == nullptr) { @@ -967,8 +990,8 @@ void model::collect_indices(execution_mode mode) { } void model::collect_background_data_fetch(execution_mode mode) { - for (const auto& layer : m_layers) { - auto *input = dynamic_cast(layer); + for (const auto& l : m_layers) { + auto *input = dynamic_cast(l.get()); if (input != nullptr) { input->collect_background_data_fetch(mode); } @@ -1101,23 +1124,23 @@ void model::clear_gradients() { void model::forward_prop(execution_mode mode) { do_model_forward_prop_begin_cbs(mode); - for (const auto& layer : m_layers) { - do_layer_forward_prop_begin_cbs(mode, layer); - layer->forward_prop(); - do_layer_forward_prop_end_cbs(mode, layer); + for (const auto& l : 
m_layers) { + do_layer_forward_prop_begin_cbs(mode, l.get()); + l->forward_prop(); + do_layer_forward_prop_end_cbs(mode, l.get()); } do_model_forward_prop_end_cbs(mode); } void model::backward_prop() { do_model_backward_prop_begin_cbs(); - for (int l = m_layers.size() - 1; l >= 0; --l) { + for (El::Int i = m_layers.size()-1; i >= 0; --i) { // Perform backward prop step on current layer - Layer *layer = m_layers[l]; - do_layer_backward_prop_begin_cbs(layer); - layer->back_prop(); - do_layer_backward_prop_end_cbs(layer); + auto& l = *m_layers[i]; + do_layer_backward_prop_begin_cbs(&l); + l.back_prop(); + do_layer_backward_prop_end_cbs(&l); // Terminate early if all gradients have been computed bool all_gradients_computed = true; @@ -1136,13 +1159,13 @@ void model::backward_prop() { void model::update_weights() { do_model_optimize_begin_cbs(); - for (int i = m_weights.size() - 1; i >= 0; --i) { - auto& w = m_weights[i]; - optimizer* opt = w->get_optimizer(); + for (El::Int i = m_weights.size()-1; i >= 0; --i) { + auto& w = *m_weights[i]; + optimizer* opt = w.get_optimizer(); if (opt != nullptr) { - do_weight_optimize_begin_cbs(w); + do_weight_optimize_begin_cbs(&w); opt->step(); - do_weight_optimize_end_cbs(w); + do_weight_optimize_end_cbs(&w); } } do_model_optimize_end_cbs(); @@ -1150,15 +1173,15 @@ void model::update_weights() { bool model::update_layers() { bool finished = true; - for (int l = m_layers.size() - 1; l >= 0; --l) { - finished = m_layers[l]->update() && finished; + for (El::Int i = m_layers.size()-1; i >= 0; --i) { + finished = m_layers[i]->update() && finished; } return finished; } void model::reconcile_weight_values() { std::vector reqs(m_weights.size()); - for (int i = m_weights.size() - 1; i >= 0; --i) { + for (El::Int i = m_weights.size()-1; i >= 0; --i) { m_weights[i]->reconcile_values(reqs[i]); } for (auto& req : reqs) { m_comm->wait(req); } @@ -1427,8 +1450,8 @@ void model::do_weight_optimize_end_cbs(weights *w) { 
//////////////////////////////////////////////////////////// void model::summarize_stats(lbann_summary& summarizer) { - for (const auto& layer : m_layers) { - layer->summarize_stats(summarizer, get_cur_step()); + for (const auto& l : m_layers) { + l->summarize_stats(summarizer, get_cur_step()); } summarizer.reduce_scalar("objective", m_objective_function->get_mean_value(m_execution_mode), @@ -1454,8 +1477,8 @@ void model::summarize_stats(lbann_summary& summarizer) { } void model::summarize_matrices(lbann_summary& summarizer) { - for (const auto& layer : m_layers) { - layer->summarize_matrices(summarizer, get_cur_step()); + for (const auto& l : m_layers) { + l->summarize_matrices(summarizer, get_cur_step()); } } diff --git a/src/proto/factories/layer_factory.cpp b/src/proto/factories/layer_factory.cpp index 4fffdef5f0c..2daf0038365 100644 --- a/src/proto/factories/layer_factory.cpp +++ b/src/proto/factories/layer_factory.cpp @@ -30,45 +30,24 @@ namespace lbann { namespace proto { -#define LAYOUT_ERR(layer_name, layer_type) \ - { \ - std::stringstream s; \ - s << "\nlayer type: " << layer_type << " layer name: " << layer_name << " -- is only supported for data_layout::DATA_PARALLEL"; \ - LBANN_ERROR(s.str()); \ - } - -#define DEVICE_ERR(layer_name, layer_type, layout, Dev) \ - { \ - if (layout != data_layout::DATA_PARALLEL) { \ - LAYOUT_ERR(layer_name, layer_type) \ - } else if (Dev != El::Device::CPU) { \ - std::stringstream s; \ - s << "\nlayer type: " << layer_type " layer name: " << layer_name << " -- is only supported for El::Device::CPU; it looks like you're attempting to run with a cuda build. 
You should be able to run by adding --disable_cuda to your command line (in which case you won't be using GPUs, which may not be what you want)";\ - LBANN_ERROR(s.str()); \ - } else { \ - std::stringstream s; \ - s << "\nsomething is weird with data_layout and/or El::Device but we can't determine what."; \ - LBANN_ERROR(s.str()); \ - } \ - } - std::vector get_slice_points_from_reader(const generic_data_reader* dr, const std::string& var_category, bool& is_supported); -template -Layer* construct_layer(lbann_comm* comm, - const std::map& data_readers, - int num_parallel_readers, - const lbann_data::Layer& proto_layer) { +template +std::unique_ptr construct_layer( + lbann_comm* comm, + const std::map& data_readers, + int num_parallel_readers, + const lbann_data::Layer& proto_layer) { std::stringstream err; // Convenience macro to construct layers with no parameters -#define CONSTRUCT_LAYER(name) \ - do { \ - if (proto_layer.has_##name()) { \ - return new name##_layer(comm); \ - } \ +#define CONSTRUCT_LAYER(name) \ + do { \ + if (proto_layer.has_##name()) { \ + return lbann::make_unique>(comm); \ + } \ } while (false) // Input layers @@ -82,20 +61,21 @@ Layer* construct_layer(lbann_comm* comm, if (mode_str == "reconstruction") { target_mode = data_reader_target_mode::RECONSTRUCTION; } if (mode_str == "na" || mode_str == "NA" || mode_str == "N/A") { target_mode = data_reader_target_mode::NA; } if (io_buffer == "partitioned") { - return new input_layer(comm, - num_parallel_readers, - data_readers, - !params.data_set_per_model(), - target_mode); + return lbann::make_unique>( + comm, + num_parallel_readers, + data_readers, + !params.data_set_per_model(), + target_mode); } } // Target layers if (proto_layer.has_target()) { - return new target_layer(comm); + return lbann::make_unique>(comm); } if (proto_layer.has_reconstruction()) { - return new reconstruction_layer(comm); + return lbann::make_unique>(comm); } // Fully connected layer @@ -163,11 +143,12 @@ Layer* 
construct_layer(lbann_comm* comm, num_neurons = dr->get_linearized_data_size(); } } - return new fully_connected_layer(comm, - num_neurons, - params.transpose(), - nullptr, - params.has_bias()); + return lbann::make_unique>( + comm, + num_neurons, + params.transpose(), + nullptr, + params.has_bias()); } // Convolution and deconvolution layer @@ -179,6 +160,10 @@ Layer* construct_layer(lbann_comm* comm, if (num_groups == 0) { num_groups = 1; } + if (Layout != data_layout::DATA_PARALLEL) { + LBANN_ERROR("convolution layer is only supported with " + "a data-parallel layout"); + } if (params.has_vectors()) { const auto& dims = parse_list(params.conv_dims()); const auto& pads = parse_list(params.conv_pads()); @@ -187,13 +172,9 @@ Layer* construct_layer(lbann_comm* comm, if (dilations.empty()) { dilations.resize(dims.size(), 1); } - if (layout == data_layout::DATA_PARALLEL) { - return new convolution_layer( - comm, dims.size(), num_output_channels, - dims, pads, strides, dilations, num_groups, bias - ); - } - LAYOUT_ERR(proto_layer.name(), "convolution"); + return lbann::make_unique>( + comm, dims.size(), num_output_channels, + dims, pads, strides, dilations, num_groups, bias); } else { const auto& num_dims = params.num_dims(); const auto& dim = params.conv_dims_i(); @@ -203,13 +184,9 @@ Layer* construct_layer(lbann_comm* comm, if (dilation == 0) { dilation = 1; } - if (layout == data_layout::DATA_PARALLEL) { - return new convolution_layer( - comm, num_dims, num_output_channels, - dim, pad, stride, dilation, num_groups, bias - ); - } - LAYOUT_ERR(proto_layer.name(), "convolution"); + return lbann::make_unique>( + comm, num_dims, num_output_channels, + dim, pad, stride, dilation, num_groups, bias); } } if (proto_layer.has_deconvolution()) { @@ -227,6 +204,10 @@ Layer* construct_layer(lbann_comm* comm, } num_output_channels = dr->get_linearized_data_size(); } + if (Layout != data_layout::DATA_PARALLEL) { + LBANN_ERROR("deconvolution layer is only supported with " + "a 
data-parallel layout"); + } if (params.has_vectors()) { const auto& dims = parse_list(params.conv_dims()); const auto& pads = parse_list(params.conv_pads()); @@ -235,13 +216,9 @@ Layer* construct_layer(lbann_comm* comm, if (dilations.empty()) { dilations.resize(dims.size(), 1); } - if (layout == data_layout::DATA_PARALLEL) { - return new deconvolution_layer( - comm, dims.size(), num_output_channels, - dims, pads, strides, dilations, num_groups, bias - ); - } - LAYOUT_ERR(proto_layer.name(), "deconvolution"); + return lbann::make_unique>( + comm, dims.size(), num_output_channels, + dims, pads, strides, dilations, num_groups, bias); } else { const auto& num_dims = params.num_dims(); const auto& dim = params.conv_dims_i(); @@ -251,13 +228,9 @@ Layer* construct_layer(lbann_comm* comm, if (dilation == 0) { dilation = 1; } - if (layout == data_layout::DATA_PARALLEL) { - return new deconvolution_layer( - comm, num_dims, num_output_channels, - dim, pad, stride, dilation, num_groups, bias - ); - } - LAYOUT_ERR(proto_layer.name(), "deconvolution"); + return lbann::make_unique>( + comm, num_dims, num_output_channels, + dim, pad, stride, dilation, num_groups, bias); } } @@ -276,22 +249,22 @@ Layer* construct_layer(lbann_comm* comm, } dims.push_back(dr->get_linearized_data_size()); } - return new reshape_layer(comm, dims); + return lbann::make_unique>(comm, dims); } if (proto_layer.has_sum()) { - return new sum_layer(comm); + return lbann::make_unique>(comm); } if (proto_layer.has_weighted_sum()) { const auto& params = proto_layer.weighted_sum(); const auto& scaling_factors = parse_list(params.scaling_factors()); - return new weighted_sum_layer(comm, scaling_factors); + return lbann::make_unique>(comm, scaling_factors); } if (proto_layer.has_split()) { - return new split_layer(comm); + return lbann::make_unique>(comm); } if (proto_layer.has_concatenation()) { const auto& axis = proto_layer.concatenation().concatenation_axis(); - return new concatenation_layer(comm, axis); + 
return lbann::make_unique>(comm, axis); } if (proto_layer.has_slice()) { const auto& params = proto_layer.slice(); @@ -337,25 +310,24 @@ Layer* construct_layer(lbann_comm* comm, LBANN_ERROR(err.str()); return nullptr; } - return new slice_layer(comm, - params.slice_axis(), - slice_points); + return lbann::make_unique>( + comm, params.slice_axis(), slice_points); } if (proto_layer.has_hadamard()) { - return new hadamard_layer(comm); + return lbann::make_unique>(comm); } if (proto_layer.has_constant()) { const auto& params = proto_layer.constant(); const auto& dims = parse_list(params.num_neurons()); - return new constant_layer(comm, params.value(), dims); + return lbann::make_unique>(comm, params.value(), dims); } if (proto_layer.has_gaussian()) { const auto& params = proto_layer.gaussian(); const auto& dims = parse_list(params.neuron_dims()); if (params.mean() == 0 && params.stdev() == 0) { - return new gaussian_layer(comm, dims); + return lbann::make_unique>(comm, dims); } else { - return new gaussian_layer(comm, + return lbann::make_unique>(comm, dims, params.mean(), params.stdev()); @@ -364,22 +336,23 @@ Layer* construct_layer(lbann_comm* comm, if (proto_layer.has_bernoulli()) { const auto& params = proto_layer.bernoulli(); const auto& dims = parse_list(params.neuron_dims()); - return new bernoulli_layer(comm, - dims, - params.prob()); + return lbann::make_unique>( + comm, dims, params.prob()); } if (proto_layer.has_uniform()) { const auto& params = proto_layer.uniform(); const auto& dims = parse_list(params.neuron_dims()); if (params.min() == 0 && params.max() == 0) { - return new uniform_layer(comm, dims); + return lbann::make_unique>(comm, dims); } else { - return new uniform_layer(comm, dims, params.min(), params.max()); + return lbann::make_unique>( + comm, dims, params.min(), params.max()); } } if (proto_layer.has_zero()) { const auto& params = proto_layer.zero(); - return new zero_layer(comm, params.first_half(), params.second_half()); + return 
lbann::make_unique>( + comm, params.first_half(), params.second_half()); } if (proto_layer.has_pooling()) { const auto& params = proto_layer.pooling(); @@ -388,34 +361,32 @@ Layer* construct_layer(lbann_comm* comm, if (mode_str == "max" ) { mode = pool_mode::max; } if (mode_str == "average" ) { mode = pool_mode::average; } if (mode_str == "average_no_pad" ) { mode = pool_mode::average_no_pad; } + if (Layout != data_layout::DATA_PARALLEL) { + LBANN_ERROR("pooling layer is only supported with " + "a data-parallel layout"); + } if (params.has_vectors()) { const auto& dims = parse_list(params.pool_dims()); const auto& pads = parse_list(params.pool_pads()); const auto& strides = parse_list(params.pool_strides()); - if (layout == data_layout::DATA_PARALLEL) { - return new pooling_layer( - comm, dims.size(), dims, pads, strides, mode - ); - } - LAYOUT_ERR(proto_layer.name(), "pooling"); + return lbann::make_unique>( + comm, dims.size(), dims, pads, strides, mode); } else { const auto& num_dims = params.num_dims(); const auto& dim = params.pool_dims_i(); const auto& pad = params.pool_pads_i(); const auto& stride = params.pool_strides_i(); - if (layout == data_layout::DATA_PARALLEL) { - return new pooling_layer( - comm, num_dims, dim, pad, stride, mode - ); - } - LAYOUT_ERR(proto_layer.name(), "pooling"); + return lbann::make_unique>( + comm, num_dims, dim, pad, stride, mode); } } if (proto_layer.has_unpooling()) { - if (layout == data_layout::DATA_PARALLEL && Dev == El::Device::CPU) { - return new unpooling_layer(comm); + if (Layout == data_layout::DATA_PARALLEL && Device == El::Device::CPU) { + return lbann::make_unique>(comm); + } else { + LBANN_ERROR("unpooling layer is only supported with " + "a data-parallel layout and on CPU"); } - DEVICE_ERR(proto_layer.name(), "unpooling", layout, Dev); } if (proto_layer.has_reduction()) { const auto& params = proto_layer.reduction(); @@ -423,72 +394,80 @@ Layer* construct_layer(lbann_comm* comm, reduction_mode mode = 
reduction_mode::INVALID; if (mode_str == "sum" || mode_str.empty()) { mode = reduction_mode::SUM; } if (mode_str == "average") { mode = reduction_mode::AVERAGE; } - if (layout == data_layout::DATA_PARALLEL) { - return new reduction_layer(comm, mode); + if (Layout == data_layout::DATA_PARALLEL) { + return lbann::make_unique>(comm, mode); + } else { + LBANN_ERROR("reduction layer is only supported with " + "a data-parallel layout"); } - LAYOUT_ERR(proto_layer.name(), "reduction"); } if (proto_layer.has_evaluation()) { - return new evaluation_layer(comm); + return lbann::make_unique>(comm); } if (proto_layer.has_crop()) { const auto& params = proto_layer.crop(); const auto& dims = parse_list(params.dims()); - if (layout == data_layout::DATA_PARALLEL) { - return new crop_layer(comm, dims); + if (Layout == data_layout::DATA_PARALLEL) { + return lbann::make_unique>(comm, dims); + } else { + LBANN_ERROR("crop layer is only supported with " + "a data-parallel layout"); } - LAYOUT_ERR(proto_layer.name(), "crop"); } if (proto_layer.has_categorical_random()) { - if (layout == data_layout::DATA_PARALLEL - && Dev == El::Device::CPU) { - return new categorical_random_layer(comm); + if (Layout == data_layout::DATA_PARALLEL + && Device == El::Device::CPU) { + return lbann::make_unique>(comm); + } else { + LBANN_ERROR("categorical random layer is only supported on CPU"); } - DEVICE_ERR(proto_layer.name(), "categorical_random", layout, Dev); } if (proto_layer.has_discrete_random()) { const auto& params = proto_layer.discrete_random(); const auto& values = parse_list(params.values()); const auto& dims = parse_list(params.dims()); - if (layout == data_layout::DATA_PARALLEL - && Dev == El::Device::CPU) { - return new discrete_random_layer( - comm, values, dims); + if (Layout == data_layout::DATA_PARALLEL + && Device == El::Device::CPU) { + return lbann::make_unique>( + comm, values, dims); + } else { + LBANN_ERROR("discrete random layer is only supported on CPU"); } - 
DEVICE_ERR(proto_layer.name(), "discrete_random", layout, Dev); } if (proto_layer.has_dummy()) { - return new dummy_layer(comm); + return lbann::make_unique>(comm); } if (proto_layer.has_stop_gradient()) { - return new stop_gradient_layer(comm); + return lbann::make_unique>(comm); } if (proto_layer.has_in_top_k()) { const auto& params = proto_layer.in_top_k(); - return new in_top_k_layer(comm, params.k()); + return lbann::make_unique>(comm, params.k()); } if (proto_layer.has_sort()) { const auto& params = proto_layer.sort(); - if (layout == data_layout::DATA_PARALLEL) { - return new sort_layer(comm, params.descending()); + if (Layout == data_layout::DATA_PARALLEL) { + return lbann::make_unique>(comm, params.descending()); + } else { + LBANN_ERROR("sort layer is only supported with " + "a data-parallel layout"); } - LAYOUT_ERR(proto_layer.name(), "sort"); } if (proto_layer.has_weights_layer()) { const auto& params = proto_layer.weights_layer(); const auto& dims = parse_list(params.dims()); - return new weights_layer(comm, dims); + return lbann::make_unique>(comm, dims); } if (proto_layer.has_tessellate()) { const auto& params = proto_layer.tessellate(); const auto& dims = parse_list(params.dims()); - return new tessellate_layer(comm, dims); + return lbann::make_unique>(comm, dims); } // Regularizer layers if (proto_layer.has_batch_normalization()) { const auto& params = proto_layer.batch_normalization(); - if (layout == data_layout::DATA_PARALLEL) { + if (Layout == data_layout::DATA_PARALLEL) { const auto& aggr_str = params.stats_aggregation(); batch_normalization_stats_aggregation aggr = batch_normalization_stats_aggregation::local; @@ -512,29 +491,33 @@ Layer* construct_layer(lbann_comm* comm, if (epsilon == 0.0) { epsilon = 1e-5; } - return new batch_normalization_layer( + return lbann::make_unique>( comm, decay, epsilon, aggr); - } - LAYOUT_ERR(proto_layer.name(), "batch_normalization"); + } else { + LBANN_ERROR("batch normalization layer is only supported with 
" + "a data-parallel layout"); + } } if (proto_layer.has_dropout()) { const auto& params = proto_layer.dropout(); - return new dropout(comm, params.keep_prob()); + return lbann::make_unique>(comm, params.keep_prob()); } if (proto_layer.has_local_response_normalization()) { const auto& params = proto_layer.local_response_normalization(); - if (layout == data_layout::DATA_PARALLEL) { - return new local_response_normalization_layer( + if (Layout == data_layout::DATA_PARALLEL) { + return lbann::make_unique>( comm, params.window_width(), params.lrn_alpha(), params.lrn_beta(), params.lrn_k()); + } else { + LBANN_ERROR("local response normalization layer is only supported " + "with a data-parallel layout"); } - LAYOUT_ERR(proto_layer.name(), "local_response_normalization"); } if (proto_layer.has_selu_dropout()) { const auto& params = proto_layer.selu_dropout(); @@ -542,9 +525,9 @@ Layer* construct_layer(lbann_comm* comm, const auto& alpha = params.alpha(); const auto& scale = params.scale(); if (alpha != 0.0 && scale != 0.0) { - return new selu_dropout(comm, keep_prob, alpha, scale); + return lbann::make_unique>(comm, keep_prob, alpha, scale); } else { - return new selu_dropout(comm, keep_prob); + return lbann::make_unique>(comm, keep_prob); } } @@ -598,7 +581,7 @@ Layer* construct_layer(lbann_comm* comm, CONSTRUCT_LAYER(logical_xor); if (proto_layer.has_clamp()) { const auto& params = proto_layer.clamp(); - return new clamp_layer(comm, params.min(), params.max()); + return lbann::make_unique>(comm, params.min(), params.max()); } // Activation layers @@ -606,9 +589,9 @@ Layer* construct_layer(lbann_comm* comm, const auto& params = proto_layer.elu(); const auto& alpha = params.alpha(); if (alpha != 0) { - return new elu_layer(comm, alpha); + return lbann::make_unique>(comm, alpha); } else { - return new elu_layer(comm); + return lbann::make_unique>(comm); } } CONSTRUCT_LAYER(identity); @@ -616,9 +599,9 @@ Layer* construct_layer(lbann_comm* comm, const auto& params = 
proto_layer.leaky_relu(); const auto& negative_slope = params.negative_slope(); if (negative_slope != 0) { - return new leaky_relu_layer(comm, negative_slope); + return lbann::make_unique>(comm, negative_slope); } else { - return new leaky_relu_layer(comm); + return lbann::make_unique>(comm); } } CONSTRUCT_LAYER(log_sigmoid); @@ -637,7 +620,7 @@ Layer* construct_layer(lbann_comm* comm, CONSTRUCT_LAYER(mean_absolute_error); if (proto_layer.has_top_k_categorical_accuracy()) { const auto& params = proto_layer.top_k_categorical_accuracy(); - return new top_k_categorical_accuracy_layer(comm, params.k()); + return lbann::make_unique>(comm, params.k()); } CONSTRUCT_LAYER(l2_norm2); CONSTRUCT_LAYER(l1_norm); @@ -650,29 +633,31 @@ Layer* construct_layer(lbann_comm* comm, // Image layers if (proto_layer.has_bilinear_resize()) { const auto& params = proto_layer.bilinear_resize(); - if (layout == data_layout::DATA_PARALLEL) { - return new bilinear_resize_layer( - comm, - params.height(), - params.width()); + if (Layout == data_layout::DATA_PARALLEL) { + return lbann::make_unique>( + comm, params.height(), params.width()); + } else { + LBANN_ERROR("bilinear resize layer is only supported with " + "a data-parallel layout"); } - LAYOUT_ERR(proto_layer.name(), "bilinear_resize"); } // Miscellaneous layers if (proto_layer.has_covariance()) { const auto& params = proto_layer.covariance(); - return new covariance_layer(comm, params.biased()); + return lbann::make_unique>(comm, params.biased()); } if (proto_layer.has_variance()) { const auto& params = proto_layer.variance(); - return new variance_layer(comm, params.biased()); + return lbann::make_unique>(comm, params.biased()); } if (proto_layer.has_channelwise_mean()) { - if (layout == data_layout::DATA_PARALLEL) { - return new channelwise_mean_layer(comm); + if (Layout == data_layout::DATA_PARALLEL) { + return lbann::make_unique>(comm); + } else { + LBANN_ERROR("channel-wise mean layer is only supported with " + "a data-parallel 
layout"); } - LAYOUT_ERR(proto_layer.name(), "channelwise_mean"); } // Throw exception if layer has not been constructed @@ -683,26 +668,26 @@ Layer* construct_layer(lbann_comm* comm, } // Template instantiation -template Layer* construct_layer( +template std::unique_ptr construct_layer( lbann_comm* comm, const std::map& data_readers, int num_parallel_readers, const lbann_data::Layer& proto_layer ); -template Layer* construct_layer( +template std::unique_ptr construct_layer( lbann_comm* comm, const std::map& data_readers, int num_parallel_readers, const lbann_data::Layer& proto_layer ); #ifdef LBANN_HAS_GPU -template Layer* construct_layer( +template std::unique_ptr construct_layer( lbann_comm* comm, const std::map& data_readers, int num_parallel_readers, const lbann_data::Layer& proto_layer ); -template Layer* construct_layer( +template std::unique_ptr construct_layer( lbann_comm* comm, const std::map& data_readers, int num_parallel_readers, diff --git a/src/proto/factories/layer_graph_factory.cpp b/src/proto/factories/layer_graph_factory.cpp index fc7a829b295..a29798c9386 100644 --- a/src/proto/factories/layer_graph_factory.cpp +++ b/src/proto/factories/layer_graph_factory.cpp @@ -185,13 +185,15 @@ void setup_unpooling_pointers(lbann_comm* comm, } // namespace -std::vector construct_layer_graph(lbann_comm* comm, - const std::map& data_readers, - const lbann_data::Model& proto_model) { +std::vector> construct_layer_graph( + lbann_comm* comm, + const std::map& data_readers, + const lbann_data::Model& proto_model) { std::stringstream err; // List of layers - std::vector layers; + std::vector> layers; + layers.reserve(proto_model.layer_size()); // Map from names to layer pointers std::unordered_map names_to_layers; @@ -241,7 +243,7 @@ std::vector construct_layer_graph(lbann_comm* comm, #endif // LBANN_HAS_GPU // Construct layer - Layer* l = nullptr; + std::unique_ptr l; #define TEMPLATE_INSTANTIATION(T_layout, T_device) \ do { \ if (layout == T_layout && device == 
T_device) { \ @@ -275,7 +277,7 @@ std::vector construct_layer_graph(lbann_comm* comm, err << "layer name \"" << name << "\" is not unique"; LBANN_ERROR(err.str()); } - names_to_layers[name] = l; + names_to_layers[name] = l.get(); if (proto_layer.freeze()) { #ifdef LBANN_DEBUG @@ -286,18 +288,21 @@ std::vector construct_layer_graph(lbann_comm* comm, l->freeze(); } // Add layer to list - layers.push_back(l); + layers.emplace_back(std::move(l)); } // Setup pointers between layers - setup_parents_and_children(comm, layers, names_to_layers, proto_model); - setup_hints(layers, names_to_layers, proto_model); - setup_target_pointers(comm, layers, names_to_layers, proto_model); - setup_unpooling_pointers(comm, layers, names_to_layers, proto_model); + std::vector layer_pointers; + layer_pointers.reserve(layers.size()); + for (auto&& ptr : layers) { layer_pointers.push_back(ptr.get()); } + setup_parents_and_children(comm, layer_pointers, names_to_layers, proto_model); + setup_hints(layer_pointers, names_to_layers, proto_model); + setup_target_pointers(comm, layer_pointers, names_to_layers, proto_model); + setup_unpooling_pointers(comm, layer_pointers, names_to_layers, proto_model); // Optionally Set num_neurons = num_labels - setup_fc_num_neurons(layers, data_readers, proto_model); + setup_fc_num_neurons(layer_pointers, data_readers, proto_model); // Return layer list return layers; diff --git a/src/proto/factories/model_factory.cpp b/src/proto/factories/model_factory.cpp index 71c621f8278..5d92a9e1802 100644 --- a/src/proto/factories/model_factory.cpp +++ b/src/proto/factories/model_factory.cpp @@ -237,11 +237,16 @@ model* construct_model(lbann_comm* comm, auto&& layer_list = construct_layer_graph(comm, data_readers, proto_model); + std::vector layer_pointers; + layer_pointers.reserve(layer_list.size()); + for (auto&& ptr : layer_list) { + layer_pointers.push_back(ptr.get()); + } // Construct objective function const auto& proto_obj = proto_model.objective_function(); auto&& 
obj = construct_objective_function(proto_obj); - assign_layers_to_objective_function(layer_list, *obj, proto_obj); + assign_layers_to_objective_function(layer_pointers, *obj, proto_obj); // Construct weights std::vector weights_list; @@ -250,7 +255,7 @@ model* construct_model(lbann_comm* comm, proto_opt, proto_model.weights(i))); } - assign_weights_to_layers(layer_list, weights_list, proto_model); + assign_weights_to_layers(layer_pointers, weights_list, proto_model); assign_weights_to_objective_function(weights_list, *obj, proto_obj); // Construct metrics @@ -261,7 +266,7 @@ model* construct_model(lbann_comm* comm, params.name(), params.unit())); } - assign_layers_to_metrics(layer_list, metric_list, proto_model); + assign_layers_to_metrics(layer_pointers, metric_list, proto_model); // Construct callbacks std::vector callback_list; @@ -270,14 +275,14 @@ model* construct_model(lbann_comm* comm, callback_list.push_back(construct_callback(comm, proto_model.callback(i), data_readers, - layer_list, + layer_pointers, weights_list, summarizer)); } // Instantiate model auto&& m = instantiate_model(comm, obj, proto_opt, proto_model); - for (auto&& l : layer_list ) { m->add_layer(l); } + for (auto&& l : layer_list ) { m->add_layer(std::move(l)); } for (auto&& w : weights_list ) { m->add_weights(w); } for (auto&& met : metric_list ) { m->add_metric(met); } for (auto&& cb : callback_list) { m->add_callback(cb); } diff --git a/src/utils/graph.cpp b/src/utils/graph.cpp index 9c4eb8cfbc0..949eca3f3ab 100644 --- a/src/utils/graph.cpp +++ b/src/utils/graph.cpp @@ -34,8 +34,8 @@ namespace lbann { namespace graph { -void print(const std::set& nodes, - const std::map>& edges, +void print(const std::set& nodes, + const std::map>& edges, std::ostream& os) { for (const auto& node : nodes) { os << "node " << node << " neighbors :"; @@ -45,18 +45,18 @@ void print(const std::set& nodes, os << "\n"; } } - -std::set get_neighbors(int node, - const std::map>& edges) { + +std::set 
get_neighbors(El::Int node, + const std::map>& edges) { if (edges.count(node) > 0) { return edges.at(node); } else { - return std::set(); + return {}; } } -bool is_closure(const std::set& nodes, - const std::map>& edges) { +bool is_closure(const std::set& nodes, + const std::map>& edges) { for (const auto& node : nodes) { for (const auto& neighbor : get_neighbors(node, edges)) { if (nodes.count(neighbor) == 0) { @@ -67,12 +67,10 @@ bool is_closure(const std::set& nodes, return true; } -bool is_topologically_sorted(const std::set& nodes, - const std::map>& edges) { +bool is_topologically_sorted(const std::set& nodes, + const std::map>& edges) { if (!is_closure(nodes, edges)) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " << "graph is not a closure"; - throw lbann_exception(err.str()); + LBANN_ERROR("graph is not a closure"); } for (const auto& node : nodes) { const auto& neighbors = get_neighbors(node, edges); @@ -83,14 +81,12 @@ bool is_topologically_sorted(const std::set& nodes, return true; } -bool is_cyclic(const std::set& nodes, - const std::map>& edges) { +bool is_cyclic(const std::set& nodes, + const std::map>& edges) { // Check that graph is valid if (!is_closure(nodes, edges)) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " << "graph is not a closure"; - throw lbann_exception(err.str()); + LBANN_ERROR("graph is not a closure"); } // Topologically sorted graphs are not cyclic @@ -99,8 +95,8 @@ bool is_cyclic(const std::set& nodes, } // Perform depth-first searches to detect cycles - std::unordered_map is_visited, is_sorted; - std::stack search_stack; + std::unordered_map is_visited, is_sorted; + std::stack search_stack; for (auto&& it = nodes.rbegin(); it != nodes.rend(); ++it) { search_stack.push(*it); } @@ -123,17 +119,15 @@ bool is_cyclic(const std::set& nodes, } } return false; - + } -std::map> transpose(const std::set& nodes, - const std::map>& edges) { +std::map> transpose(const std::set& nodes, + 
const std::map>& edges) { if (!is_closure(nodes, edges)) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " << "graph is not a closure"; - throw lbann_exception(err.str()); + LBANN_ERROR("attempted to transpose a graph that is not a closure"); } - std::map> transpose_edges; + std::map> transpose_edges; for (const auto& node : nodes) { for (const auto& neighbor : get_neighbors(node, edges)) { transpose_edges[neighbor].insert(node); @@ -142,9 +136,9 @@ std::map> transpose(const std::set& nodes, return transpose_edges; } -std::map> induce_subgraph(const std::set& nodes, - const std::map>& edges) { - std::map> induced_edges; +std::map> induce_subgraph(const std::set& nodes, + const std::map>& edges) { + std::map> induced_edges; for (const auto& node : nodes) { for (const auto& neighbor : get_neighbors(node, edges)) { if (nodes.count(neighbor) > 0) { @@ -155,13 +149,13 @@ std::map> induce_subgraph(const std::set& nodes, return induced_edges; } -std::vector breadth_first_search(int root, - const std::map>& edges) { +std::vector breadth_first_search(El::Int root, + const std::map>& edges) { // Initialize data structures - std::unordered_map is_visited; - std::vector sorted_nodes; - std::queue search_queue; + std::unordered_map is_visited; + std::vector sorted_nodes; + std::queue search_queue; search_queue.push(root); // Visit nodes until search queue is exhausted @@ -182,13 +176,13 @@ std::vector breadth_first_search(int root, } -std::vector depth_first_search(int root, - const std::map>& edges) { +std::vector depth_first_search(El::Int root, + const std::map>& edges) { // Initialize data structures - std::unordered_map is_visited, is_sorted; - std::vector sorted_nodes; - std::stack search_stack; + std::unordered_map is_visited, is_sorted; + std::vector sorted_nodes; + std::stack search_stack; search_stack.push(root); // Visit nodes until search stack is exhausted @@ -219,29 +213,26 @@ std::vector depth_first_search(int root, } -std::vector 
topological_sort(const std::set& nodes, - const std::map>& edges) { +std::vector topological_sort(const std::set& nodes, + const std::map>& edges) { // Check that graph is valid if (!is_closure(nodes, edges)) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " << "graph is not a closure"; - throw lbann_exception(err.str()); + LBANN_ERROR("attempted to topologically sort " + "a graph that is not a closure"); } if (is_cyclic(nodes, edges)) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " << "graph is cyclic"; - throw lbann_exception(err.str()); + LBANN_ERROR("attempted to topologically sort a cyclic graph"); } // Return original order if already sorted if (is_topologically_sorted(nodes, edges)) { - return std::vector(nodes.begin(), nodes.end()); + return std::vector(nodes.begin(), nodes.end()); } // Perform depth-first searches on nodes - std::stack sorted_stack; - std::unordered_map is_sorted; + std::stack sorted_stack; + std::unordered_map is_sorted; for (const auto& root : nodes) { if (!is_sorted[root]) { const auto& dfs = depth_first_search(root, edges); @@ -255,26 +246,26 @@ std::vector topological_sort(const std::set& nodes, } // Reverse DFS post-order is topologically sorted - std::vector sorted_nodes; + std::vector sorted_nodes; while (!sorted_stack.empty()) { sorted_nodes.push_back(sorted_stack.top()); sorted_stack.pop(); } return sorted_nodes; - + } -void condensation(const std::set& nodes, - const std::map>& edges, - std::map>& components, - std::set& condensation_nodes, - std::map>& condensation_edges) { +void condensation(const std::set& nodes, + const std::map>& edges, + std::map>& components, + std::set& condensation_nodes, + std::map>& condensation_edges) { // Initialize data structures for unsorted condensation - std::unordered_map> unsorted_components; - std::unordered_map unsorted_component_assignments; - std::set unsorted_condensation_nodes; - std::map> unsorted_condensation_edges; + 
std::unordered_map> unsorted_components; + std::unordered_map unsorted_component_assignments; + std::set unsorted_condensation_nodes; + std::map> unsorted_condensation_edges; // Find strongly connected components with Kosaraju's algorithm // Note: First sort nodes by DFS post-order. Then, pick root nodes @@ -282,8 +273,8 @@ void condensation(const std::set& nodes, // DFS that visits a node determines the strongly connected // component it belongs to. const auto& transpose_edges = transpose(nodes, edges); - std::stack dfs_stack; - std::unordered_map is_sorted, is_condensed; + std::stack dfs_stack; + std::unordered_map is_sorted, is_condensed; for (const auto& root : nodes) { if (!is_sorted[root]) { for (const auto& node : depth_first_search(root, edges)) { @@ -298,7 +289,7 @@ void condensation(const std::set& nodes, const auto& root = dfs_stack.top(); dfs_stack.pop(); if (!is_condensed[root]) { - const int index = unsorted_condensation_nodes.size(); + const El::Int index = unsorted_condensation_nodes.size(); unsorted_condensation_nodes.insert(index); for (const auto& node : depth_first_search(root, transpose_edges)) { if (!is_condensed[node]) { @@ -332,7 +323,7 @@ void condensation(const std::set& nodes, for (size_t i = 0; i < unsorted_condensation_nodes.size(); ++i) { condensation_nodes.insert(i); } - std::unordered_map unsorted_to_sorted; + std::unordered_map unsorted_to_sorted; for (const auto& component : condensation_nodes) { const auto& unsorted_component = sorted_to_unsorted[component]; unsorted_to_sorted[unsorted_component] = component; From 74398dadada5f54f3d8d47a6617f071e8979d0c3 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 24 Jan 2019 17:51:37 -0800 Subject: [PATCH 004/443] Refactoring model class to minimize contact with smart pointers. Instead of directly accessing `m_layers`, prefer to call `Layer& get_layer(Int)`. 
--- .../lbann/models/directed_acyclic_graph.hpp | 9 +- include/lbann/models/model.hpp | 60 ++- src/models/model.cpp | 415 +++++++++--------- 3 files changed, 246 insertions(+), 238 deletions(-) diff --git a/include/lbann/models/directed_acyclic_graph.hpp b/include/lbann/models/directed_acyclic_graph.hpp index 671cf66b100..6178afcea3c 100644 --- a/include/lbann/models/directed_acyclic_graph.hpp +++ b/include/lbann/models/directed_acyclic_graph.hpp @@ -34,7 +34,7 @@ namespace lbann { /** Neural network model with a DAG layer graph. */ class directed_acyclic_graph_model : public model { - public: +public: directed_acyclic_graph_model(lbann_comm *comm, El::Int max_mini_batch_size, @@ -46,12 +46,7 @@ class directed_acyclic_graph_model : public model { directed_acyclic_graph_model* copy() const override { return new directed_acyclic_graph_model(*this); } std::string get_type() const override { return "directed acyclic graph"; } - protected: - - /** @details For general DAG models, users need to manually specify - * each layer to freeze in the model description prototext. - */ - void freeze_layers_under_frozen_surface() override {} +protected: /** Set up layer execution order. * diff --git a/include/lbann/models/model.hpp b/include/lbann/models/model.hpp index 5694d22abb0..0041167c37a 100644 --- a/include/lbann/models/model.hpp +++ b/include/lbann/models/model.hpp @@ -117,6 +117,12 @@ class model { return m_metrics; } + /** Size of model's list of layers. */ + El::Int get_num_layers() const noexcept; + /** @param pos Position in model's list of layers. */ + Layer& get_layer(El::Int pos); + /** @param pos Position in model's list of layers. */ + const Layer& get_layer(El::Int pos) const; /** @brief Return list of layers in model. * @details The list is in execution order for forward propagation. 
*/ @@ -341,13 +347,18 @@ class model { virtual void remap_pointers(const std::unordered_map& layer_map, const std::unordered_map& weights_map); - /** @details In case that a layer is frozen, also freeze layers that - * precede it if that makes senses for the particular model, such - * as sequential or siamese. For othe models, users can manually + /** @brief + * + * In case that a layer is frozen, also freeze layers that precede + * it if that makes senses for the particular model, such as + * sequential or siamese. For othe models, users can manually * control the behaivor by indicating whether to freeze each layer * in the model description prototext. + * + * For general DAG models, users need to manually specify each + * layer to freeze in the model description prototext. */ - virtual void freeze_layers_under_frozen_surface(); + virtual void freeze_layers_under_frozen_surface() {} /** Set up topology of layer graph. * @@ -404,9 +415,9 @@ class model { */ virtual void reconcile_weight_values(); - //////////////////////////////////////////////////////////// + // =========================================== // Callbacks - //////////////////////////////////////////////////////////// + // =========================================== /** Execute callbacks at start of training. */ virtual void do_train_begin_cbs(); @@ -456,28 +467,45 @@ class model { */ std::vector> m_layers; + // =========================================== + // Functions to add utility layers + // =========================================== + /** Insert evaluation layers where needed. * - * If an objective function layer term or a layer metric - * corresponds to a layer that is not an evaluation layer, an - * evaluation layer is added as a child of the original layer and - * set as the corresponding layer to the layer term or layer - * metric. 
+ * If a @c lbann::layer_term or @c lbann::layer_metric corresponds + * to a layer that is not an evaluation_layer, an evaluation layer + * is created and added to the model. + * + * @param layer_set Layers in model. Updated with any newly + * created layers. + * @param layer_names Names of layers in model. Updated with any + * newly created layers. */ - void add_evaluation_layers(); + void add_evaluation_layers(std::unordered_set& layer_set, + std::unordered_set& layer_names); + /** Insert dummy layers after layers with too few children. * * If a layer expects more child layers than it has, add dummy * layers until it has enough children. + * + * @param layer_set Layers in model. Updated with any newly + * created layers. + * @param layer_names Names of layers in model. Updated with any + * newly created layers. */ - void add_dummy_layers(); + void add_dummy_layers(std::unordered_set& layer_names); /** Insert split layers after layers with too many children. * * If a layer expects one child layer but has multiple, add a split - * layer. The split layer will be the original layer's child and - * the split layer's children will be the original children. + * layer to the model. + * + * @param layer_names Names of layers in model. Updated with any + * newly created layers. 
*/ - void add_split_layers(); + void add_split_layers(std::unordered_set& layer_names); + }; } // namespace lbann diff --git a/src/models/model.cpp b/src/models/model.cpp index c1dc6e48d09..f71b5b5d4c5 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -48,23 +48,9 @@ namespace lbann { -namespace { - -bool layer_has_name(std::string name, - const std::vector>& layers) { - for (const auto& l : layers) { - if (l->get_name() == name) { - return true; - } - } - return false; -} - -} // namespace - -//////////////////////////////////////////////////////////// +// ============================================= // Constructors and destructor -//////////////////////////////////////////////////////////// +// ============================================= model::model(lbann_comm *comm, El::Int mini_batch_size, @@ -124,14 +110,14 @@ model::model(const model& other) : std::unordered_map layer_map; m_layers.reserve(other.m_layers.size()); for (const auto& ptr : other.m_layers) { - if (!ptr) { LBANN_ERROR("unexpected null pointer"); } + if (ptr == nullptr) { LBANN_ERROR("unexpected null pointer"); } auto* old_layer = ptr.get(); auto* new_layer = old_layer->copy(); new_layer->set_model(this); m_layers.emplace_back(new_layer); layer_map[old_layer] = new_layer; } - std::unordered_map weights_map; + std::unordered_map weights_map; for (auto& w : m_weights) { auto&& w_copy = w->copy(); weights_map[w] = w_copy; @@ -181,14 +167,14 @@ model& model::operator=(const model& other) { m_layers.clear(); m_layers.reserve(other.m_layers.size()); for (const auto& ptr : other.m_layers) { - if (!ptr) { LBANN_ERROR("unexpected null pointer"); } + if (ptr == nullptr) { LBANN_ERROR("unexpected null pointer"); } auto* old_layer = ptr.get(); auto* new_layer = old_layer->copy(); new_layer->set_model(this); m_layers.emplace_back(new_layer); layer_map[old_layer] = new_layer; } - std::unordered_map weights_map; + std::unordered_map weights_map; for (auto& w : m_weights) { w = weights_map[w] = 
w->copy(); } @@ -205,9 +191,9 @@ model::~model() { for (const auto& cb : m_callbacks) { delete cb; } } -//////////////////////////////////////////////////////////// +// ============================================= // Model specification -//////////////////////////////////////////////////////////// +// ============================================= void model::add_layer(std::unique_ptr l) { std::stringstream err; @@ -224,11 +210,14 @@ void model::add_layer(std::unique_ptr l) { // bottleneck. If it is, consider maintaining a hash table // containing all layer names (and properly updating it during // copies and pointer remaps). - if (layer_has_name(l->get_name(), m_layers)) { - err << "attempted to add layer \"" << l->get_name() << "\" to " - << "model \"" << get_name() << "\", " - << "but the model already contains a layer with that name"; - LBANN_ERROR(err.str()); + const auto& name = l->get_name(); + for (El::Int i = 0; i < get_num_layers(); ++i) { + if (get_layer(i).get_name() == name) { + err << "attempted to add layer \"" << name << "\" to " + << "model \"" << get_name() << "\", " + << "but the model already contains a layer with that name"; + LBANN_ERROR(err.str()); + } } // Add layer to model @@ -297,6 +286,27 @@ const std::vector model::get_weights() const { return weights_list; } +El::Int model::get_num_layers() const noexcept { + return m_layers.size(); +} +Layer& model::get_layer(El::Int pos) { + // Item 3, p. 
23 in "Effective C++", 3rd ed., by Scott Meyers + return const_cast(static_cast(*this).get_layer(pos)); +} +const Layer& model::get_layer(El::Int pos) const { + std::stringstream err; + if (pos < 0 || pos >= get_num_layers()) { + err << "could not access layer in model \"" << get_name() << "\" " + << "(requested index " << pos << ", " + << "but there are " << get_num_layers() << " layers)"; + LBANN_ERROR(err.str()); + } else if (m_layers[pos] == nullptr) { + err << "model \"" << get_name() << "\" " + << "has a null pointer in its layer list"; + LBANN_ERROR(err.str()); + } + return *m_layers[pos]; +} std::vector model::get_layers() { std::vector layer_list; layer_list.reserve(m_layers.size()); @@ -369,8 +379,8 @@ optimizer* model::create_optimizer() const { } bool model::is_execution_mode_valid(execution_mode mode) const { - for (const auto& l : m_layers) { - const auto* input = dynamic_cast(l.get()); + for (El::Int i = 0; i < get_num_layers(); ++i) { + const auto* input = dynamic_cast(&get_layer(i)); if (input != nullptr && !input->is_execution_mode_valid(mode)) { return false; } @@ -382,7 +392,7 @@ void model::reorder_layers(const std::vector& gather_indices) { std::stringstream err; // Check that gather indices are in valid range - const El::Int num_layers = m_layers.size(); + const auto& num_layers = get_num_layers(); if (std::any_of(gather_indices.begin(), gather_indices.end(), [num_layers](El::Int index) { return index < 0 || index >= num_layers; @@ -430,43 +440,40 @@ description model::get_description() const { // Layer topology description layer_topology_desc("Layer topology:"); - for (const auto& l : m_layers) { + for (El::Int k = 0; k < get_num_layers(); ++k) { + const auto& l = get_layer(k); std::stringstream ss; - if (l == nullptr) { - ss << "unknown layer: {} -> {}"; - } else { - ss << l->get_name() << " (" << l->get_type() << "): {"; - const auto& parents = l->get_parent_layers(); - const auto& children = l->get_child_layers(); - for (size_t i = 0; i < 
parents.size(); ++i) { - ss << (i > 0 ? ", " : ""); - if (parents[i] == nullptr) { - ss << "unknown layer"; - } else { - ss << parents[i]->get_name() << " ("; - const auto& dims = l->get_input_dims(i); - for (size_t j = 0; j < dims.size(); ++j) { - ss << (j > 0 ? "x" : "") << dims[j]; - } - ss << ")"; + ss << l.get_name() << " (" << l.get_type() << "): {"; + const auto& parents = l.get_parent_layers(); + const auto& children = l.get_child_layers(); + for (size_t i = 0; i < parents.size(); ++i) { + ss << (i > 0 ? ", " : ""); + if (parents[i] == nullptr) { + ss << "unknown layer"; + } else { + ss << parents[i]->get_name() << " ("; + const auto& dims = l.get_input_dims(i); + for (size_t j = 0; j < dims.size(); ++j) { + ss << (j > 0 ? "x" : "") << dims[j]; } + ss << ")"; } - ss << "} -> {"; - for (size_t i = 0; i < children.size(); ++i) { - ss << (i > 0 ? ", " : ""); - if (children[i] == nullptr) { - ss << "unknown layer"; - } else { - ss << children[i]->get_name() << " ("; - const auto& dims = l->get_output_dims(i); - for (size_t j = 0; j < dims.size(); ++j) { - ss << (j > 0 ? "x" : "") << dims[j]; - } - ss << ")"; + } + ss << "} -> {"; + for (size_t i = 0; i < children.size(); ++i) { + ss << (i > 0 ? ", " : ""); + if (children[i] == nullptr) { + ss << "unknown layer"; + } else { + ss << children[i]->get_name() << " ("; + const auto& dims = l.get_output_dims(i); + for (size_t j = 0; j < dims.size(); ++j) { + ss << (j > 0 ? 
"x" : "") << dims[j]; } + ss << ")"; } - ss << "}"; } + ss << "}"; layer_topology_desc.add(ss.str()); } desc.add(std::string{}); @@ -474,12 +481,8 @@ description model::get_description() const { // Layer details description layer_details_desc("Layer details:"); - for (const auto& l : m_layers) { - if (l == nullptr) { - layer_details_desc.add("unknown layer"); - } else { - layer_details_desc.add(l->get_description()); - } + for (El::Int i = 0; i < get_num_layers(); ++i) { + layer_details_desc.add(get_layer(i).get_description()); } desc.add(std::string{}); desc.add(layer_details_desc); @@ -503,8 +506,8 @@ description model::get_description() const { } -void model::remap_pointers(const std::unordered_map& layer_map, - const std::unordered_map& weights_map) { +void model::remap_pointers(const std::unordered_map& layer_map, + const std::unordered_map& weights_map) { // Fix pointers in objective function if (m_objective_function != nullptr) { @@ -536,46 +539,29 @@ void model::remap_pointers(const std::unordered_map& layer_map, } // Fix pointers in layers - for (const auto& l : m_layers) { - auto layer_pointers = l->get_layer_pointers(); - for (auto& layer_pointer : layer_pointers) { - if (layer_map.count(layer_pointer) > 0) { - layer_pointer = layer_map.at(layer_pointer); + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto& l = get_layer(i); + auto layer_pointers = l.get_layer_pointers(); + auto weights_pointers = l.get_weights(); + for (auto& ptr : layer_pointers) { + if (layer_map.count(ptr) > 0) { + ptr = layer_map.at(ptr); } } - l->set_layer_pointers(layer_pointers); - auto weights_pointers = l->get_weights(); - for (auto& weights_pointer : weights_pointers) { - if (weights_map.count(weights_pointer) > 0) { - weights_pointer = weights_map.at(weights_pointer); + for (auto& ptr : weights_pointers) { + if (weights_map.count(ptr) > 0) { + ptr = weights_map.at(ptr); } } - l->set_weights(weights_pointers); + l.set_layer_pointers(layer_pointers); + 
l.set_weights(weights_pointers); } } -void model::freeze_layers_under_frozen_surface() { - bool freezing = false; - for (size_t i = m_layers.size(); i-- > 0u; ) { - auto* l = m_layers[i].get(); - if (dynamic_cast(l) != nullptr) { - if (l->is_frozen()) { - throw lbann_exception("Frozen io_layer!"); - } - continue; - } - if (!freezing) { - freezing = l->is_frozen(); - } else { - l->freeze(); - } - } -} - -//////////////////////////////////////////////////////////// +// ============================================= // Setup -//////////////////////////////////////////////////////////// +// ============================================= void model::setup(std::shared_ptr io_thread_pool) { // Setup I/O threads - set up before setting up the layers (input @@ -608,35 +594,32 @@ void model::setup_layer_topology() { std::stringstream err; // Check that layer list is valid - // Note: Throws an exception if the layer list contains a null - // pointer, if it contains two layers with the same name, or if a - // layer has a pointer to a layer in a different model. + // Note: Throws an exception if the layer list contains two layers + // with the same name or if a layer has a pointer to a layer in a + // different model. 
std::unordered_set layer_set; - std::unordered_set name_set; - for (const auto& l : m_layers) { - if (l == nullptr) { + std::unordered_set layer_names; + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto& l = get_layer(i); + if (layer_names.count(l.get_name()) > 0) { err << "model \"" << get_name() << "\" " - << "has a null pointer in its layer list"; - LBANN_ERROR(err.str()); - } else if (name_set.count(l->get_name()) > 0) { - err << "model \"" << get_name() << "\" " - << "has multiple layers named \"" << l->get_name() << "\""; + << "has multiple layers named \"" << l.get_name() << "\""; LBANN_ERROR(err.str()); } - name_set.insert(l->get_name()); - layer_set.insert(l.get()); + layer_set.insert(&l); + layer_names.insert(l.get_name()); } - for (const auto& l : m_layers) { - for (const auto& ptr : l->get_layer_pointers()) { - if (ptr != nullptr && layer_set.count(ptr) < 1) { - err << "layer \"" << l->get_name() << "\" " + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto& l = get_layer(i); + for (const auto& ptr : l.get_layer_pointers()) { + if (ptr != nullptr && layer_set.count(ptr) == 0) { + err << "layer \"" << l.get_name() << "\" " << "(in model \"" << get_name() << "\") " << "has a pointer to layer " << ptr->get_name() << "\" "; - const auto* other_model = ptr->get_model(); - if (other_model == nullptr) { + if (ptr->get_model() == nullptr) { err << "(not in a model)"; } else { - err << "(in model \"" << other_model->get_name() << "\")"; + err << "(in model \"" << ptr->get_model()->get_name() << "\")"; } LBANN_ERROR(err.str()); } @@ -644,29 +627,29 @@ void model::setup_layer_topology() { } // Make sure parent/child relationships are reciprocated - for (const auto& l : m_layers) { - for (const auto& parent : l->get_parent_layers()) { - const_cast(parent)->add_child_layer(l.get()); + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto& l = get_layer(i); + for (auto* parent : l.get_parent_layers()) { + const_cast(parent)->add_child_layer(&l); } - for 
(const auto& child : l->get_child_layers()) { - const_cast(child)->add_parent_layer(l.get()); + for (auto* child : l.get_child_layers()) { + const_cast(child)->add_parent_layer(&l); } } // Add utility layers - add_evaluation_layers(); - add_dummy_layers(); - add_split_layers(); + add_evaluation_layers(layer_set, layer_names); + add_dummy_layers(layer_names); + add_split_layers(layer_names); } void model::setup_layer_execution_order() { // Find input layers - const El::Int num_layers = m_layers.size(); std::vector input_layers, other_layers; - for (El::Int i = 0; i < num_layers; ++i) { - if (dynamic_cast(m_layers[i].get()) != nullptr) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + if (dynamic_cast(&get_layer(i)) != nullptr) { input_layers.push_back(i); } else { other_layers.push_back(i); @@ -684,10 +667,11 @@ void model::setup_layer_execution_order() { } void model::setup_layers() { - for (const auto& l : m_layers) { - l->set_model(this); - l->setup(); - l->check_setup(); + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto& l = get_layer(i); + l.set_model(this); + l.setup(); + l.check_setup(); } } @@ -700,8 +684,8 @@ void model::setup_weights() { m_weights.end()); // Find weights used by layers - for (const auto& l : m_layers) { - for (const auto& w : l->get_weights()) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + for (const auto& w : get_layer(i).get_weights()) { if (weights_set.count(w) == 0) { m_weights.push_back(w); weights_set.insert(w); @@ -730,18 +714,17 @@ void model::setup_weights() { } -void model::add_evaluation_layers() { +void model::add_evaluation_layers(std::unordered_set& layer_set, + std::unordered_set& layer_names) { std::stringstream err; - const auto& layer_pointers = get_layers(); // Add evaluation layers corresponding to objective function layer terms for (auto* t : m_objective_function->get_terms()) { auto* term = dynamic_cast(t); if (term != nullptr) { auto& l = term->get_layer(); - if (std::find(layer_pointers.begin(), 
layer_pointers.end(), &l) - == layer_pointers.end()) { - err << "model \"" << get_name() << "\" " + if (layer_set.count(&l) == 0) { + err << "model \"" << get_name() << "\" " << "has an objective function layer term corresponding to " << "layer \"" << l.get_name() << "\", " << "which isn't in the model's list of layers"; @@ -758,12 +741,16 @@ void model::add_evaluation_layers() { // Set evaluation layer name El::Int name_index = 1; std::string name = l.get_name() + "_eval"; - while (layer_has_name(name, m_layers)) { + while (layer_names.count(name) > 0) { name_index++; name = l.get_name() + "_eval" + std::to_string(name_index); } eval->set_name(name); + // Update workspace objects + layer_set.insert(eval.get()); + layer_names.insert(eval->get_name()); + // Add evaluation layer to model l.add_child_layer(eval.get()); eval->add_parent_layer(&l); @@ -779,8 +766,7 @@ void model::add_evaluation_layers() { auto* met = dynamic_cast(m); if (met != nullptr) { auto& l = met->get_layer(); - if (std::find(layer_pointers.begin(), layer_pointers.end(), &l) - == layer_pointers.end()) { + if (layer_set.count(&l) == 0) { err << "layer metric \"" << met->name() << "\" " << "corresponds to layer \"" << l.get_name() << "\", " << "which is not in model \"" << get_name() << "\""; @@ -797,12 +783,16 @@ void model::add_evaluation_layers() { // Set evaluation layer name El::Int name_index = 1; std::string name = l.get_name() + "_eval"; - while (layer_has_name(name, m_layers)) { + while (layer_names.count(name) > 0) { name_index++; name = l.get_name() + "_eval" + std::to_string(name_index); } eval->set_name(name); + // Update workspace objects + layer_set.insert(eval.get()); + layer_names.insert(eval->get_name()); + // Add evaluation layer to model l.add_child_layer(eval.get()); eval->add_parent_layer(&l); @@ -815,11 +805,9 @@ void model::add_evaluation_layers() { } -void model::add_dummy_layers() { - - // Add dummy layers until all layers have enough children - for (size_t i = 0; i < 
m_layers.size(); ++i) { - auto& l = *m_layers[i]; +void model::add_dummy_layers(std::unordered_set& layer_names) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto& l = get_layer(i); while (l.get_num_children() < l.get_expected_num_child_layers()) { // Create dummy layer @@ -851,11 +839,12 @@ void model::add_dummy_layers() { // Set dummy layer name El::Int name_index = 1; std::string name = l.get_name() + "_dummy"; - while (layer_has_name(name, m_layers)) { + while (layer_names.count(name) > 0) { name_index++; name = l.get_name() + "_dummy" + std::to_string(name_index); } dummy->set_name(name); + layer_names.insert(name); // Add dummy layer to model l.add_child_layer(dummy.get()); @@ -864,22 +853,20 @@ void model::add_dummy_layers() { } } - } -void model::add_split_layers() { - for (size_t i = 0; i < m_layers.size(); ++i) { - auto& layer = *m_layers[i]; +void model::add_split_layers(std::unordered_set& layer_names) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto& l = get_layer(i); // Add split layer if layer expects one child but has multiple - auto& children = layer.get_child_layers(); - if (layer.get_expected_num_child_layers() == 1 - && children.size() != 1) { + auto& children = l.get_child_layers(); + if (l.get_expected_num_child_layers() == 1 && children.size() != 1) { // Create split layer std::unique_ptr split; using args_tuple = std::tuple; - args_tuple args(layer.get_data_layout(), layer.get_device_allocation()); + args_tuple args(l.get_data_layout(), l.get_device_allocation()); if (args == args_tuple(data_layout::DATA_PARALLEL, El::Device::CPU)) { split.reset(new split_layer(m_comm)); } @@ -897,19 +884,20 @@ void model::add_split_layers() { if (split == nullptr) { std::stringstream err; err << "could not construct split layer corresponding to " - << "layer \"" << layer.get_name() << "\" " + << "layer \"" << l.get_name() << "\" " << "in model \"" << get_name() << "\""; LBANN_ERROR(err.str()); } // Set split layer name El::Int 
name_index = 1; - std::string name = layer.get_name() + "_split"; - while (layer_has_name(name, m_layers)) { + std::string name = l.get_name() + "_split"; + while (layer_names.count(name) > 0) { name_index++; - name = layer.get_name() + "_split" + std::to_string(name_index); + name = l.get_name() + "_split" + std::to_string(name_index); } split->set_name(name); + layer_names.insert(name); // Setup relationships between split layer and child layers for (auto&& const_child : children) { @@ -917,13 +905,13 @@ void model::add_split_layers() { split->add_child_layer(child); auto& child_parents = child->get_parent_layers(); std::replace(child_parents.begin(), child_parents.end(), - &layer, split.get()); + &l, split.get()); } // Setup relationship between current layer and split layer children.clear(); - layer.add_child_layer(split.get()); - split->add_parent_layer(&layer); + l.add_child_layer(split.get()); + split->add_parent_layer(&l); // Add split layer to layer list add_layer(std::move(split)); @@ -934,21 +922,18 @@ void model::add_split_layers() { } int model::get_num_iterations_per_epoch(execution_mode mode) const { - generic_input_layer* input = nullptr; - for (auto&& l : m_layers) { - input = dynamic_cast(l.get()); - if (input != nullptr) { break; } - } - if (input == nullptr) { - return 0; - } else { - return input->get_num_iterations_per_epoch(mode); + for (El::Int i = 0; i < get_num_layers(); ++i) { + const auto* input = dynamic_cast(&get_layer(i)); + if (input != nullptr) { + return input->get_num_iterations_per_epoch(mode); + } } + return 0; } -//////////////////////////////////////////////////////////// +// ============================================= // Evaluation and training -//////////////////////////////////////////////////////////// +// ============================================= void model::evaluate(execution_mode mode, int num_batches) { @@ -978,9 +963,9 @@ void model::evaluate(execution_mode mode, int num_batches) { void 
model::collect_indices(execution_mode mode) { reset_mode_and_model(mode); while (true) { - m_layers[0]->forward_prop(); + get_layer(0).forward_prop(); bool finished = true; - finished = m_layers[0]->update() && finished; + finished = get_layer(0).update() && finished; if (finished) { break; } @@ -990,13 +975,12 @@ void model::collect_indices(execution_mode mode) { } void model::collect_background_data_fetch(execution_mode mode) { - for (const auto& l : m_layers) { - auto *input = dynamic_cast(l.get()); + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto *input = dynamic_cast(&get_layer(i)); if (input != nullptr) { input->collect_background_data_fetch(mode); } } - return; } void model::train(int num_epochs, int num_batches) { @@ -1032,8 +1016,8 @@ void model::train(int num_epochs, int num_batches) { // that each layer points to this model void model::reset_mode_and_model(execution_mode mode) { set_execution_mode(mode); - for (const auto& l : m_layers) { - l->set_model(this); + for (El::Int i = 0; i < get_num_layers(); ++i) { + get_layer(i).set_model(this); } } @@ -1124,20 +1108,21 @@ void model::clear_gradients() { void model::forward_prop(execution_mode mode) { do_model_forward_prop_begin_cbs(mode); - for (const auto& l : m_layers) { - do_layer_forward_prop_begin_cbs(mode, l.get()); - l->forward_prop(); - do_layer_forward_prop_end_cbs(mode, l.get()); + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto& l = get_layer(i); + do_layer_forward_prop_begin_cbs(mode, &l); + l.forward_prop(); + do_layer_forward_prop_end_cbs(mode, &l); } do_model_forward_prop_end_cbs(mode); } void model::backward_prop() { do_model_backward_prop_begin_cbs(); - for (El::Int i = m_layers.size()-1; i >= 0; --i) { + for (El::Int i = get_num_layers()-1; i >= 0; --i) { // Perform backward prop step on current layer - auto& l = *m_layers[i]; + auto& l = get_layer(i); do_layer_backward_prop_begin_cbs(&l); l.back_prop(); do_layer_backward_prop_end_cbs(&l); @@ -1173,8 +1158,8 @@ void 
model::update_weights() { bool model::update_layers() { bool finished = true; - for (El::Int i = m_layers.size()-1; i >= 0; --i) { - finished = m_layers[i]->update() && finished; + for (El::Int i = get_num_layers()-1; i >= 0; --i) { + finished = get_layer(i).update() && finished; } return finished; } @@ -1187,9 +1172,9 @@ void model::reconcile_weight_values() { for (auto& req : reqs) { m_comm->wait(req); } } -//////////////////////////////////////////////////////////// +// ============================================= // Callbacks -//////////////////////////////////////////////////////////// +// ============================================= void model::do_train_begin_cbs() { for (const auto& cb : m_callbacks) { @@ -1445,13 +1430,13 @@ void model::do_weight_optimize_end_cbs(weights *w) { } } -//////////////////////////////////////////////////////////// +// ============================================= // Summarizer -//////////////////////////////////////////////////////////// +// ============================================= void model::summarize_stats(lbann_summary& summarizer) { - for (const auto& l : m_layers) { - l->summarize_stats(summarizer, get_cur_step()); + for (El::Int i = 0; i < get_num_layers(); ++i) { + get_layer(i).summarize_stats(summarizer, get_cur_step()); } summarizer.reduce_scalar("objective", m_objective_function->get_mean_value(m_execution_mode), @@ -1477,14 +1462,14 @@ void model::summarize_stats(lbann_summary& summarizer) { } void model::summarize_matrices(lbann_summary& summarizer) { - for (const auto& l : m_layers) { - l->summarize_matrices(summarizer, get_cur_step()); + for (El::Int i = 0; i < get_num_layers(); ++i) { + get_layer(i).summarize_matrices(summarizer, get_cur_step()); } } -//////////////////////////////////////////////////////////// +// ============================================= // Checkpointing -//////////////////////////////////////////////////////////// +// ============================================= /* struct used to 
serialize mode fields in file and MPI transfer */ struct lbann_model_header { @@ -1521,8 +1506,8 @@ bool model::save_to_checkpoint_shared(persist& p) { w->save_to_checkpoint_shared(p); } - for (size_t l = 0; l < m_layers.size(); l++) { - if (! m_layers[l]->save_to_checkpoint_shared(p)) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + if (!get_layer(i).save_to_checkpoint_shared(p)) { return false; } } @@ -1541,8 +1526,8 @@ bool model::save_to_checkpoint_shared(persist& p) { for (weights *w : m_weights) { w->save_to_checkpoint_shared(p); } - for (size_t l = 0; l < m_layers.size(); l++) { - if (! m_layers[l]->save_to_checkpoint_shared(p)) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + if (!get_layer(i).save_to_checkpoint_shared(p)) { return false; } } @@ -1602,8 +1587,8 @@ bool model::load_from_checkpoint_shared(persist& p) { } // read in each layer - for (size_t l = 0; l < m_layers.size(); l++) { - if (! m_layers[l]->load_from_checkpoint_shared(p)) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + if (!get_layer(i).load_from_checkpoint_shared(p)) { return false; } } @@ -1637,8 +1622,8 @@ bool model::save_to_checkpoint_distributed(persist& p){ w->save_to_checkpoint_distributed(p); } - for (size_t l = 0; l < m_layers.size(); l++) { - if (! m_layers[l]->save_to_checkpoint_distributed(p)) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + if (!get_layer(i).save_to_checkpoint_distributed(p)) { return false; } } @@ -1654,8 +1639,8 @@ bool model::save_to_checkpoint_distributed(persist& p){ p.write_uint64(persist_type::validate, "current_validataion_step", (uint64_t) m_current_validation_step); save_rng_to_checkpoint_shared(p, m_comm); - for (size_t l = 0; l < m_layers.size(); l++) { - if (! 
m_layers[l]->save_to_checkpoint_distributed(p)) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + if (!get_layer(i).save_to_checkpoint_distributed(p)) { return false; } } @@ -1698,8 +1683,8 @@ bool model::load_from_checkpoint_distributed(persist& p){ w->load_from_checkpoint_distributed(p); } - for (size_t l = 0; l < m_layers.size(); l++) { - if (! m_layers[l]->load_from_checkpoint_distributed(p)) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + if (!get_layer(i).load_from_checkpoint_distributed(p)) { return false; } } From f5e9fda851f1aa8ce77b14c3fd92182533ac71c2 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama Date: Thu, 31 Jan 2019 14:48:55 -0800 Subject: [PATCH 005/443] Add numpy npz data reader --- include/lbann/data_readers/CMakeLists.txt | 1 + .../data_readers/data_reader_numpy_npz.hpp | 111 ++++++++++ include/lbann/lbann.hpp | 1 + ...ata_reader_mnist_numpy_npz_int16.prototext | 67 ++++++ src/data_readers/CMakeLists.txt | 1 + src/data_readers/data_reader_numpy_npz.cpp | 205 ++++++++++++++++++ src/proto/lbann.proto | 1 + src/proto/proto_common.cpp | 6 + 8 files changed, 393 insertions(+) create mode 100644 include/lbann/data_readers/data_reader_numpy_npz.hpp create mode 100644 model_zoo/data_readers/data_reader_mnist_numpy_npz_int16.prototext create mode 100644 src/data_readers/data_reader_numpy_npz.cpp diff --git a/include/lbann/data_readers/CMakeLists.txt b/include/lbann/data_readers/CMakeLists.txt index 425f7e3672a..5de41c0213f 100644 --- a/include/lbann/data_readers/CMakeLists.txt +++ b/include/lbann/data_readers/CMakeLists.txt @@ -24,6 +24,7 @@ set_full_path(THIS_DIR_HEADERS data_reader_moving_mnist.hpp data_reader_nci.hpp data_reader_numpy.hpp + data_reader_numpy_npz.hpp data_reader_pilot2_molecular.hpp data_reader_synthetic.hpp image_preprocessor.hpp diff --git a/include/lbann/data_readers/data_reader_numpy_npz.hpp b/include/lbann/data_readers/data_reader_numpy_npz.hpp new file mode 100644 index 00000000000..c52a1ce3c02 --- /dev/null +++ 
b/include/lbann/data_readers/data_reader_numpy_npz.hpp @@ -0,0 +1,111 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +// +// lbann_data_reader_numpy_npz .hpp .cpp - generic_data_reader class for numpy .npz dataset +//////////////////////////////////////////////////////////////////////////////// + +#ifndef LBANN_DATA_READER_NUMPY_NPZ_HPP +#define LBANN_DATA_READER_NUMPY_NPZ_HPP + +#include "data_reader.hpp" +#include "data_reader_numpy.hpp" +#include + +namespace lbann { + /** + * Data reader for data stored in numpy (.npz) files. + * This assumes that the file contains "data", "labels" (optional), + * and "responses" (optional) whose the zero'th axis is the sample axis. + */ + class numpy_npz_reader : public generic_data_reader { + public: + numpy_npz_reader(const bool shuffle); + // These need to be explicit because of some issue with the cnpy copy + // constructor/assignment operator not linking correctly otherwise. 
+ numpy_npz_reader(const numpy_npz_reader&); + numpy_npz_reader& operator=(const numpy_npz_reader&); + ~numpy_npz_reader() override {} + + numpy_npz_reader* copy() const override { return new numpy_npz_reader(*this); } + + std::string get_type() const override { + return "numpy_npz_reader"; + } + + /// Set whether to fetch labels. + void set_has_labels(bool b) { m_has_labels = b; } + /// Set whether to fetch responses. + void set_has_responses(bool b) { m_has_responses = b; } + /// Set a scaling factor for int16 data. + void set_scaling_factor_int16(DataType s) { m_scaling_factor_int16 = s; } + + void load() override; + + int get_num_labels() const override { return m_num_labels; } + int get_linearized_data_size() const override { return m_num_features; } + int get_linearized_label_size() const override { return m_num_labels; } + const std::vector get_data_dims() const override { + std::vector dims(m_data.shape.begin() + 1, + m_data.shape.end()); + return dims; + } + + protected: + bool fetch_datum(CPUMat& X, int data_id, int mb_idx) override; + bool fetch_label(CPUMat& Y, int data_id, int mb_idx) override; + bool fetch_response(CPUMat& Y, int data_id, int mb_idx) override; + + /// Number of samples. + int m_num_samples = 0; + /// Number of features in each sample. + int m_num_features = 0; + /// Number of label classes. + int m_num_labels = 0; + /// Number of features in each response. + int m_num_response_features = 0; + /// Whether to fetch a label from the last column. + bool m_has_labels = true; + /// Whether to fetch a response from the last column. + bool m_has_responses = false; + /** + * Underlying numpy data. + * Note raw data is managed with shared smart pointer semantics (relevant + * for copying). + */ + cnpy::NpyArray m_data, m_labels, m_responses; + + // A constant to be multiplied when data is converted + // from int16 to DataType. 
+ DataType m_scaling_factor_int16 = 1.0; + + private: + // Keys to retrieve data, labels, responses from a given .npz file. + static const std::string NPZ_KEY_DATA, NPZ_KEY_LABELS, NPZ_KEY_RESPONSES; + + }; + +} // namespace lbann + +#endif // LBANN_DATA_READER_NUMPY_HPP diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index c6e93d2a69c..a9fa3341e65 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -120,6 +120,7 @@ #include "lbann/data_readers/data_reader_jag_conduit_hdf5.hpp" #include "lbann/data_readers/data_reader_nci.hpp" #include "lbann/data_readers/data_reader_numpy.hpp" +#include "lbann/data_readers/data_reader_numpy_npz.hpp" #include "lbann/data_readers/data_reader_csv.hpp" #include "lbann/data_readers/data_reader_merge_samples.hpp" #include "lbann/data_readers/data_reader_merge_features.hpp" diff --git a/model_zoo/data_readers/data_reader_mnist_numpy_npz_int16.prototext b/model_zoo/data_readers/data_reader_mnist_numpy_npz_int16.prototext new file mode 100644 index 00000000000..55b4f5c8068 --- /dev/null +++ b/model_zoo/data_readers/data_reader_mnist_numpy_npz_int16.prototext @@ -0,0 +1,67 @@ +data_reader { + reader { + name: "numpy_npz" + role: "train" + shuffle: true + data_filedir: "/p/lscratchh/brainusr/datasets/MNIST/numpy/train_int16.npz" + validation_percent: 0.1 + absolute_sample_count: 0 + percent_of_data_to_use: 1.0 + disable_responses: true + num_labels: 10 + scaling_factor_int16: 0.000030518509476 # 1 / 0x7FFF + + image_preprocessor { + normalizer { + scale: true + subtract_mean: false + unit_variance: false + z_score: false + } + augmenter { + horizontal_flip: false + vertical_flip: false + rotation: 0 + horizontal_shift: 0 + vertical_shift: 0 + shear_range: 0 + } + noiser { + disable: true + factor: 0.0 + } + } + } + reader { + name: "numpy_npz" + role: "test" + shuffle: true + data_filedir: "/p/lscratchh/brainusr/datasets/MNIST/numpy/test_int16.npz" + absolute_sample_count: 0 + percent_of_data_to_use: 1.0 + 
disable_responses: true + num_labels: 10 + scaling_factor_int16: 0.000030518509476 # 1 / 0x7FFF + + image_preprocessor { + normalizer { + scale: true + subtract_mean: false + unit_variance: false + z_score: false + } + augmenter { + horizontal_flip: false + vertical_flip: false + rotation: 0 + horizontal_shift: 0 + vertical_shift: 0 + shear_range: 0 + } + noiser { + disable: true + factor: 0.0 + } + } + } +} diff --git a/src/data_readers/CMakeLists.txt b/src/data_readers/CMakeLists.txt index 5d9d67f0395..36c3ff1456a 100644 --- a/src/data_readers/CMakeLists.txt +++ b/src/data_readers/CMakeLists.txt @@ -29,6 +29,7 @@ set_full_path(THIS_DIR_SOURCES data_reader_moving_mnist.cpp data_reader_nci.cpp data_reader_numpy.cpp + data_reader_numpy_npz.cpp data_reader_pilot2_molecular.cpp data_reader_synthetic.cpp data_reader_multi_images.cpp diff --git a/src/data_readers/data_reader_numpy_npz.cpp b/src/data_readers/data_reader_numpy_npz.cpp new file mode 100644 index 00000000000..eb47822c63f --- /dev/null +++ b/src/data_readers/data_reader_numpy_npz.cpp @@ -0,0 +1,205 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. 
You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +// +// data_reader_numpy_npz .hpp .cpp - generic_data_reader class for numpy .npz dataset +//////////////////////////////////////////////////////////////////////////////// + +#include "lbann/data_readers/data_reader_numpy_npz.hpp" +#include +#include +#include +#include + +namespace lbann { + const std::string numpy_npz_reader::NPZ_KEY_DATA = "data"; + const std::string numpy_npz_reader::NPZ_KEY_LABELS = "labels"; + const std::string numpy_npz_reader::NPZ_KEY_RESPONSES = "responses"; + + numpy_npz_reader::numpy_npz_reader(const bool shuffle) + : generic_data_reader(shuffle), + m_num_samples(0), + m_num_features(0), + m_num_response_features(0) {} + + numpy_npz_reader::numpy_npz_reader(const numpy_npz_reader& other) : + generic_data_reader(other), + m_num_samples(other.m_num_samples), + m_num_features(other.m_num_features), + m_num_labels(other.m_num_labels), + m_num_response_features(other.m_num_response_features), + m_has_labels(other.m_has_labels), + m_has_responses(other.m_has_responses), + m_data(other.m_data), + m_labels(other.m_labels), + m_responses(other.m_responses), + m_scaling_factor_int16(other.m_scaling_factor_int16) {} + + numpy_npz_reader& numpy_npz_reader::operator=(const numpy_npz_reader& other) { + generic_data_reader::operator=(other); + m_num_samples = other.m_num_samples; + m_num_features = other.m_num_features; + m_num_labels = other.m_num_labels; + m_num_response_features = other.m_num_response_features; + m_has_labels = other.m_has_labels; + m_has_responses = other.m_has_responses; + m_data = other.m_data; + m_labels = 
other.m_labels; + m_responses = other.m_responses; + m_scaling_factor_int16 = other.m_scaling_factor_int16; + return *this; + } + + void numpy_npz_reader::load() { + std::string infile = get_data_filename(); + // Ensure the file exists. + std::ifstream ifs(infile); + if (!ifs) { + throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + + " numpy_npz_reader::load() - can't open file : " + infile); + } + ifs.close(); + + const cnpy::npz_t npz = cnpy::npz_load(infile); + + std::vector > npyLoadList; + npyLoadList.push_back(std::forward_as_tuple(true, NPZ_KEY_DATA, m_data)); + npyLoadList.push_back(std::forward_as_tuple(m_has_labels, NPZ_KEY_LABELS, m_labels)); + npyLoadList.push_back(std::forward_as_tuple(m_has_responses, NPZ_KEY_RESPONSES, m_responses)); + for(const auto npyLoad : npyLoadList) { + if(!std::get<0>(npyLoad)) { + continue; + } + const std::string key = std::get<1>(npyLoad); + cnpy::NpyArray &ary = std::get<2>(npyLoad); + const auto i = npz.find(key); + if(i != npz.end()) { + ary = i->second; + } else { + throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + + " numpy_npz_reader::load() - can't find npz key : " + key); + } + + if(key == NPZ_KEY_DATA) { + m_num_samples = m_data.shape[0]; + } else if(m_num_samples != (int) ary.shape[0]) { + throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + + " numpy_npz_reader::load() - the number of samples of data and " + key + " do not match : " + + std::to_string(m_num_samples) + " vs. " + std::to_string(ary.shape[0])); + } + } + + m_num_features = std::accumulate(m_data.shape.begin() + 1, m_data.shape.end(), (unsigned) 1, + std::multiplies()); + if(m_has_responses) + m_num_response_features = std::accumulate(m_responses.shape.begin() + 1, m_responses.shape.end(), (unsigned) 1, + std::multiplies()); + + // Ensure we understand the word size. 
+ if (!(m_data.word_size == 2 || m_data.word_size == 4 || m_data.word_size == 8)) { + throw lbann_exception("numpy_npz_reader: word size " + std::to_string(m_data.word_size) + + " not supported"); + } + + if (m_has_labels) { + // Shift feature count because the last becomes the label. + // Determine number of label classes. + std::unordered_set label_classes; + if (m_labels.word_size != 4) { + throw lbann_exception("numpy_npz_reader: label numpy array should be in int32"); + } + int *data = m_labels.data(); + for (int i = 0; i < m_num_samples; ++i) { + label_classes.insert((int) data[i]); + } + + // Sanity checks. + auto minmax = std::minmax_element(label_classes.begin(), label_classes.end()); + if (*minmax.first != 0) { + throw lbann_exception("numpy_reader: classes are not indexed from 0"); + } + if (*minmax.second != (int) label_classes.size() - 1) { + throw lbann_exception("numpy_reader: label classes are not contiguous"); + } + m_num_labels = label_classes.size(); + } + if (m_has_responses) { + // Last feature becomes the response. + } + + // Reset indices. 
+ m_shuffled_indices.clear(); + m_shuffled_indices.resize(m_num_samples); + std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0); + select_subset_of_data(); + } + + bool numpy_npz_reader::fetch_datum(Mat& X, int data_id, int mb_idx) { + Mat X_v = El::View(X, El::IR(0, X.Height()), El::IR(mb_idx, mb_idx+1)); + + if (m_data.word_size == 2) { + const short *data = m_data.data() + data_id * m_num_features; + DataType *dest = X_v.Buffer(); + + LBANN_OMP_PARALLEL_FOR + for(int j = 0; j < m_num_features; j++) + dest[j] = data[j] * m_scaling_factor_int16; + + } else { + void *data = NULL; + if (m_data.word_size == 4) { + data = (void *) (m_data.data() + data_id * m_num_features); + } else if (m_data.word_size == 8) { + data = (void *) (m_data.data() + data_id * m_num_features); + } + std::memcpy(X_v.Buffer(), data, m_num_features * m_data.word_size); + } + return true; + } + + bool numpy_npz_reader::fetch_label(Mat& Y, int data_id, int mb_idx) { + if (!m_has_labels) { + throw lbann_exception("numpy_npz_reader: do not have labels"); + } + const int label = m_labels.data()[data_id]; + Y(label, mb_idx) = 1; + return true; + } + + bool numpy_npz_reader::fetch_response(Mat& Y, int data_id, int mb_idx) { + void *responses = NULL; + if (m_responses.word_size == 4) { + responses = (void *) (m_responses.data() + + data_id * m_num_response_features); + } else if (m_responses.word_size == 8) { + responses = (void *) (m_responses.data() + + data_id * m_num_response_features); + } + Mat Y_v = El::View(Y, El::IR(0, Y.Height()), El::IR(mb_idx, mb_idx + 1)); + std::memcpy(Y_v.Buffer(), responses, + m_num_response_features * m_responses.word_size); + return true; + } + +} // namespace lbann diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index d7c5ad38488..7beb3862e8e 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -96,6 +96,7 @@ message Reader { int64 num_neighbors = 112; // pilot2_molecular_reader int64 max_neighborhood = 113; // 
pilot2_molecular_reader int32 num_image_srcs = 114; // data_reader_multi_images + float scaling_factor_int16 = 116; // for numpy_npz_reader with int16 data //------------- start of only for partitioned data sets ------------------ bool is_partitioned = 300; diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index f6d59ea7b50..8f06fb44ded 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -180,6 +180,12 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< reader_numpy->set_has_labels(!readme.disable_labels()); reader_numpy->set_has_responses(!readme.disable_responses()); reader = reader_numpy; + } else if (name == "numpy_npz") { + auto* reader_numpy_npz = new numpy_npz_reader(shuffle); + reader_numpy_npz->set_has_labels(!readme.disable_labels()); + reader_numpy_npz->set_has_responses(!readme.disable_responses()); + reader_numpy_npz->set_scaling_factor_int16(readme.scaling_factor_int16()); + reader = reader_numpy_npz; } else if (name == "pilot2_molecular_reader") { pilot2_molecular_reader* reader_pilot2_molecular = new pilot2_molecular_reader(readme.num_neighbors(), readme.max_neighborhood(), shuffle); reader = reader_pilot2_molecular; From 644456c01a847416e8f86d0d67321776bf12235a Mon Sep 17 00:00:00 2001 From: Yosuke Oyama Date: Thu, 31 Jan 2019 15:37:06 -0800 Subject: [PATCH 006/443] Fix a minor bug of the npz reader --- include/lbann/data_readers/data_reader_numpy_npz.hpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/lbann/data_readers/data_reader_numpy_npz.hpp b/include/lbann/data_readers/data_reader_numpy_npz.hpp index c52a1ce3c02..ca9e88ab5e1 100644 --- a/include/lbann/data_readers/data_reader_numpy_npz.hpp +++ b/include/lbann/data_readers/data_reader_numpy_npz.hpp @@ -64,8 +64,10 @@ namespace lbann { void load() override; int get_num_labels() const override { return m_num_labels; } + int get_num_responses() const override { return get_linearized_response_size(); } int 
get_linearized_data_size() const override { return m_num_features; } int get_linearized_label_size() const override { return m_num_labels; } + int get_linearized_response_size() const override { return m_num_response_features; } const std::vector get_data_dims() const override { std::vector dims(m_data.shape.begin() + 1, m_data.shape.end()); From a8e3b429466890b4e7daee757af7f0856fa51a2a Mon Sep 17 00:00:00 2001 From: Yosuke Oyama Date: Fri, 1 Feb 2019 09:58:23 -0800 Subject: [PATCH 007/443] Add mnist_to_npy_and_npz.py --- tools/mnist/.gitignore | 2 + tools/mnist/mnist_to_npy_and_npz.py | 86 +++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) create mode 100644 tools/mnist/.gitignore create mode 100755 tools/mnist/mnist_to_npy_and_npz.py diff --git a/tools/mnist/.gitignore b/tools/mnist/.gitignore new file mode 100644 index 00000000000..2c9503dc69d --- /dev/null +++ b/tools/mnist/.gitignore @@ -0,0 +1,2 @@ +*.npy +*.npz diff --git a/tools/mnist/mnist_to_npy_and_npz.py b/tools/mnist/mnist_to_npy_and_npz.py new file mode 100755 index 00000000000..f57adc1112d --- /dev/null +++ b/tools/mnist/mnist_to_npy_and_npz.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 + +""" +Convert the MNIST training/test datasets into .npy and .npz files. + +The generated files contain the following tensors: +* train.npy: shape=(60000, 785), dtype=np.float32 +* test.npy: shape=(10000, 785), dtype=np.float32 +* train_int16.npz: + * "data": shape=(60000, 784), dtype=np.int16 + * "labels": shape=(60000, 1), dtype=np.int32 +* test_int16.npz: + * "data": shape=(10000, 784), dtype=np.int16 + * "labels": shape=(10000, 1), dtype=np.int32 + +{train,test}.npy can be used for numpy_reader. +{train,test}_int16.npz can be used for numpy_npz_reader. 
+""" + +import numpy as np +import argparse +import os + +IMAGE_WIDTH = 28 + +def convert_mnist_to_np_and_npz(imagePath, labelPath, + imageMagicNumber, labelMagicNumber, + out, int16): + with open(imagePath, "rb") as f: + imageBin = f.read() + + assert imageMagicNumber == np.frombuffer(imageBin[ 0: 4], dtype=">u4")[0] + imageCount = np.frombuffer(imageBin[ 4: 8], dtype=">u4")[0] + assert IMAGE_WIDTH == np.frombuffer(imageBin[ 8:12], dtype=">u4")[0] + assert IMAGE_WIDTH == np.frombuffer(imageBin[12:16], dtype=">u4")[0] + pixels = np.frombuffer(imageBin[16:], dtype=">u1") \ + .reshape([imageCount, IMAGE_WIDTH*IMAGE_WIDTH]) + + with open(labelPath, "rb") as f: + labelBin = f.read() + + assert labelMagicNumber == np.frombuffer(labelBin[ 0: 4], dtype=">u4")[0] + assert imageCount == np.frombuffer(labelBin[ 4: 8], dtype=">u4")[0] + labels = np.frombuffer(labelBin[8:], dtype=">u1") \ + .reshape([imageCount, 1]) + + pixels = pixels.astype(np.float32) / 255.0 + labels = labels.astype(np.int32) + + npy = np.concatenate((pixels, labels.astype(np.float32)), axis=1) + + if int16: + pixels = (pixels * 0x7FFF).astype(np.int16) + + np.save("{}.npy".format(out), npy) + np.savez( + "{}{}.npz".format(out, "_int16" if int16 else ""), + data=pixels, + labels=labels) + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Convert the MNIST training/test datasets into .npy and .npz files.", + epilog="Usage: ./mnist_to_npy_and_npz.py path/to/mnist/directory") + parser.add_argument( + "mnist_dir", type=str, + help="Path to a directory containing the MNIST dataset (decompressed binary files)") + parser.add_argument( + "--int16", + dest="int16", action="store_const", + const=True, default=True, + help="Convert the image data into int16 (each pixel is multiplied by 0x7FFFF)") + args = parser.parse_args() + + convert_mnist_to_np_and_npz( + os.path.join(args.mnist_dir, "train-images-idx3-ubyte"), + os.path.join(args.mnist_dir, "train-labels-idx1-ubyte"), + 2051, 2049, + 
"train", + args.int16) + convert_mnist_to_np_and_npz( + os.path.join(args.mnist_dir, "t10k-images-idx3-ubyte"), + os.path.join(args.mnist_dir, "t10k-labels-idx1-ubyte"), + 2051, 2049, + "test", + args.int16) From 8e2e511269ff9f2d973802f03ed3117e60d452e1 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama Date: Fri, 1 Feb 2019 16:15:12 -0800 Subject: [PATCH 008/443] Support merged numpy npz data reader with responses --- include/lbann/data_readers/data_reader_merge_samples.hpp | 4 ++++ src/proto/proto_common.cpp | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/include/lbann/data_readers/data_reader_merge_samples.hpp b/include/lbann/data_readers/data_reader_merge_samples.hpp index f09775b45d0..fb666c152b0 100644 --- a/include/lbann/data_readers/data_reader_merge_samples.hpp +++ b/include/lbann/data_readers/data_reader_merge_samples.hpp @@ -56,12 +56,16 @@ class data_reader_merge_samples : public generic_compound_data_reader { void load() override; int get_num_labels() const override { return m_data_readers[0]->get_num_labels(); } + int get_num_responses() const override { return m_data_readers[0]->get_num_responses(); } int get_linearized_data_size() const override { return m_data_readers[0]->get_linearized_data_size(); } int get_linearized_label_size() const override { return m_data_readers[0]->get_linearized_label_size(); } + int get_linearized_response_size() const override { + return m_data_readers[0]->get_linearized_response_size(); + } const std::vector get_data_dims() const override { return m_data_readers[0]->get_data_dims(); } diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 8f06fb44ded..2c3739495af 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -205,6 +205,13 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< reader_numpy->set_has_labels(!readme.disable_labels()); reader_numpy->set_has_responses(!readme.disable_responses()); npy_readers.push_back(reader_numpy); + } 
else if (readme.format() == "numpy_npz") { + auto* reader_numpy_npz = new numpy_npz_reader(false); + reader_numpy_npz->set_data_filename(path); + reader_numpy_npz->set_has_labels(!readme.disable_labels()); + reader_numpy_npz->set_has_responses(!readme.disable_responses()); + reader_numpy_npz->set_scaling_factor_int16(readme.scaling_factor_int16()); + npy_readers.push_back(reader_numpy_npz); #ifdef LBANN_HAS_CONDUIT } else if (readme.format() == "jag_conduit") { init_image_data_reader(readme, master, reader); From 153e68bf2b5e4e74c6ad5bc0decd6b4f1717f6bc Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Mon, 4 Feb 2019 14:29:34 -0800 Subject: [PATCH 009/443] Support the merge_skip_overlapped option for merge_sample data readers --- src/proto/lbann.proto | 1 + src/proto/proto_common.cpp | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 7beb3862e8e..f14af12e27a 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -97,6 +97,7 @@ message Reader { int64 max_neighborhood = 113; // pilot2_molecular_reader int32 num_image_srcs = 114; // data_reader_multi_images float scaling_factor_int16 = 116; // for numpy_npz_reader with int16 data + bool merge_skip_overlapped = 117; // for data_reader_merge_samples //------------- start of only for partitioned data sets ------------------ bool is_partitioned = 300; diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 2c3739495af..bf5c231b43c 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -196,8 +196,16 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< filedir = filedir + "/"; } auto paths = glob(filedir + readme.data_file_pattern()); + if(readme.merge_skip_overlapped()) { + assert((paths.size()%comm->get_num_trainers()) == 0); + } std::vector npy_readers; - for (const auto path : paths) { + for(auto i = paths.begin(); i 
!= paths.end(); i++) { + const auto path = *i; + if(readme.merge_skip_overlapped() + && (std::distance(paths.begin(), i)%comm->get_procs_per_trainer()) != comm->get_rank_in_trainer()) { + continue; + } if(master) { std::cout << "Loading file: " << path << std::endl; } if (readme.format() == "numpy") { auto *reader_numpy = new numpy_reader(false); From ea64f921c15f0dd9f948c660842408f620eaf971 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Mon, 4 Feb 2019 15:20:45 -0800 Subject: [PATCH 010/443] Refactor npz reader --- .../data_readers/data_reader_numpy_npz.hpp | 1 + src/data_readers/data_reader_numpy_npz.cpp | 28 +++++++++++++------ 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/include/lbann/data_readers/data_reader_numpy_npz.hpp b/include/lbann/data_readers/data_reader_numpy_npz.hpp index ca9e88ab5e1..81c313acae3 100644 --- a/include/lbann/data_readers/data_reader_numpy_npz.hpp +++ b/include/lbann/data_readers/data_reader_numpy_npz.hpp @@ -38,6 +38,7 @@ namespace lbann { * Data reader for data stored in numpy (.npz) files. * This assumes that the file contains "data", "labels" (optional), * and "responses" (optional) whose the zero'th axis is the sample axis. + * float, double, int16 data-types is accepted for "data". */ class numpy_npz_reader : public generic_data_reader { public: diff --git a/src/data_readers/data_reader_numpy_npz.cpp b/src/data_readers/data_reader_numpy_npz.cpp index eb47822c63f..c62ba3708a3 100644 --- a/src/data_readers/data_reader_numpy_npz.cpp +++ b/src/data_readers/data_reader_numpy_npz.cpp @@ -88,9 +88,12 @@ namespace lbann { npyLoadList.push_back(std::forward_as_tuple(m_has_labels, NPZ_KEY_LABELS, m_labels)); npyLoadList.push_back(std::forward_as_tuple(m_has_responses, NPZ_KEY_RESPONSES, m_responses)); for(const auto npyLoad : npyLoadList) { + // Check whether the tensor have to be loaded. if(!std::get<0>(npyLoad)) { continue; } + + // Load the tensor. 
const std::string key = std::get<1>(npyLoad); cnpy::NpyArray &ary = std::get<2>(npyLoad); const auto i = npz.find(key); @@ -101,6 +104,7 @@ namespace lbann { " numpy_npz_reader::load() - can't find npz key : " + key); } + // Check whether the labels/responses has the same number of samples. if(key == NPZ_KEY_DATA) { m_num_samples = m_data.shape[0]; } else if(m_num_samples != (int) ary.shape[0]) { @@ -110,11 +114,16 @@ namespace lbann { } } - m_num_features = std::accumulate(m_data.shape.begin() + 1, m_data.shape.end(), (unsigned) 1, + m_num_features = std::accumulate(m_data.shape.begin() + 1, + m_data.shape.end(), + (unsigned) 1, std::multiplies()); - if(m_has_responses) - m_num_response_features = std::accumulate(m_responses.shape.begin() + 1, m_responses.shape.end(), (unsigned) 1, + if(m_has_responses) { + m_num_response_features = std::accumulate(m_responses.shape.begin() + 1, + m_responses.shape.end(), + (unsigned) 1, std::multiplies()); + } // Ensure we understand the word size. if (!(m_data.word_size == 2 || m_data.word_size == 4 || m_data.word_size == 8)) { @@ -123,7 +132,6 @@ namespace lbann { } if (m_has_labels) { - // Shift feature count because the last becomes the label. // Determine number of label classes. std::unordered_set label_classes; if (m_labels.word_size != 4) { @@ -144,9 +152,6 @@ namespace lbann { } m_num_labels = label_classes.size(); } - if (m_has_responses) { - // Last feature becomes the response. - } // Reset indices. m_shuffled_indices.clear(); @@ -159,12 +164,14 @@ namespace lbann { Mat X_v = El::View(X, El::IR(0, X.Height()), El::IR(mb_idx, mb_idx+1)); if (m_data.word_size == 2) { + // Convert int16 to DataType. 
const short *data = m_data.data() + data_id * m_num_features; DataType *dest = X_v.Buffer(); + // OPTIMIZE LBANN_OMP_PARALLEL_FOR - for(int j = 0; j < m_num_features; j++) - dest[j] = data[j] * m_scaling_factor_int16; + for(int j = 0; j < m_num_features; j++) + dest[j] = data[j] * m_scaling_factor_int16; } else { void *data = NULL; @@ -188,6 +195,9 @@ namespace lbann { } bool numpy_npz_reader::fetch_response(Mat& Y, int data_id, int mb_idx) { + if (!m_has_responses) { + throw lbann_exception("numpy_npz_reader: do not have responses"); + } void *responses = NULL; if (m_responses.word_size == 4) { responses = (void *) (m_responses.data() From 6ec7dd9aaca29ec832fb4682837bf92e0b133908 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Mon, 4 Feb 2019 15:25:44 -0800 Subject: [PATCH 011/443] Update a comment --- include/lbann/data_readers/data_reader_numpy_npz.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/lbann/data_readers/data_reader_numpy_npz.hpp b/include/lbann/data_readers/data_reader_numpy_npz.hpp index 81c313acae3..deda0862faf 100644 --- a/include/lbann/data_readers/data_reader_numpy_npz.hpp +++ b/include/lbann/data_readers/data_reader_numpy_npz.hpp @@ -111,4 +111,4 @@ namespace lbann { } // namespace lbann -#endif // LBANN_DATA_READER_NUMPY_HPP +#endif // LBANN_DATA_READER_NUMPY_NPZ_HPP From 2d8ece9d75b500bdbe65446baf306fd10d6bc5ec Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Tue, 5 Feb 2019 16:04:48 -0800 Subject: [PATCH 012/443] changes for compatibility with hydrogen comm semantics changes --- src/comm.cpp | 18 +++++++++++++----- .../data_reader_pilot2_molecular.cpp | 2 +- src/data_store/generic_data_store.cpp | 2 +- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/src/comm.cpp b/src/comm.cpp index d3c2538f007..19a2cb6158e 100644 --- a/src/comm.cpp +++ b/src/comm.cpp @@ -190,7 +190,9 @@ void lbann_comm::allreduce(AbsMat& m, m.Buffer(), local_size, mpi_op_to_al_op(op), - c.template GetComm<::Al::NCCLBackend>()); + c.template GetComm<::Al::NCCLBackend>( + SyncInfoFromMatrix( + static_cast&>(m)))); } #endif // AL_HAS_NCCL #ifdef AL_HAS_MPI_CUDA @@ -200,7 +202,9 @@ void lbann_comm::allreduce(AbsMat& m, m.Buffer(), local_size, mpi_op_to_al_op(op), - c.template GetComm<::Al::MPICUDABackend>(), + c.template GetComm<::Al::MPICUDABackend>( + SyncInfoFromMatrix( + static_cast&>(m))), ::Al::MPICUDAAllreduceAlgorithm::host_transfer); } #endif // AL_HAS_MPI_CUDA @@ -275,7 +279,9 @@ void lbann_comm::nb_allreduce(AbsMat& m, m.Buffer(), local_size, mpi_op_to_al_op(op), - c.template GetComm<::Al::NCCLBackend>(), + c.template GetComm<::Al::NCCLBackend>( + SyncInfoFromMatrix( + static_cast&>(m))), req.nccl_req); } #endif // AL_HAS_NCCL @@ -286,7 +292,9 @@ void lbann_comm::nb_allreduce(AbsMat& m, m.Buffer(), local_size, mpi_op_to_al_op(op), - c.template GetComm<::Al::MPICUDABackend>(), + c.template GetComm<::Al::MPICUDABackend>( + SyncInfoFromMatrix( + static_cast&>(m))), req.mpicuda_req, ::Al::MPICUDAAllreduceAlgorithm::host_transfer); } @@ -451,7 +459,7 @@ void lbann_comm::setup_node_comm() { auto *node_name_list = new char[hash_comm_size*MPI_MAX_PROCESSOR_NAME]; checkMPI(MPI_Allgather(node_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, node_name_list, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, - hash_comm.comm)); + hash_comm.GetMPIComm())); int node_num = El::mpi::Rank(hash_comm); for(int i=0; iget_trainer_comm().comm); + 
MPI_Bcast(tmp.data(), 8, MPI_INT, get_compound_rank(), m_comm->get_trainer_comm().GetMPIComm()); m_num_samples = tmp[0]; m_num_samples_per_frame = tmp[1]; m_num_features = tmp[2]; diff --git a/src/data_store/generic_data_store.cpp b/src/data_store/generic_data_store.cpp index 5367aaea3cd..58d64954992 100644 --- a/src/data_store/generic_data_store.cpp +++ b/src/data_store/generic_data_store.cpp @@ -73,7 +73,7 @@ generic_data_store::generic_data_store(generic_data_reader *reader, model *m) : m_master = m_comm->am_world_master(); m_rank = m_comm->get_rank_in_trainer(); m_np = m_comm->get_procs_per_trainer(); - m_mpi_comm = m_comm->get_trainer_comm().comm; + m_mpi_comm = m_comm->get_trainer_comm().GetMPIComm(); m_dir = m_reader->get_file_dir(); From fc0da829b784da481a3d1c26e83c5b0657a13603 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Tue, 5 Feb 2019 16:57:35 -0800 Subject: [PATCH 013/443] next (last?) batch of changes for API break with hydrogen MPI Comm semantics updates --- include/lbann/comm.hpp | 110 +++++++++++++-------------- src/comm.cpp | 18 ++--- src/data_readers/data_reader_csv.cpp | 2 +- 3 files changed, 65 insertions(+), 65 deletions(-) diff --git a/include/lbann/comm.hpp b/include/lbann/comm.hpp index bb88ffb917e..c99b4bb4462 100644 --- a/include/lbann/comm.hpp +++ b/include/lbann/comm.hpp @@ -135,7 +135,7 @@ class lbann_comm { * defaulting to every process in one trainer. */ lbann_comm(int procs_per_trainer = 0, - El::mpi::Comm world = El::mpi::COMM_WORLD); + El::mpi::Comm world = El::mpi::COMM_WORLD.GetMPIComm()); /** Don't allow copying; it doesn't make sense for the communicator. */ lbann_comm(const lbann_comm&) = delete; /** Don't allow assignment; it doesn't make sense for the communicator. 
*/ @@ -241,12 +241,12 @@ class lbann_comm { /// Broadcast a scalar value over an arbitrary communicator template < typename T, bool S = is_instantiated_El_mpi_type::value > - void broadcast(int root, T& val, El::mpi::Comm c); + void broadcast(int root, T& val, const El::mpi::Comm& c); template - void broadcast_custom(int root, T& val, El::mpi::Comm c) const; + void broadcast_custom(int root, T& val, const El::mpi::Comm& c) const; template - void broadcast_native(int root, T& val, El::mpi::Comm c) const; + void broadcast_native(int root, T& val, const El::mpi::Comm& c) const; /// World broadcast of a scalar. template @@ -271,12 +271,12 @@ class lbann_comm { // Default to cpu memory template - void broadcast(const int root, T* data, const int count, El::mpi::Comm c) { + void broadcast(const int root, T* data, const int count, const El::mpi::Comm& c) { broadcast(root, data, count, std::move(c), El::SyncInfo{}); } template < typename T, El::Device D, bool S = is_instantiated_El_mpi_type::value > - void broadcast(const int root, T* data, const int count, El::mpi::Comm c, + void broadcast(const int root, T* data, const int count, const El::mpi::Comm& c, El::SyncInfo const& syncInfo); /// World broadcast of a buffer. @@ -316,7 +316,7 @@ class lbann_comm { * Resize vector<> over an arbitrary communicator to match the one on root. */ template - size_t resize(const int root, std::vector &data, El::mpi::Comm c) { + size_t resize(const int root, std::vector &data, const El::mpi::Comm& c) { auto const rank_c = El::mpi::Rank(c); size_t count = data.size(); El::mpi::Broadcast(&count, 1, root, std::move(c), El::SyncInfo{}); @@ -330,7 +330,7 @@ class lbann_comm { * vector<> for non-root processes will be resized as needed. 
*/ template - void broadcast(const int root, std::vector &data, El::mpi::Comm c) { + void broadcast(const int root, std::vector &data, const El::mpi::Comm& c) { const int count = static_cast(resize(root, data, c)); if (count <= 0) { return; @@ -370,12 +370,12 @@ class lbann_comm { /** Allgather over an arbitrary communicator */ template - void all_gather(const T* src, int src_count, T* rcv, int rcv_count, El::mpi::Comm c) { + void all_gather(const T* src, int src_count, T* rcv, int rcv_count, const El::mpi::Comm& c) { all_gather(src, src_count, rcv, rcv_count, std::move(c), El::SyncInfo{}); } template - void all_gather(const T* src, int src_count, T* rcv, int rcv_count, El::mpi::Comm c, + void all_gather(const T* src, int src_count, T* rcv, int rcv_count, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { El::mpi::AllGather(src, src_count, rcv, rcv_count, std::move(c), syncInfo); } @@ -385,7 +385,7 @@ class lbann_comm { * all vectors must be correctly sized prior to entry. */ template - void all_gather(std::vector &src, std::vector &rcs, std::vector &rcv_counts, std::vector &rcv_disp, El::mpi::Comm c) { + void all_gather(std::vector &src, std::vector &rcs, std::vector &rcv_counts, std::vector &rcv_disp, const El::mpi::Comm& c) { if (src.size() == 0) { std::stringstream err; err << __FILE__ << " " << __LINE__ << " :: " @@ -408,7 +408,7 @@ class lbann_comm { * std::vector &data must be correctly sized prior to entry. */ template - void all_gather(T &src, std::vector &data, El::mpi::Comm c) { + void all_gather(T &src, std::vector &data, const El::mpi::Comm& c) { El::mpi::AllGather(&src, 1, data.data(), 1, std::move(c), El::SyncInfo{}); } @@ -479,14 +479,14 @@ class lbann_comm { } /** Scalar gather (for non-root processes). 
*/ template - void gather(T snd, int root, El::mpi::Comm c) { + void gather(T snd, int root, const El::mpi::Comm& c) { bytes_sent += sizeof(T); El::mpi::Gather(&snd, 1, (T*) nullptr, 0, root, std::move(c), El::SyncInfo{}); } /** Scalar gather (for root processes). */ template - void gather(T snd, T *rcv, El::mpi::Comm c) { + void gather(T snd, T *rcv, const El::mpi::Comm& c) { auto const size_c = El::mpi::Size(c); auto const rank_c = El::mpi::Rank(c); El::mpi::Gather(&snd, 1, rcv, 1, rank_c, std::move(c), @@ -495,18 +495,18 @@ class lbann_comm { } /** Scalar gather (for root processes). */ template - void gather(T snd, std::vector& rcv, El::mpi::Comm c) { + void gather(T snd, std::vector& rcv, const El::mpi::Comm& c) { gather(snd, rcv.data(), std::move(c)); } /** Scalar-array gather (for non-root processes). */ template - void gather(T *snd, int count, int root, El::mpi::Comm c) + void gather(T *snd, int count, int root, const El::mpi::Comm& c) { gather(snd, count, root, std::move(c), El::SyncInfo{}); } template - void gather(T *snd, int count, int root, El::mpi::Comm c, + void gather(T *snd, int count, int root, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { bytes_sent += sizeof(T) * count; El::mpi::Gather(snd, count, (T*) nullptr, 0, root, std::move(c), @@ -514,11 +514,11 @@ class lbann_comm { } /** Scalar-array gather (for root processes). */ template - void gather(T *snd, int count, T *rcv, El::mpi::Comm c) { + void gather(T *snd, int count, T *rcv, const El::mpi::Comm& c) { gather(snd, count, rcv, std::move(c), El::SyncInfo{}); } template - void gather(T *snd, int count, T *rcv, El::mpi::Comm c, + void gather(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { auto const size_c = El::mpi::Size(c); auto const rank_c = El::mpi::Rank(c); @@ -527,7 +527,7 @@ class lbann_comm { } /** Scalar scatter (for non-root processes). 
*/ template - T scatter(int root, El::mpi::Comm c) { + T scatter(int root, const El::mpi::Comm& c) { T val = {}; El::mpi::Scatter((T*) nullptr, 1, &val, 1, root, std::move(c), El::SyncInfo{}); @@ -536,7 +536,7 @@ class lbann_comm { } /** Scalar scatter (for root processes). */ template - T scatter(T *snd, El::mpi::Comm c) { + T scatter(T *snd, const El::mpi::Comm& c) { bytes_sent += sizeof(T) * (El::mpi::Size(c) - 1); T val = {}; auto root = El::mpi::Rank(c); @@ -576,14 +576,14 @@ class lbann_comm { } /** Scalar reduce (for non-root processes). */ template - void reduce(T snd, int root, El::mpi::Comm c, El::mpi::Op op = El::mpi::SUM) { + void reduce(T snd, int root, const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM) { bytes_sent += sizeof(T); El::mpi::Reduce(&snd, (T*) NULL, 1, op, root, std::move(c), El::SyncInfo{}); } /** Scalar reduce (for root processes). */ template - T reduce(T snd, El::mpi::Comm c, El::mpi::Op op = El::mpi::SUM) { + T reduce(T snd, const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM) { T val = {}; auto const size_c = El::mpi::Size(c); auto const rank_c = El::mpi::Rank(c); @@ -596,40 +596,40 @@ class lbann_comm { /** Scalar-array reduce (for non-root processes). 
*/ // Op is "SUM" template - void reduce(T *snd, int count, int root, El::mpi::Comm c) { + void reduce(T *snd, int count, int root, const El::mpi::Comm& c) { reduce(snd, count, root, std::move(c), El::mpi::SUM, El::SyncInfo{}); } template - void reduce(T *snd, int count, int root, El::mpi::Comm c, El::SyncInfo const& syncInfo) { + void reduce(T *snd, int count, int root, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { reduce(snd, count, root, std::move(c), El::mpi::SUM, syncInfo); } template - void reduce(T *snd, int count, int root, El::mpi::Comm c, El::mpi::Op op) { + void reduce(T *snd, int count, int root, const El::mpi::Comm& c, El::mpi::Op op) { reduce(snd, count, root, std::move(c), op, El::SyncInfo{}); } template - void reduce(T *snd, int count, int root, El::mpi::Comm c, El::mpi::Op op, El::SyncInfo const& syncInfo) { + void reduce(T *snd, int count, int root, const El::mpi::Comm& c, El::mpi::Op op, El::SyncInfo const& syncInfo) { bytes_sent += sizeof(T) * count; El::mpi::Reduce(snd, (T*) NULL, count, op, root, std::move(c), syncInfo); } /** Scalar-array reduce (for root processes). 
*/ template - void reduce(T *snd, int count, T *rcv, El::mpi::Comm c, El::SyncInfo const& syncInfo) { + void reduce(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { reduce(snd, count, rcv, std::move(c), El::mpi::SUM, syncInfo); } template - void reduce(T *snd, int count, T *rcv, El::mpi::Comm c) { + void reduce(T *snd, int count, T *rcv, const El::mpi::Comm& c) { reduce(snd, count, rcv, std::move(c), El::mpi::SUM, El::SyncInfo{}); } template - void reduce(T *snd, int count, T *rcv, El::mpi::Comm c, El::mpi::Op op) { + void reduce(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::mpi::Op op) { reduce(snd, count, rcv, std::move(c), op, El::SyncInfo{}); } template - void reduce(T *snd, int count, T *rcv, El::mpi::Comm c, El::mpi::Op op, El::SyncInfo const& syncInfo) { + void reduce(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::mpi::Op op, El::SyncInfo const& syncInfo) { if (snd == rcv) { snd = (T*)MPI_IN_PLACE; } auto const rank_c = El::mpi::Rank(c); auto const size_c = El::mpi::Size(c); @@ -653,7 +653,7 @@ class lbann_comm { } /** Scalar allreduce. */ template - T allreduce(T snd, El::mpi::Comm c, El::mpi::Op op = El::mpi::SUM) { + T allreduce(T snd, const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM) { auto const size_c = El::mpi::Size(c); bytes_sent += sizeof(T); allreduce(&snd, 1, std::move(c), op); @@ -665,7 +665,7 @@ class lbann_comm { // assuming this is intended as a CPU-only call. /** Scalar-array allreduce. 
*/ template - void allreduce(T *snd, int count, T *rcv, El::mpi::Comm c, El::mpi::Op op = El::mpi::SUM) { + void allreduce(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM) { auto const size_c = El::mpi::Size(c); bytes_sent += count * sizeof(T); #ifdef LBANN_HAS_ALUMINUM @@ -675,7 +675,7 @@ class lbann_comm { ::Al::MPIAllreduceAlgorithm algo = ::Al::MPIAllreduceAlgorithm::automatic; #endif ::Al::Allreduce<::Al::MPIBackend>( - snd, rcv, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(), algo); + snd, rcv, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(El::SyncInfo{}), algo); #else El::mpi::AllReduce(snd, rcv, count, op, std::move(c), El::SyncInfo{}); @@ -684,7 +684,7 @@ class lbann_comm { } /** In-place scalar-array allreduce. */ template - void allreduce(T *data, int count, El::mpi::Comm c, El::mpi::Op op = El::mpi::SUM) { + void allreduce(T *data, int count, const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM) { auto const size_c = El::mpi::Size(c); bytes_sent += count * sizeof(T); #ifdef LBANN_HAS_ALUMINUM @@ -694,7 +694,7 @@ class lbann_comm { ::Al::MPIAllreduceAlgorithm algo = ::Al::MPIAllreduceAlgorithm::automatic; #endif ::Al::Allreduce<::Al::MPIBackend>( - data, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(), algo); + data, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(El::SyncInfo{}), algo); #else El::mpi::AllReduce(data, count, op, std::move(c), El::SyncInfo{}); @@ -703,18 +703,18 @@ class lbann_comm { } /** Matrix allreduce. */ void allreduce(AbsMat& m, - El::mpi::Comm c, + const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM); /** Matrix allreduce. */ void allreduce(AbsDistMat& m, - El::mpi::Comm c, + const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM); /** Non-blocking matrix allreduce. * If LBANN has not been built with Aluminum, then this calls a * blocking matrix allreduce. 
*/ void nb_allreduce(AbsMat& m, - El::mpi::Comm c, + const El::mpi::Comm& c, Al::request& req, El::mpi::Op op = El::mpi::SUM); /** Non-blocking matrix allreduce. @@ -722,7 +722,7 @@ class lbann_comm { * blocking matrix allreduce. */ void nb_allreduce(AbsDistMat& m, - El::mpi::Comm c, + const El::mpi::Comm& c, Al::request& req, El::mpi::Op op = El::mpi::SUM); /** Non-blocking in-place scalar-array allreduce. @@ -731,13 +731,13 @@ class lbann_comm { * This currently only supports host pointers (i.e. the MPI backend). */ template - void nb_allreduce(T *data, int count, El::mpi::Comm c, Al::request& req, + void nb_allreduce(T *data, int count, const El::mpi::Comm& c, Al::request& req, El::mpi::Op op = El::mpi::SUM) { #ifdef LBANN_HAS_ALUMINUM bytes_sent += count * sizeof(T); req.mpi_req = Al::mpi_null_req; ::Al::NonblockingAllreduce<::Al::MPIBackend>( - data, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(), req.mpi_req); + data, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(El::SyncInfo{}), req.mpi_req); bytes_received += count * sizeof(T) * (El::mpi::Size(c) - 1); #else allreduce(data, count, std::move(c), op); @@ -768,7 +768,7 @@ class lbann_comm { /** Barrier among all processes. */ void global_barrier(); /** Barrier on an arbitrary communicator. */ - void barrier(const El::mpi::Comm c); + void barrier(const El::mpi::Comm& c); /** Send a buffer to rank in trainer. 
*/ template @@ -802,7 +802,7 @@ class lbann_comm { } template void nb_tagged_send(const T *data, int count, int rank, int tag, - El::mpi::Request& req, El::mpi::Comm c) { + El::mpi::Request& req, const El::mpi::Comm& c) { bytes_sent += sizeof(T) * count; El::mpi::TaggedISend(data, count, rank, tag, std::move(c), req); } @@ -866,7 +866,7 @@ class lbann_comm { } template void nb_tagged_recv( T *data, int count, int rank, int tag, - El::mpi::Request& req, El::mpi::Comm c) { + El::mpi::Request& req, const El::mpi::Comm& c) { El::mpi::TaggedIRecv(data, count, rank, tag, std::move(c), req); bytes_received += sizeof(T) * count; } @@ -979,27 +979,27 @@ class lbann_comm { } /** Return the intertrainer communicator. */ - El::mpi::Comm get_intertrainer_comm() const { + const El::mpi::Comm& get_intertrainer_comm() const { return intertrainer_comm; } /** Return the trainer communicator. */ - El::mpi::Comm get_trainer_comm() const { + const El::mpi::Comm& get_trainer_comm() const { return trainer_comm; } /** Return the world communicator. */ - const El::mpi::Comm get_world_comm() const { + const El::mpi::Comm& get_world_comm() const { return world_comm; } /** Return the communicator for this node. */ - const El::mpi::Comm get_node_comm() const { + const El::mpi::Comm& get_node_comm() const { return node_comm; } /** Return true if rank (in comm) is on the local node. */ - bool is_rank_node_local(int rank, const El::mpi::Comm comm) const { + bool is_rank_node_local(int rank, const El::mpi::Comm& comm) const { // Translating to COMM_WORLD is typically constant time. 
int world_rank = El::mpi::Translate(comm, rank, get_world_comm()); return is_world_rank_on_node(world_rank); @@ -1068,7 +1068,7 @@ class lbann_comm { }; template -void lbann_comm::broadcast(int root, T& val, El::mpi::Comm c) { +void lbann_comm::broadcast(int root, T& val, const El::mpi::Comm& c) { auto const rank_c = El::mpi::Rank(c); if (S) { // Avoid linking error from uninstantiated El::mpi routine if !S by converting T to El::byte @@ -1081,19 +1081,19 @@ void lbann_comm::broadcast(int root, T& val, El::mpi::Comm c) { } template -void lbann_comm::broadcast_native(int root, T& val, El::mpi::Comm c) const { +void lbann_comm::broadcast_native(int root, T& val, const El::mpi::Comm& c) const { El::mpi::Broadcast(val, root, std::move(c), El::SyncInfo{}); } template -void lbann_comm::broadcast_custom(int root, T& val, El::mpi::Comm c) const { +void lbann_comm::broadcast_custom(int root, T& val, const El::mpi::Comm& c) const { const int bytes = static_cast(sizeof(T)); El::mpi::Broadcast(reinterpret_cast(&val), bytes, root, std::move(c), El::SyncInfo{}); } template -void lbann_comm::broadcast(const int root, T* data, const int count, El::mpi::Comm c, El::SyncInfo const& syncInfo) { +void lbann_comm::broadcast(const int root, T* data, const int count, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { auto const rank_c = El::mpi::Rank(c); const int size = static_cast(S? count : sizeof(T)*count); // Avoid linking error from uninstantiated El::mpi routine if !S by converting T to El::byte @@ -1104,7 +1104,7 @@ void lbann_comm::broadcast(const int root, T* data, const int count, El::mpi::Co /// Broadcast std::string over an arbitrary communicator. template<> -void lbann_comm::broadcast(const int root, std::string& str, El::mpi::Comm c); +void lbann_comm::broadcast(const int root, std::string& str, const El::mpi::Comm& c); /** Get the current rank within MPI_COMM_WORLD. 
* This function is safe to call even if MPI has not initialized or diff --git a/src/comm.cpp b/src/comm.cpp index 19a2cb6158e..04da25ade19 100644 --- a/src/comm.cpp +++ b/src/comm.cpp @@ -121,7 +121,7 @@ void lbann_comm::split_trainers(int ppm) { if (grid != nullptr) { delete grid; } - grid = new Grid(trainer_comm); + grid = new Grid(trainer_comm.GetMPIComm()); } void lbann_comm::intertrainer_sum_matrix(AbsMat& mat) { @@ -135,7 +135,7 @@ void lbann_comm::intertrainer_sum_matrix(AbsDistMat& mat) { } void lbann_comm::allreduce(AbsMat& m, - El::mpi::Comm c, + const El::mpi::Comm& c, El::mpi::Op op) { if (El::mpi::Size(c) == 1 || m.Height() < 1 || m.Width() < 1) { return; @@ -182,7 +182,7 @@ void lbann_comm::allreduce(AbsMat& m, m.Buffer(), local_size, mpi_op_to_al_op(op), - c.template GetComm<::Al::MPIBackend>()); + c.template GetComm<::Al::MPIBackend>(El::SyncInfo{})); } #ifdef AL_HAS_NCCL if (t == std::type_index(typeid(::Al::NCCLBackend))) { @@ -215,13 +215,13 @@ void lbann_comm::allreduce(AbsMat& m, } void lbann_comm::allreduce(AbsDistMat& m, - El::mpi::Comm c, + const El::mpi::Comm& c, El::mpi::Op op) { allreduce(m.Matrix(), std::move(c), op); } void lbann_comm::nb_allreduce(AbsMat& m, - El::mpi::Comm c, + const El::mpi::Comm& c, Al::request& req, El::mpi::Op op) { if (El::mpi::Size(c) == 1 || m.Height() < 1 || m.Width() < 1) { @@ -269,7 +269,7 @@ void lbann_comm::nb_allreduce(AbsMat& m, m.Buffer(), local_size, mpi_op_to_al_op(op), - c.template GetComm<::Al::MPIBackend>(), + c.template GetComm<::Al::MPIBackend>(El::SyncInfo{}), req.mpi_req); } /// @todo MPI-CUDA backend @@ -306,7 +306,7 @@ void lbann_comm::nb_allreduce(AbsMat& m, } void lbann_comm::nb_allreduce(AbsDistMat& m, - El::mpi::Comm c, + const El::mpi::Comm& c, Al::request& req, El::mpi::Op op) { nb_allreduce(m.Matrix(), std::move(c), req, op); @@ -361,7 +361,7 @@ void lbann_comm::intertrainer_broadcast_matrix(AbsDistMat& mat, int root) { } template<> -void lbann_comm::broadcast(const int root, 
std::string& str, El::mpi::Comm c) { +void lbann_comm::broadcast(const int root, std::string& str, const El::mpi::Comm& c) { std::vector data(str.begin(), str.end()); broadcast(root, data, std::move(c)); str.assign(data.begin(), data.end()); @@ -382,7 +382,7 @@ void lbann_comm::global_barrier() { barrier(get_world_comm()); } -void lbann_comm::barrier(const El::mpi::Comm c) { +void lbann_comm::barrier(const El::mpi::Comm& c) { El::mpi::Barrier(c); } diff --git a/src/data_readers/data_reader_csv.cpp b/src/data_readers/data_reader_csv.cpp index f970df155c2..60ea89c81c8 100644 --- a/src/data_readers/data_reader_csv.cpp +++ b/src/data_readers/data_reader_csv.cpp @@ -101,7 +101,7 @@ void csv_reader::load() { bool master = m_comm->am_world_master(); setup_ifstreams(); std::ifstream& ifs = *m_ifstreams[0]; - const El::mpi::Comm world_comm = m_comm->get_world_comm(); + const El::mpi::Comm& world_comm = m_comm->get_world_comm(); // Parse the header to determine how many columns there are. // Skip rows if needed. 
if (master) { From 4b21dd1c28c93a066a1e4b5cbd2e5340a5eb61f5 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Wed, 6 Feb 2019 17:01:18 -0800 Subject: [PATCH 014/443] Apply 2to3 to viz.py --- viz/viz.py | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/viz/viz.py b/viz/viz.py index 647b50ffe68..a4e487b3d0c 100755 --- a/viz/viz.py +++ b/viz/viz.py @@ -50,7 +50,7 @@ def parsePrototext(fn) : #===================================================================== if len(argv) < 2 : - print usage + print(usage) exit(9) #parse cmd line @@ -75,10 +75,10 @@ def parsePrototext(fn) : elif t[0] == 'ranksep' : ranksep = float(t[1]) else : - print 'badly formed or unknown cmd line option:', argv[j] - print '================================================================' - print - print usage + print('badly formed or unknown cmd line option:', argv[j]) + print('================================================================') + print() + print(usage) exit(9) #===================================================================== @@ -90,10 +90,10 @@ def fixSequentialParents(layers) : if len(layer.parents()) != 0 : num_layers_with_parents += 1 if len(layer.children()) != 0 : num_layers_with_children += 1 if num_layers_with_parents == 0 : - print - print 'NOTE: this model does not appear to have any parent fields;' - print ' dealing with that ...' 
- print + print() + print('NOTE: this model does not appear to have any parent fields;') + print(' dealing with that ...') + print() assert(num_layers_with_children == 0) for j in range(1, len(layers)) : layers[j].setParents(layers[j-1]) @@ -115,15 +115,15 @@ def getLinkedLayers(layers) : my_name = layer.name() for x in links : if my_name != x : - if w.has_key(my_name) : + if my_name in w : w[my_name].add(x) - elif w.has_key(x) : + elif x in w : w[x].add(my_name) else : - print 'error' + print('error') exit(9) - for x in w.keys() : + for x in list(w.keys()) : if len(w[x]) > 1 : r.append(w[x]) return r @@ -156,7 +156,7 @@ def getLinkedLayers(layers) : type = layer.type() name_to_type[name] = type for p in parents : - if not edges.has_key(p) : + if p not in edges : edges[p] = set() edges[p].add(name) @@ -167,14 +167,14 @@ def getLinkedLayers(layers) : out.write('graph[ranksep="' + str(ranksep) + '"]\n') #write vertices -for parent in edges.keys() : +for parent in list(edges.keys()) : try : type = name_to_type[parent] label = '' if brief: label = '<' + type + '' else : - label = '<' + type + '
name: ' + parent + label = '<' + type + '
name: ' + parent if full : attr = attributes[parent] if len(attr) : @@ -183,28 +183,28 @@ def getLinkedLayers(layers) : label += x + '
' label += '> ' except : - print '\n\ncaught exception; parent:', parent + print('\n\ncaught exception; parent:', parent) exit(9) out.write(' ' + parent + '[label=' + label + ' shape=' + props.shape(type) + ', style=filled, fillcolor=' + props.color(type) + ']\n') #write edges -for parent in edges.keys() : +for parent in list(edges.keys()) : type = name_to_type[parent] for child in edges[parent] : child_type = name_to_type[child] - if type == 'slice' : + if type == 'slice' : out.write(parent + ' -> ' + child + '[color=red, penwidth=2.0];') - elif type == 'split' : + elif type == 'split' : out.write(parent + ' -> ' + child + '[color=darkorange, penwidth=2.0];') - elif child_type == 'sum' : + elif child_type == 'sum' : out.write(parent + ' -> ' + child + '[color=deepskyblue, penwidth=2.0];') else : out.write(parent + ' -> ' + child + '[];\n') #alternatove to above: use subgraphs -#write linked layer subgraphs +#write linked layer subgraphs n = 0 for x in linked : out.write('subgraph cluster_' + str(n) + ' {\n') @@ -219,6 +219,6 @@ def getLinkedLayers(layers) : #run graphviz cmd = 'dot -T' + output_format + ' graph.dot -o' + output_fn + '.' + output_format -print -print 'about to run:', cmd +print() +print('about to run:', cmd) os.system(cmd) From aac3c319a154ca94e029a9f15da45acd883cd439 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Wed, 6 Feb 2019 21:54:05 -0800 Subject: [PATCH 015/443] Move viz docs. 
--- {viz => scripts/proto/docs/viz}/README | 0 {viz/examples => scripts/proto/examples/viz}/README | 1 - .../proto/examples/viz}/rnn_1.jpg | Bin .../proto/examples/viz}/rnn_1.pdf | Bin .../proto/examples/viz}/rnn_1a.pdf | Bin .../proto/examples/viz}/rnn_2.pdf | Bin .../proto/examples/viz}/rnn_3.jpg | Bin .../proto/examples/viz}/rnn_3.pdf | Bin .../proto/examples/viz}/rnn_4.pdf | Bin 9 files changed, 1 deletion(-) rename {viz => scripts/proto/docs/viz}/README (100%) rename {viz/examples => scripts/proto/examples/viz}/README (99%) rename {viz/examples => scripts/proto/examples/viz}/rnn_1.jpg (100%) rename {viz/examples => scripts/proto/examples/viz}/rnn_1.pdf (100%) rename {viz/examples => scripts/proto/examples/viz}/rnn_1a.pdf (100%) rename {viz/examples => scripts/proto/examples/viz}/rnn_2.pdf (100%) rename {viz/examples => scripts/proto/examples/viz}/rnn_3.jpg (100%) rename {viz/examples => scripts/proto/examples/viz}/rnn_3.pdf (100%) rename {viz/examples => scripts/proto/examples/viz}/rnn_4.pdf (100%) diff --git a/viz/README b/scripts/proto/docs/viz/README similarity index 100% rename from viz/README rename to scripts/proto/docs/viz/README diff --git a/viz/examples/README b/scripts/proto/examples/viz/README similarity index 99% rename from viz/examples/README rename to scripts/proto/examples/viz/README index 7c322e6d713..7dbc28e662d 100644 --- a/viz/examples/README +++ b/scripts/proto/examples/viz/README @@ -30,4 +30,3 @@ $ viz.py ../model_zoo/models/char_rnn/model_char_rnn.prototext ranksep=.7 output output: rnn_3.pdf notes: didn't specify properties file, so uses the default 'properties.txt' - diff --git a/viz/examples/rnn_1.jpg b/scripts/proto/examples/viz/rnn_1.jpg similarity index 100% rename from viz/examples/rnn_1.jpg rename to scripts/proto/examples/viz/rnn_1.jpg diff --git a/viz/examples/rnn_1.pdf b/scripts/proto/examples/viz/rnn_1.pdf similarity index 100% rename from viz/examples/rnn_1.pdf rename to scripts/proto/examples/viz/rnn_1.pdf diff --git 
a/viz/examples/rnn_1a.pdf b/scripts/proto/examples/viz/rnn_1a.pdf similarity index 100% rename from viz/examples/rnn_1a.pdf rename to scripts/proto/examples/viz/rnn_1a.pdf diff --git a/viz/examples/rnn_2.pdf b/scripts/proto/examples/viz/rnn_2.pdf similarity index 100% rename from viz/examples/rnn_2.pdf rename to scripts/proto/examples/viz/rnn_2.pdf diff --git a/viz/examples/rnn_3.jpg b/scripts/proto/examples/viz/rnn_3.jpg similarity index 100% rename from viz/examples/rnn_3.jpg rename to scripts/proto/examples/viz/rnn_3.jpg diff --git a/viz/examples/rnn_3.pdf b/scripts/proto/examples/viz/rnn_3.pdf similarity index 100% rename from viz/examples/rnn_3.pdf rename to scripts/proto/examples/viz/rnn_3.pdf diff --git a/viz/examples/rnn_4.pdf b/scripts/proto/examples/viz/rnn_4.pdf similarity index 100% rename from viz/examples/rnn_4.pdf rename to scripts/proto/examples/viz/rnn_4.pdf From 65bb569c762d5b537aabf602d94c34a0d6ac8a98 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Thu, 7 Feb 2019 08:04:54 -0800 Subject: [PATCH 016/443] Move viz scripts --- {viz => scripts/proto/lbann/viz}/layer.py | 0 {viz => scripts/proto/lbann/viz}/properties.py | 0 {viz => scripts/proto/scripts/viz}/viz.py | 1 + 3 files changed, 1 insertion(+) rename {viz => scripts/proto/lbann/viz}/layer.py (100%) rename {viz => scripts/proto/lbann/viz}/properties.py (100%) rename {viz => scripts/proto/scripts/viz}/viz.py (99%) diff --git a/viz/layer.py b/scripts/proto/lbann/viz/layer.py similarity index 100% rename from viz/layer.py rename to scripts/proto/lbann/viz/layer.py diff --git a/viz/properties.py b/scripts/proto/lbann/viz/properties.py similarity index 100% rename from viz/properties.py rename to scripts/proto/lbann/viz/properties.py diff --git a/viz/viz.py b/scripts/proto/scripts/viz/viz.py similarity index 99% rename from viz/viz.py rename to scripts/proto/scripts/viz/viz.py index a4e487b3d0c..bc29c32e47f 100755 --- a/viz/viz.py +++ 
b/scripts/proto/scripts/viz/viz.py @@ -2,6 +2,7 @@ from sys import * import os import pprint + from properties import * from layer import * From 71097dd05e1051828664da3255dbdc794827dcc5 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Thu, 7 Feb 2019 08:05:03 -0800 Subject: [PATCH 017/443] Update viz documentation --- scripts/proto/docs/viz/README | 4 ---- scripts/proto/docs/viz/README.md | 8 ++++++++ 2 files changed, 8 insertions(+), 4 deletions(-) delete mode 100644 scripts/proto/docs/viz/README create mode 100644 scripts/proto/docs/viz/README.md diff --git a/scripts/proto/docs/viz/README b/scripts/proto/docs/viz/README deleted file mode 100644 index fe61ea34a67..00000000000 --- a/scripts/proto/docs/viz/README +++ /dev/null @@ -1,4 +0,0 @@ -run: $viz.py -with no arguments for usage - -see the "examples" directory for command lines and outputs diff --git a/scripts/proto/docs/viz/README.md b/scripts/proto/docs/viz/README.md new file mode 100644 index 00000000000..142bbe5b3b3 --- /dev/null +++ b/scripts/proto/docs/viz/README.md @@ -0,0 +1,8 @@ +# How to run +``` +viz.py +``` + +with no arguments for usage. + +see [the example directory](../../examples/viz) for command lines and outputs. From 83f27aa209d2c716cfecd47337052aeda293dca0 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Thu, 7 Feb 2019 08:20:47 -0800 Subject: [PATCH 018/443] Update viz scripts and docs. 
--- scripts/proto/README.md | 4 +- scripts/proto/docs/viz/README.md | 2 +- scripts/proto/examples/viz/README | 32 --------------- scripts/proto/examples/viz/README.md | 40 +++++++++++++++++++ scripts/proto/lbann/viz/properties.py | 6 +-- scripts/proto/scripts/viz/{viz.py => lbviz} | 7 ++-- .../proto/scripts/viz}/properties.txt | 0 .../proto/scripts/viz}/properties_rect.txt | 0 scripts/proto/setup.py | 3 +- viz/.gitignore | 2 - 10 files changed, 53 insertions(+), 43 deletions(-) delete mode 100644 scripts/proto/examples/viz/README create mode 100644 scripts/proto/examples/viz/README.md rename scripts/proto/scripts/viz/{viz.py => lbviz} (97%) rename {viz => scripts/proto/scripts/viz}/properties.txt (100%) rename {viz => scripts/proto/scripts/viz}/properties_rect.txt (100%) delete mode 100644 viz/.gitignore diff --git a/scripts/proto/README.md b/scripts/proto/README.md index a154269661c..c0d45c7044f 100644 --- a/scripts/proto/README.md +++ b/scripts/proto/README.md @@ -10,8 +10,10 @@ any problems or have feature suggestions. * For more details about the LBANN/ONNX converter, see [here](docs/onnx/README.md). -* For more details about the visualization script +* For more details about the *accuracy/loss* visualization script (also known as `lbplot`), see [here](docs/plot/README.md). +* For more details about the *model* visualization script +(also known as `lbviz`, )see [here](docs/viz/README.md). # Setup diff --git a/scripts/proto/docs/viz/README.md b/scripts/proto/docs/viz/README.md index 142bbe5b3b3..4a5d21ce281 100644 --- a/scripts/proto/docs/viz/README.md +++ b/scripts/proto/docs/viz/README.md @@ -1,6 +1,6 @@ # How to run ``` -viz.py +lbviz ``` with no arguments for usage. 
diff --git a/scripts/proto/examples/viz/README b/scripts/proto/examples/viz/README deleted file mode 100644 index 7dbc28e662d..00000000000 --- a/scripts/proto/examples/viz/README +++ /dev/null @@ -1,32 +0,0 @@ -The following cmds were run in the parent (viz) directory - - - -$ viz.py ../model_zoo/models/char_rnn/model_char_rnn.prototext prop=properties_rect.txt brief=1 ranksep=.7 output=examples/rnn_1 -$ viz.py ../model_zoo/models/char_rnn/model_char_rnn.prototext prop=properties_rect.txt brief=1 ranksep=.7 output=examples/rnn_1 output=jpg - - output: rnn_1.pdf, rnn_1.jpg - notes: - linked layers are enclosed by dotted rectangles - ranksep=.7 increases readability (IMO) - -$ viz.py ../model_zoo/models/char_rnn/model_char_rnn.prototext prop=properties_rect.txt brief=1 output=examples/rnn_1a - - output: rnn_1a.pdf - notes: didn't specify nodesep=.7; harder to interpret (IMO) - -$ viz.py ../model_zoo/models/char_rnn/model_char_rnn.prototext prop=properties_rect.txt ranksep=.7 output=examples/rnn_2 - - output: rnn_2.pdf - notes: same as above, but print layer names as well as types - -$ viz.py ../model_zoo/models/char_rnn/model_char_rnn.prototext prop=properties_rect.txt full=1 ranksep=.7 output=examples/rnn_3 -$ viz.py ../model_zoo/models/char_rnn/model_char_rnn.prototext prop=properties_rect.txt full=1 ranksep=.7 output=examples/rnn_3 format=jpg - - output: rnn_3.pdf, rnn_3.jpg - notes: 'full=1' prints all layer attributes - -$ viz.py ../model_zoo/models/char_rnn/model_char_rnn.prototext ranksep=.7 output=examples/rnn_4 - - output: rnn_3.pdf - notes: didn't specify properties file, so uses the default 'properties.txt' diff --git a/scripts/proto/examples/viz/README.md b/scripts/proto/examples/viz/README.md new file mode 100644 index 00000000000..f66e52aa20e --- /dev/null +++ b/scripts/proto/examples/viz/README.md @@ -0,0 +1,40 @@ +The following cmds were run in the LBANN root directory. 
+ +```shell +$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt brief=1 ranksep=.7 output=rnn_1 +$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt brief=1 ranksep=.7 output=rnn_1 output=jpg +``` + +* Output: `rnn_1.pdf`, `rnn_1.jpg` +* Notes: + * linked layers are enclosed by dotted rectangles + * `ranksep=.7` increases readability (IMO) + +```shell +$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt brief=1 output=rnn_1a +``` + +* Output: `rnn_1a.pdf` +* Notes: didn't specify `nodesep=.7`; harder to interpret (IMO) + +```shell +$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt ranksep=.7 output=rnn_2 +``` + +* Output: `rnn_2.pdf` +* Notes: same as above, but print layer names as well as types + +```shell +$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt full=1 ranksep=.7 output=rnn_3 +$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt full=1 ranksep=.7 output=rnn_3 format=jpg +``` + +* Output: `rnn_3.pdf`, `rnn_3.jpg` +* Notes: `full=1` prints all layer attributes + +```shell +$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext ranksep=.7 output=rnn_4 +``` + +* Output: `rnn_3.pdf` +* Notes: didn't specify properties file, so uses the default `properties.txt` diff --git a/scripts/proto/lbann/viz/properties.py b/scripts/proto/lbann/viz/properties.py index b18ed486c10..6e1efeed475 100644 --- a/scripts/proto/lbann/viz/properties.py +++ b/scripts/proto/lbann/viz/properties.py @@ -34,7 +34,7 @@ def __init__(self, fn) : k += 1 def shape(self, name) : - if not self._layers.has_key(name) : + if name not in self._layers : return 'rect' ''' print 'shape(): Nothing known about this layer:', name @@ -45,7 +45,7 @@ def 
shape(self, name) : return self._layers[name][0] def color(self, name) : - if not self._layers.has_key(name) : + if name not in self._layers : return 'grey' ''' print 'color(): Nothing known about this layer:', name @@ -56,7 +56,7 @@ def color(self, name) : return self._layers[name][1] def arrow(self, name) : - if not self._layers.has_key(name) : + if name not in self._layers : return 'grey' ''' print 'arrow(): Nothing known about this layer:', name diff --git a/scripts/proto/scripts/viz/viz.py b/scripts/proto/scripts/viz/lbviz similarity index 97% rename from scripts/proto/scripts/viz/viz.py rename to scripts/proto/scripts/viz/lbviz index bc29c32e47f..cf428047aa1 100755 --- a/scripts/proto/scripts/viz/viz.py +++ b/scripts/proto/scripts/viz/lbviz @@ -3,8 +3,8 @@ import os import pprint -from properties import * -from layer import * +from lbann.viz.properties import * +from lbann.viz.layer import * usage = ''' usage: %s model_fn.prototext [output=] [format=] [prop=] [full=1] [brief=1] [ranksep=] @@ -18,6 +18,7 @@ Note: some formats may take a while to render, so be patient. "prop" is the name of the properties file; default is "properties.txt" + in the same directory of lbviz. 
The properties file is a simple text file that lists colors and shapes for the various layer types @@ -57,7 +58,7 @@ def parsePrototext(fn) : #parse cmd line output_fn = "graph" output_format = "pdf" -prop_fn = "properties.txt" +prop_fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), "properties.txt") full = False brief = False ranksep=0 diff --git a/viz/properties.txt b/scripts/proto/scripts/viz/properties.txt similarity index 100% rename from viz/properties.txt rename to scripts/proto/scripts/viz/properties.txt diff --git a/viz/properties_rect.txt b/scripts/proto/scripts/viz/properties_rect.txt similarity index 100% rename from viz/properties_rect.txt rename to scripts/proto/scripts/viz/properties_rect.txt diff --git a/scripts/proto/setup.py b/scripts/proto/setup.py index 621cc107441..9ea3f1b6e87 100755 --- a/scripts/proto/setup.py +++ b/scripts/proto/setup.py @@ -24,7 +24,8 @@ def getLBANNVersion(): author="Lawrence Livermore National Security, LLC.", license="Apache 2.0", packages=["lbann"], - scripts=["scripts/plot/lbplot"], + scripts=["scripts/plot/lbplot", + "scripts/viz/lbviz"], install_requires=["protobuf>=3.6.1", "onnx>=1.3.0", "numpy>=1.16.0", diff --git a/viz/.gitignore b/viz/.gitignore deleted file mode 100644 index 17f6207b207..00000000000 --- a/viz/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -graph.dot -graph.pdf From 3f68ba1a5c87c8ab030f685c0b71eef950f713b6 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Thu, 7 Feb 2019 09:37:00 -0800 Subject: [PATCH 019/443] Use the Python-graphviz package --- scripts/proto/scripts/viz/lbviz | 67 ++++++++++++++------------------- scripts/proto/setup.py | 3 +- 2 files changed, 30 insertions(+), 40 deletions(-) diff --git a/scripts/proto/scripts/viz/lbviz b/scripts/proto/scripts/viz/lbviz index cf428047aa1..160fabb755e 100755 --- a/scripts/proto/scripts/viz/lbviz +++ b/scripts/proto/scripts/viz/lbviz @@ -1,7 +1,9 @@ -#!/usr/bin/python +#!/usr/bin/env python3 + from 
sys import * import os import pprint +from graphviz import Digraph from lbann.viz.properties import * from lbann.viz.layer import * @@ -135,16 +137,13 @@ def getLinkedLayers(layers) : #load properties database props = properties(prop_fn) - #parse the prototext file; 'layers' is a list of Layer objects layers = parsePrototext(argv[1]) - fixSequentialParents(layers) #get list of linked layer sets linked = getLinkedLayers(layers) - #build a couple of maps edges = {} name_to_type = {} @@ -160,17 +159,13 @@ for layer in layers : for p in parents : if p not in edges : edges[p] = set() - edges[p].add(name) + edges[p].add(name) #write the dot file -out = open('graph.dot', 'w') -out.write('digraph xyz {\n') -if ranksep > 0 : - out.write('graph[ranksep="' + str(ranksep) + '"]\n') +g = Digraph(format="pdf") +g.attr("graph", ranksep=str(ranksep)) -#write vertices -for parent in list(edges.keys()) : - try : +for parent in edges.keys(): type = name_to_type[parent] label = '' if brief: @@ -183,12 +178,15 @@ for parent in list(edges.keys()) : label += '
' for x in attr : label += x + '
' - label += '> ' - except : - print('\n\ncaught exception; parent:', parent) - exit(9) - out.write(' ' + parent + '[label=' + label + ' shape=' + props.shape(type) + ', style=filled, fillcolor=' + props.color(type) + ']\n') + label += ' >' + + g.node( + parent, + label=label, + shape=props.shape(type), + style="filled", + fillcolor=props.color(type)) #write edges for parent in list(edges.keys()) : @@ -196,31 +194,22 @@ for parent in list(edges.keys()) : for child in edges[parent] : child_type = name_to_type[child] if type == 'slice' : - out.write(parent + ' -> ' + child + '[color=red, penwidth=2.0];') + g.edge(parent, child, + color="red", penwidth="2.0") elif type == 'split' : - out.write(parent + ' -> ' + child + '[color=darkorange, penwidth=2.0];') + g.edge(parent, child, + color="darkorange", penwidth="2.0") elif child_type == 'sum' : - out.write(parent + ' -> ' + child + '[color=deepskyblue, penwidth=2.0];') + g.edge(parent, child, + color="deepskyblue", penwidth="2.0") else : - out.write(parent + ' -> ' + child + '[];\n') - + g.edge(parent, child) #alternatove to above: use subgraphs #write linked layer subgraphs -n = 0 -for x in linked : - out.write('subgraph cluster_' + str(n) + ' {\n') - out.write(' style=dashed;\n') - n += 1 - for node in x : - out.write(' '+ node + ';\n') - out.write('}\n') - -out.write('}\n') -out.close() - -#run graphviz -cmd = 'dot -T' + output_format + ' graph.dot -o' + output_fn + '.' 
+ output_format -print() -print('about to run:', cmd) -os.system(cmd) +for n, x in enumerate(linked): + with g.subgraph(name="cluster_"+str(n), style="dashed") as sg: + for node in x: + sg.node(node) + +g.render("out") diff --git a/scripts/proto/setup.py b/scripts/proto/setup.py index 9ea3f1b6e87..dce1caf5aea 100755 --- a/scripts/proto/setup.py +++ b/scripts/proto/setup.py @@ -30,7 +30,8 @@ def getLBANNVersion(): "onnx>=1.3.0", "numpy>=1.16.0", "matplotlib>=2.0.2", - "texttable==1.4.0", + "graphviz>=0.10.1", + "texttable>=1.4.0", "nose>=1.3.7"], test_suite="nose.collector", tests_require=["nose"], From 189e4bb4912fc3b18b2bcb5123f7cc2424d1e734 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Fri, 8 Feb 2019 09:30:59 -0800 Subject: [PATCH 020/443] Implement lp.Model.render --- scripts/proto/MANIFEST.in | 2 + scripts/proto/lbann/proto.py | 11 ++ scripts/proto/lbann/viz/__init__.py | 155 ++++++++++++++++++ scripts/proto/lbann/viz/layer.py | 7 +- .../{properties.py => properties/__init__.py} | 2 +- .../viz/properties}/properties.txt | 0 .../viz/properties}/properties_rect.txt | 0 scripts/proto/scripts/viz/lbviz | 155 +++--------------- scripts/proto/setup.py | 1 + 9 files changed, 195 insertions(+), 138 deletions(-) create mode 100644 scripts/proto/MANIFEST.in create mode 100644 scripts/proto/lbann/viz/__init__.py rename scripts/proto/lbann/viz/{properties.py => properties/__init__.py} (98%) rename scripts/proto/{scripts/viz => lbann/viz/properties}/properties.txt (100%) rename scripts/proto/{scripts/viz => lbann/viz/properties}/properties_rect.txt (100%) diff --git a/scripts/proto/MANIFEST.in b/scripts/proto/MANIFEST.in new file mode 100644 index 00000000000..c88a9dd1d7a --- /dev/null +++ b/scripts/proto/MANIFEST.in @@ -0,0 +1,2 @@ +include lbann/viz/properties/properties.txt +include lbann/viz/properties/properties_rect.txt diff --git a/scripts/proto/lbann/proto.py b/scripts/proto/lbann/proto.py index 
85472bea296..1d99d8b883b 100644 --- a/scripts/proto/lbann/proto.py +++ b/scripts/proto/lbann/proto.py @@ -38,6 +38,8 @@ else: raise # Give up. +from lbann.viz import getGraphFromModel + def _add_to_module_namespace(stuff): """Add stuff to the module namespace. @@ -544,6 +546,15 @@ def export_proto(self): return model + def render(self, filename, format="pdf", **kwargs): + """ + Save a vizualized graph of the network to `filename`.`format`. + This function passes `kwargs` to `lbann.viz.getGraphFromModel`. + """ + g = getGraphFromModel(self, format=format, + **kwargs) + g.render(filename) + # ============================================== # Export models # ============================================== diff --git a/scripts/proto/lbann/viz/__init__.py b/scripts/proto/lbann/viz/__init__.py new file mode 100644 index 00000000000..5151e313160 --- /dev/null +++ b/scripts/proto/lbann/viz/__init__.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python3 + +from sys import * +import os +import os.path +import pprint +from graphviz import Digraph + +from lbann.viz.layer import Layer +from lbann.viz.properties import Properties + +def fixSequentialParents(layers) : + '''a hack for models that don't contain parent and children fields''' + num_layers_with_parents = 0 + num_layers_with_children = 0 + for layer in layers : + if len(layer.parents()) != 0 : num_layers_with_parents += 1 + if len(layer.children()) != 0 : num_layers_with_children += 1 + if num_layers_with_parents == 0 : + print() + print('NOTE: this model does not appear to have any parent fields;') + print(' dealing with that ...') + print() + assert(num_layers_with_children == 0) + for j in range(1, len(layers)) : + layers[j].setParents(layers[j-1]) + +#WARNING: this works for tim's rnn prototext, but may not generalize +def getLinkedLayers(layers) : + r = [] + w = {} + for layer in layers : + my_name = layer.name() + links = layer.linkedLayers() + for x in links : + if my_name == x : + w[my_name] = set([my_name]) + for layer in 
layers : + links = layer.linkedLayers() + my_name = layer.name() + for x in links : + if my_name != x : + if my_name in w : + w[my_name].add(x) + elif x in w : + w[x].add(my_name) + else : + print('error') + exit(9) + + for x in list(w.keys()) : + if len(w[x]) > 1 : + r.append(w[x]) + return r + +def getGraphFromModel(model, **kwargs): + """ + Create a `graphviz.Digraph` object that represents `model`. + This function passes `kwargs` to `lbann.viz.getGraphFromPrototext`. + """ + + return getGraphFromPrototext(model.export_proto()) + +def getGraphFromPrototext(proto, format="pdf", + props=None, full=False, brief=False, + ranksep=0): + """ + Create a `graphviz.Digraph` object from `proto`. + The `format` argument is used as an extension when the resulting + graph is rendered. + """ + + if props is None: + props = Properties( + os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "properties", + "properties.txt")) + + layers = [Layer(str(l).strip().split("\n")) for l in proto.layer] + + fixSequentialParents(layers) + + #get list of linked layer sets + linked = getLinkedLayers(layers) + + #build a couple of maps + edges = {} + name_to_type = {} + attributes = {} + for layer in layers : + name = layer.name() + parents = layer.parents() + + #children = layer.children() + attributes[name] = layer.attributes() + type = layer.type() + name_to_type[name] = type + for p in parents : + if p not in edges : + edges[p] = set() + edges[p].add(name) + + #write the dot file + g = Digraph(format=format) + g.attr("graph", ranksep=str(ranksep)) + + for parent in edges.keys(): + type = name_to_type[parent] + label = '' + if brief: + label = '<' + type + '' + else : + label = '<' + type + '
name: ' + parent + if full : + attr = attributes[parent] + if len(attr) : + label += '
' + for x in attr : + label += x + '
' + + label += ' >' + + g.node( + parent, + label=label, + shape=props.shape(type), + style="filled", + fillcolor=props.color(type)) + + #write edges + for parent in list(edges.keys()) : + type = name_to_type[parent] + for child in edges[parent] : + child_type = name_to_type[child] + if type == 'slice' : + g.edge(parent, child, + color="red", penwidth="2.0") + elif type == 'split' : + g.edge(parent, child, + color="darkorange", penwidth="2.0") + elif child_type == 'sum' : + g.edge(parent, child, + color="deepskyblue", penwidth="2.0") + else : + g.edge(parent, child) + + #alternatove to above: use subgraphs + #write linked layer subgraphs + for n, x in enumerate(linked): + with g.subgraph(name="cluster_"+str(n), style="dashed") as sg: + for node in x: + sg.node(node) + + return g diff --git a/scripts/proto/lbann/viz/layer.py b/scripts/proto/lbann/viz/layer.py index c5467a4f9d5..17f50809991 100644 --- a/scripts/proto/lbann/viz/layer.py +++ b/scripts/proto/lbann/viz/layer.py @@ -2,11 +2,16 @@ class Layer : def __init__(self, a) : - self._layer = self.__getLayer(a) + # Since the protobuf parser is somehow hard-corded in this script, + # __getLayer does not match to prototexts generated from lbann.proto. 
+ # self._layer = self.__getLayer(a) + self._layer = a + self._parents = [] self._children = [] self._linked_layers = [] self._attr = [] + for line in self._layer : if line.find('name:') != -1 : t = line.split() diff --git a/scripts/proto/lbann/viz/properties.py b/scripts/proto/lbann/viz/properties/__init__.py similarity index 98% rename from scripts/proto/lbann/viz/properties.py rename to scripts/proto/lbann/viz/properties/__init__.py index 6e1efeed475..d9d65691391 100644 --- a/scripts/proto/lbann/viz/properties.py +++ b/scripts/proto/lbann/viz/properties/__init__.py @@ -1,6 +1,6 @@ import pprint -class properties : +class Properties : def __init__(self, fn) : a = open(fn).readlines() shapes = {} diff --git a/scripts/proto/scripts/viz/properties.txt b/scripts/proto/lbann/viz/properties/properties.txt similarity index 100% rename from scripts/proto/scripts/viz/properties.txt rename to scripts/proto/lbann/viz/properties/properties.txt diff --git a/scripts/proto/scripts/viz/properties_rect.txt b/scripts/proto/lbann/viz/properties/properties_rect.txt similarity index 100% rename from scripts/proto/scripts/viz/properties_rect.txt rename to scripts/proto/lbann/viz/properties/properties_rect.txt diff --git a/scripts/proto/scripts/viz/lbviz b/scripts/proto/scripts/viz/lbviz index 160fabb755e..db987591263 100755 --- a/scripts/proto/scripts/viz/lbviz +++ b/scripts/proto/scripts/viz/lbviz @@ -4,9 +4,11 @@ from sys import * import os import pprint from graphviz import Digraph +import google.protobuf.text_format as txtf -from lbann.viz.properties import * -from lbann.viz.layer import * +from lbann.proto import lbann_pb2 +from lbann.viz import getGraphFromPrototext +from lbann.viz.properties import Properties usage = ''' usage: %s model_fn.prototext [output=] [format=] [prop=] [full=1] [brief=1] [ranksep=] @@ -40,17 +42,6 @@ note: in addition to the output file, an intermediate file called 'graph.dot' will be written ''' % argv[0] - 
-#===================================================================== -def parsePrototext(fn) : - '''returns a list of Layers''' - a = open(fn).readlines() - r = [] - for j in range(len(a)) : - if (a[j].find('layer {') != -1 or a[j].find('layer{') != -1) and a[j].find('#') == -1 : - r.append(Layer(a[j:])) - return r - #===================================================================== if len(argv) < 2 : @@ -60,7 +51,7 @@ if len(argv) < 2 : #parse cmd line output_fn = "graph" output_format = "pdf" -prop_fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), "properties.txt") +prop_fn = None full = False brief = False ranksep=0 @@ -85,131 +76,23 @@ for j in range(2, len(argv)) : print(usage) exit(9) -#===================================================================== -def fixSequentialParents(layers) : - '''a hack for models that don't contain parent and children fields''' - num_layers_with_parents = 0 - num_layers_with_children = 0 - for layer in layers : - if len(layer.parents()) != 0 : num_layers_with_parents += 1 - if len(layer.children()) != 0 : num_layers_with_children += 1 - if num_layers_with_parents == 0 : - print() - print('NOTE: this model does not appear to have any parent fields;') - print(' dealing with that ...') - print() - assert(num_layers_with_children == 0) - for j in range(1, len(layers)) : - layers[j].setParents(layers[j-1]) +#load properties database +props = Properties(prop_fn) if prop_fn is not None else None +with open(argv[1], "r") as f: + s = f.read().strip() -#===================================================================== -#WARNING: this works for tim's rnn prototext, but may not generalize -def getLinkedLayers(layers) : - r = [] - w = {} - for layer in layers : - my_name = layer.name() - links = layer.linkedLayers() - for x in links : - if my_name == x : - w[my_name] = set([my_name]) - for layer in layers : - links = layer.linkedLayers() - my_name = layer.name() - for x in links : - if my_name != x : - if my_name in 
w : - w[my_name].add(x) - elif x in w : - w[x].add(my_name) - else : - print('error') - exit(9) - - for x in list(w.keys()) : - if len(w[x]) > 1 : - r.append(w[x]) - return r +pb = lbann_pb2.LbannPB() +txtf.Merge(s, pb) -#===================================================================== +print(pb.model) -#load properties database -props = properties(prop_fn) - -#parse the prototext file; 'layers' is a list of Layer objects -layers = parsePrototext(argv[1]) -fixSequentialParents(layers) - -#get list of linked layer sets -linked = getLinkedLayers(layers) - -#build a couple of maps -edges = {} -name_to_type = {} -attributes = {} -for layer in layers : - name = layer.name() - parents = layer.parents() - - #children = layer.children() - attributes[name] = layer.attributes() - type = layer.type() - name_to_type[name] = type - for p in parents : - if p not in edges : - edges[p] = set() - edges[p].add(name) - -#write the dot file -g = Digraph(format="pdf") -g.attr("graph", ranksep=str(ranksep)) - -for parent in edges.keys(): - type = name_to_type[parent] - label = '' - if brief: - label = '<' + type + '' - else : - label = '<' + type + '
name: ' + parent - if full : - attr = attributes[parent] - if len(attr) : - label += '
' - for x in attr : - label += x + '
' - - label += ' >' - - g.node( - parent, - label=label, - shape=props.shape(type), - style="filled", - fillcolor=props.color(type)) - -#write edges -for parent in list(edges.keys()) : - type = name_to_type[parent] - for child in edges[parent] : - child_type = name_to_type[child] - if type == 'slice' : - g.edge(parent, child, - color="red", penwidth="2.0") - elif type == 'split' : - g.edge(parent, child, - color="darkorange", penwidth="2.0") - elif child_type == 'sum' : - g.edge(parent, child, - color="deepskyblue", penwidth="2.0") - else : - g.edge(parent, child) - -#alternatove to above: use subgraphs -#write linked layer subgraphs -for n, x in enumerate(linked): - with g.subgraph(name="cluster_"+str(n), style="dashed") as sg: - for node in x: - sg.node(node) +g = getGraphFromPrototext( + pb.model, + output_format, + props, + full, + brief, + ranksep) g.render("out") diff --git a/scripts/proto/setup.py b/scripts/proto/setup.py index dce1caf5aea..f0a613f64c8 100755 --- a/scripts/proto/setup.py +++ b/scripts/proto/setup.py @@ -35,4 +35,5 @@ def getLBANNVersion(): "nose>=1.3.7"], test_suite="nose.collector", tests_require=["nose"], + include_package_data=True ) From 5e00756594ccae36b8adb4633251b6a92e376e57 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Fri, 8 Feb 2019 12:50:37 -0800 Subject: [PATCH 021/443] Added function to pad a string to a fixed length with user defined leading characters. Updated the JAG Conduit data store to pad out the sample list index to a fixed length for testing the behavior of the per sample data store exchange. 
--- include/lbann/utils/file_utils.hpp | 13 +++++++++++++ src/data_readers/data_reader_jag_conduit.cpp | 8 ++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/include/lbann/utils/file_utils.hpp b/include/lbann/utils/file_utils.hpp index f654a0a1615..4b882ef3cd5 100644 --- a/include/lbann/utils/file_utils.hpp +++ b/include/lbann/utils/file_utils.hpp @@ -76,6 +76,19 @@ inline void __swapEndianInt(unsigned int& ui) { ui = ((ui >> 24) | ((ui<<8) & 0x00FF0000) | ((ui>>8) & 0x0000FF00) | (ui << 24)); } +// The generic approach +template +std::basic_string pad(const std::basic_string& s, + typename std::basic_string::size_type n, T c) { + if (n > s.length()) { + std::string t = s; + t.insert(t.begin(), n - t.length(), c); + return t; + }else { + return s; + } +} + namespace file { /** @brief Wrapper around @c dirname. diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index a3c997ddb18..17b1d645865 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -1084,7 +1084,7 @@ data_reader_jag_conduit::get_image_data(const size_t sample_id, conduit::Node& s for (const auto& emi_tag : m_emi_image_keys) { const std::string conduit_field = m_output_image_prefix + emi_tag; - const std::string conduit_obj = '/' + std::to_string(sample_id) + '/' + conduit_field; + const std::string conduit_obj = '/' + pad(std::to_string(sample_id), 5, '0') + '/' + conduit_field; if(sample[conduit_obj].schema().dtype().is_empty()) { if (data_store_active()) { LBANN_ERROR("Unable to find field " + conduit_obj @@ -1207,7 +1207,7 @@ std::vector data_reader_jag_conduit::get_scal for(const auto key: m_scalar_keys) { std::string conduit_field = m_output_scalar_prefix + key; - std::string conduit_obj = '/' + std::to_string(sample_id) + '/' + conduit_field; + std::string conduit_obj = '/' + pad(std::to_string(sample_id), 5, '0') + '/' + conduit_field; 
if(sample[conduit_obj].schema().dtype().is_empty()) { if (data_store_active()) { LBANN_ERROR("Unable to find field " + conduit_obj @@ -1238,7 +1238,7 @@ std::vector data_reader_jag_conduit::get_input // avoid some overhead by taking advantage of the fact that all the variables are of the same type for(const auto key: m_input_keys) { const std::string conduit_field = m_input_prefix + key; - const std::string conduit_obj = '/' + std::to_string(sample_id) + '/' + conduit_field; + const std::string conduit_obj = '/' + pad(std::to_string(sample_id), 5, '0') + '/' + conduit_field; if(sample[conduit_obj].schema().dtype().is_empty()) { if (data_store_active()) { LBANN_ERROR("Unable to find field " + conduit_obj @@ -1256,7 +1256,7 @@ std::vector data_reader_jag_conduit::get_input } else { for(const auto key: m_input_keys) { const std::string conduit_field = m_input_prefix + key; - const std::string conduit_obj = '/' + std::to_string(sample_id) + '/' + conduit_field; + const std::string conduit_obj = '/' + pad(std::to_string(sample_id), 5, '0') + '/' + conduit_field; if(sample[conduit_obj].schema().dtype().is_empty()) { if (data_store_active()) { LBANN_ERROR("Unable to find field " + conduit_obj From c6ca76a4b3550db28137a09bf899eb4a48df5090 Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Fri, 8 Feb 2019 14:22:30 -0800 Subject: [PATCH 022/443] test --- src/data_store/data_store_jag.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 2b9987446bf..71e1afc8d76 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -36,7 +36,6 @@ #include "lbann/models/model.hpp" #include - namespace lbann { std::ofstream debug; From 4234f22edd9ff2c1a8ba24805c84e740bdb35872 Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Fri, 8 Feb 2019 15:20:59 -0800 Subject: [PATCH 023/443] added m_ds_indices. This was formerly called m_all_minibatch_indices. 
added build_ds_indices(), which fills in m_ds_indices. and m_owner; this was formerly called exchange_ds_indices(). The old version employed all-to-all communication. The new version does not use any communication. --- include/lbann/data_store/data_store_jag.hpp | 8 +++- src/data_store/data_store_jag.cpp | 46 ++++++--------------- 2 files changed, 19 insertions(+), 35 deletions(-) diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index 2a0dd7302b8..f087cfde027 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -110,8 +110,12 @@ protected : /// called by exchange_data void build_node_for_sending(const conduit::Node &node_in, conduit::Node &node_out); - /// fills in m_owner, which maps an index to the owning processor; - void exchange_ds_indices(); + /// m_ds_indices[j] contains the sample indices (data store (ds) indices) + // for the samples that P_j owns + std::vector> m_ds_indices; + + /// fills in m_ds_indices and m_owner + void build_ds_indices(); }; } // namespace lbann diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 71e1afc8d76..1fedf6e048c 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -66,6 +66,7 @@ void data_store_jag::setup() { } generic_data_store::setup(); + build_ds_indices(); m_super_node = options::get()->get_bool("super_node"); if (m_master) { @@ -105,8 +106,6 @@ void data_store_jag::setup_data_store_buffers() { m_recv_buffer.resize(m_np); m_reconstituted.resize(m_data.size()); - - exchange_ds_indices(); } // this gets called at the beginning of each epoch (except for epoch 0) @@ -373,7 +372,6 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) m_recv_buffer.resize(sz); m_status.resize(sz); - exchange_ds_indices(); // sanity check /* int n = 0; @@ -585,37 +583,19 @@ debug << "TOTAL Time to unpack incoming data: " << 
get_time() - tmw << "\n"; debug.close(); debug.open(b, std::ios::app); } -void data_store_jag::exchange_ds_indices() { - std::vector counts(m_np); - int my_num_indices = m_data.size(); - m_comm->trainer_all_gather(my_num_indices, counts); - - //setup data structures to exchange minibatch indices with all processors - //displacement vector - std::vector displ(m_np); - displ[0] = 0; - for (size_t j=1; j all_indices(n); - - //receive the indices - std::vector v; - v.reserve(m_data.size()); - for (auto t : m_data) { - v.push_back(t.first); - } - m_comm->all_gather(v, all_indices, counts, displ, m_comm->get_trainer_comm()); - - //fill in the final data structure +// fills in m_ds_indices and m_owner +void data_store_jag::build_ds_indices() { m_owner.clear(); - for (int p=0; p> proc_to_indices(m_np); + size_t j = 0; + for (size_t i = 0; i < m_shuffled_indices->size(); i++) { + auto index = (*m_shuffled_indices)[i]; + m_ds_indices[j].insert(index); + m_owner[index] = j; + j = (j + 1) % m_np; } } From 14d671cd2d5775ec5646007f29dbfed1abbc53ad Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Fri, 8 Feb 2019 15:28:26 -0800 Subject: [PATCH 024/443] deleted exchange_ds_indices(), since it's been replaced by build_ds_indices. Removed a couple of comments that are no longer relevant (code cleanup). name changes from m_all_minibatch_indices to m_ds_indices. 
--- src/data_store/data_store_jag.cpp | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 1fedf6e048c..05a9da9bafb 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -121,12 +121,6 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s //build map: proc -> global indices that P_x needs for this epoch, and // which I own - //@TODO: change m_all_minibatch_indices from vector> to - //vector>; then: - // const std::unordered_set> &my_datastore_indices;m_rank] - // - // Hm ... I think m_all_minibatch_indices is identical to ds indices - double tma = get_time(); std::vector> proc_to_indices(m_np); @@ -342,19 +336,6 @@ void data_store_jag::build_node_for_sending(const conduit::Node &node_in, condui } -#if 0 -void data_store_jag::build_all_minibatch_indices() { - m_all_minibatch_indices.clear(); - m_owner.clear(); - m_all_minibatch_indices.resize(m_np); - for (size_t idx=0; idxsize(); ++idx) { - int owner = idx % m_np; - m_owner[idx] = owner; - m_all_minibatch_indices[owner].push_back(idx); - } -} -#endif - void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) { double tm1 = get_time(); @@ -388,19 +369,8 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) // which I own // build map: owner -> set of indices I need that owner has - //@TODO: change m_all_minibatch_indices from vector> to - //vector>; then: - // const std::unordered_set> &my_datastore_indices;m_rank] - // - // Hm ... 
I think m_all_minibatch_indices is identical to ds indices - double tma = get_time(); - // std::unordered_set my_ds_indices; - // for (auto t : m_all_minibatch_indices[m_rank]) { - // my_ds_indices.insert(t); - // } - std::vector> proc_to_indices(m_np); // get indices that I need for this epoch; these correspond to // samples that this proc receives from others From 884e20616a851a580308670984f4a7ce5c16b1c2 Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Sat, 9 Feb 2019 14:08:30 -0800 Subject: [PATCH 025/443] removed "bool any_node" argument from get_conduit_node, since this is no longer used. Much code cleanup, mostly removing debug statements. Some of these may be useful, but for now they're cluttering up the code too much. --- include/lbann/data_store/data_store_jag.hpp | 2 +- src/data_store/data_store_jag.cpp | 106 ++------------------ 2 files changed, 9 insertions(+), 99 deletions(-) diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index f087cfde027..ecd3620ac5c 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -60,7 +60,7 @@ class data_store_jag : public generic_data_store { void setup() override; /// returns the conduit node - const conduit::Node & get_conduit_node(int data_id, bool any_node = false) const; + const conduit::Node & get_conduit_node(int data_id) const; void set_conduit_node(int data_id, conduit::Node &node); diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 05a9da9bafb..0db7d8c43e5 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -244,70 +244,26 @@ void data_store_jag::set_conduit_node(int data_id, conduit::Node &node) { node["id"] = data_id; conduit::Node n2; build_node_for_sending(node, n2); - - // if(n2.total_bytes_compact() != 201624) { - - // // debug.open(b, std::ios::app); - // std::cout << "set_conduit_node sample size node " << data_id << 
" node : " << node.total_bytes_compact() << "\n"; - // // node.print(); - // std::cout << "set_conduit_node sample size node " << data_id << " node n2 : " << n2.total_bytes_compact() << "\n"; - // n2.print(); - // MPI_Barrier(MPI_COMM_WORLD); - // MPI_Abort(MPI_COMM_WORLD, -1); - // } - // debug.close(); - // debug.open(b, std::ios::app); - m_data[data_id] = n2; } else { m_data[data_id] = node; - /* debug block, to test if idx matches the id in the conduit node; - * if these don't match up exceptions will be thrown in get_conduit_node - * - if (m_master) { - std::cerr<<"data id:" <::const_iterator t = m_data.find(data_id); + if (t != m_data.end()) { + return t->second; } - { - std::unordered_map::const_iterator t = m_data.find(data_id); - if (t != m_data.end()) { - return t->second; - } - } - - /// check the main m_data as well - std::unordered_map::const_iterator t = m_minibatch_data.find(data_id); - if (t == m_minibatch_data.end()) { - debug << "failed to find data_id: " << data_id << " in m_minibatch_data; m_minibatch_data.size: " << m_minibatch_data.size() << "\n"; - debug << "data IDs that we know about (these are the keys in the m_minibatch_data map): "; - std::set s3; - for (auto t3 : m_minibatch_data) { - s3.insert(t3.first); - } - for (auto t3 : s3) debug << t3 << " "; - debug << "\n"; - int owner = m_owner.at(data_id); - debug << "I believe that the owner is " << std::to_string(owner) << "\n"; - debug.close(); - debug.open(b, std::ios::app); - + std::unordered_map::const_iterator t2 = m_minibatch_data.find(data_id); + if (t2 == m_minibatch_data.end()) { LBANN_ERROR("failed to find data_id: " + std::to_string(data_id) + " in m_minibatch_data; m_minibatch_data.size: " + std::to_string(m_minibatch_data.size()) + "; epoch:" + std::to_string(m_model->get_cur_epoch())); } - return t->second; + return t2->second; } // code in the following method is a modification of code from @@ -390,38 +346,12 @@ double tma = get_time(); j = (j + 1) % m_np; } } - { - 
debug.open(b, std::ios::app); - debug << "preparing to send the following indices: " << "\n"; - for (int p=0; p Date: Sun, 10 Feb 2019 07:19:33 -0800 Subject: [PATCH 026/443] refactoring and renaming; new functions: build_indices_i_will_receive build_indices_i_will_send build_owner_map code cleanup, better documentation exchange_data_by_sample is temporarily commented out (again) >>> compiles, but not yet tested <<< --- include/lbann/data_store/data_store_jag.hpp | 28 ++++-- src/data_store/data_store_jag.cpp | 103 ++++++++++++-------- 2 files changed, 81 insertions(+), 50 deletions(-) diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index ecd3620ac5c..37a9a47d55d 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -68,7 +68,6 @@ protected : bool m_super_node; - /// retrive data needed for passing to the data reader for the next epoch /// this is pure virtual in generic_data_reader, so must include it for /// now. May go away when we refactore/revise all of data_store void exchange_data() override {} @@ -82,7 +81,7 @@ protected : } void exchange_data_by_super_node(size_t current_pos, size_t mb_size); void exchange_data_by_sample(size_t current_pos, size_t mb_size); - void setup_data_store_buffers(); + // when m_super_node = false std::unordered_map m_index_to_data_id; @@ -105,17 +104,32 @@ protected : std::vector m_outgoing_msg_sizes; std::vector m_incoming_msg_sizes; + /// ??? as our code currently stands (sun, 10 feb) this isn't necessary + /// -- but it's being used. 
@TODO: revisit std::vector m_reconstituted; + void setup_data_store_buffers(); + /// called by exchange_data void build_node_for_sending(const conduit::Node &node_in, conduit::Node &node_out); - /// m_ds_indices[j] contains the sample indices (data store (ds) indices) - // for the samples that P_j owns - std::vector> m_ds_indices; + /// fills in mowner, which maps index -> owning processor + void build_owner_map(); + + /// maps processor id -> set of indices (whose associated samples) + /// this proc needs to send. (formerly called "proc_to_indices) + std::vector> m_indices_to_send; + + /// fills in m_indices_to_send + void build_indices_i_will_send(int current_pos, int mb_size); + + /// maps processor id -> set of indices (whose associated samples) + /// this proc needs to recv from others. (formerly called "needed") + std::vector> m_indices_to_recv; + + /// fills in m_indices_to_recv + void build_indices_i_will_recv(int current_pos, int mb_size); - /// fills in m_ds_indices and m_owner - void build_ds_indices(); }; } // namespace lbann diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 0db7d8c43e5..42a2cc0c1e1 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -66,7 +66,7 @@ void data_store_jag::setup() { } generic_data_store::setup(); - build_ds_indices(); + build_owner_map(); m_super_node = options::get()->get_bool("super_node"); if (m_master) { @@ -115,56 +115,34 @@ void data_store_jag::setup_data_store_buffers() { // handle things ourselves. 
TODO: possible modify conduit to // handle non-blocking comms void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_size) { - // double tm1 = get_time(); - - //======================================================================== - //build map: proc -> global indices that P_x needs for this epoch, and - // which I own - - double tma = get_time(); + double tm1 = get_time(); - std::vector> proc_to_indices(m_np); - /// Within a trainer the shuffled indices are distributed round - /// robin across ranks - size_t j = 0; - for (auto i = current_pos; i < current_pos + mb_size; i++) { - auto index = (*m_shuffled_indices)[i]; - /// If this rank owns the index send it to the j'th rank - if (m_data.find(index) != m_data.end()) { - proc_to_indices[j].insert(index); - } - j = (j + 1) % m_np; - } //======================================================================== //part 1: exchange the sizes of the data // m_send_buffer[j] is a conduit::Node that contains // all samples that this proc will send to P_j -tma = get_time(); -//double t1 = 0; -//double t2 = 0; + double tma = get_time(); + build_indices_i_will_send(current_pos, mb_size); + // construct a super node for each processor; the super node + // contains all samples this proc owns that other procs need for (int p=0; psize(); j += m_np) { + auto index = (*m_shuffled_indices)[j]; + int owner = m_owner[index]; + m_indices_to_recv[owner].insert(index); + j = (j + 1) % m_np; + } +} + +void data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { + m_indices_to_send.clear(); + m_indices_to_send.resize(m_np); + size_t j = 0; + for (auto i = current_pos; i < current_pos + mb_size; i++) { + auto index = (*m_shuffled_indices)[i]; + /// If this rank owns the index send it to the j'th rank + if (m_data.find(index) != m_data.end()) { + m_indices_to_send[j].insert(index); + } + j = (j + 1) % m_np; + } +} + +void data_store_jag::build_owner_map() { + m_owner.clear(); + size_t j = 0; + for 
(size_t i = 0; i < m_shuffled_indices->size(); i++) { + auto index = (*m_shuffled_indices)[i]; + m_owner[index] = j; + j = (j + 1) % m_np; + } +} } // namespace lbann From 2fb112e96c6e192af899f2786d679aae92ce0537 Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Sun, 10 Feb 2019 12:54:39 -0800 Subject: [PATCH 027/443] 1. Added method to compute super_node overhead; this will be used (in a susequent commit) to eliminate a round of all-to-all comms. 2. Changed some "auto t" to "auto &t" to avoid copies 3. Verified that, using super_node mode, the code gives correct resuls (see comment on previous commit) --- include/lbann/data_store/data_store_jag.hpp | 8 +++++ src/data_store/data_store_jag.cpp | 38 +++++++++++++++++---- 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index 37a9a47d55d..7b939e6c9fb 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -104,6 +104,14 @@ protected : std::vector m_outgoing_msg_sizes; std::vector m_incoming_msg_sizes; + /// overhead incurred by the super_node; this is constant, + /// regardless of the number of samples contained in the super_node; + /// assumes the super_node contains at least two samples + int m_super_node_overhead; + + /// assignes a value to m_super_node_overhead + void compute_super_node_overhead(); + /// ??? as our code currently stands (sun, 10 feb) this isn't necessary /// -- but it's being used. 
@TODO: revisit std::vector m_reconstituted; diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 42a2cc0c1e1..15680306472 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -44,7 +44,8 @@ char b[1024]; data_store_jag::data_store_jag( generic_data_reader *reader, model *m) : generic_data_store(reader, m), - m_super_node(false) { + m_super_node(false), + m_super_node_overhead(0) { set_name("data_store_jag"); } @@ -124,6 +125,7 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s // all samples that this proc will send to P_j double tma = get_time(); + compute_super_node_overhead(); build_indices_i_will_send(current_pos, mb_size); // construct a super node for each processor; the super node @@ -199,15 +201,13 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s m_reconstituted[p].update_external(n_msg["data"]); const std::vector &names = m_reconstituted[p].child_names(); - for (auto t : names) { + for (auto &t : names) { m_minibatch_data[atoi(t.c_str())][t].update_external(m_reconstituted[p][t]); } } debug << "TOTAL Time to unpack and break up all incoming data: " << get_time() - tmw << "\n"; - if (m_master) std::cout << "data_store_jag::exchange_data Time: " << get_time() - tm1 << "\n"; - debug << "TOTAL exchange_data Time: " << get_time() - tm1 << "\n"; } @@ -249,6 +249,7 @@ const conduit::Node & data_store_jag::get_conduit_node(int data_id) const { // code in the following method is a modification of code from // conduit/src/libs/relay/conduit_relay_mpi.cpp void data_store_jag::build_node_for_sending(const conduit::Node &node_in, conduit::Node &node_out) { + node_out.reset(); conduit::Schema s_data_compact; if( node_in.is_compact() && node_in.is_contiguous()) { s_data_compact = node_in.schema(); @@ -293,7 +294,7 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) // sanity check /* int n = 0; - for 
(auto t : m_data) { + for (auto &t : m_data) { if (t.second.total_bytes_compact() != n) { LBANN_ERROR("t.total_bytes_compact() != n; " + std::to_string(n) + " " + std::to_string(t.second.total_bytes_compact())); } @@ -329,7 +330,7 @@ double tma = get_time(); } int sample_size = 0; - for (auto t : m_data) { + for (auto &t : m_data) { if(sample_size == 0) { sample_size = t.second.total_bytes_compact(); } else { @@ -431,7 +432,7 @@ double tmw = get_time(); nd.update(n_msg["data"]); m_minibatch_data[nd["id"].value()] = nd; } -for (auto t : m_minibatch_data) { +for (auto &t : m_minibatch_data) { debug << t.first << " "; } debug << "\n"; @@ -496,6 +497,29 @@ void data_store_jag::build_owner_map() { } } +void data_store_jag::compute_super_node_overhead() { + if (m_super_node_overhead != 0) { + return; + } + if (m_data.size() < 2) { + LBANN_ERROR("m_data must contain at least two sample nodes"); + } + conduit::Node n2; + conduit::Node n3; + int first = 0; + for (auto &t : m_data) { + n2.update_external(t.second); + build_node_for_sending(n2, n3); + if (first == 0) { + first = n3.total_bytes_compact(); + } else { + m_super_node_overhead = 2*first - n3.total_bytes_compact(); + if (m_master) std::cerr << "m_super_node_overhead: " << m_super_node_overhead << "\n"; + return; + } + } +} + } // namespace lbann #endif //#ifdef LBANN_HAS_CONDUIT From 1287a46115530cb87b2c5cf1961456cf76e7c5fa Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Mon, 11 Feb 2019 07:15:59 -0800 Subject: [PATCH 028/443] removing calls to exchange data sizes. 
Compiles but crashes: message sizes apparently aren't being properly computed --- include/lbann/data_store/data_store_jag.hpp | 5 +- src/data_store/data_store_jag.cpp | 142 ++++++++++++-------- 2 files changed, 92 insertions(+), 55 deletions(-) diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index 7b939e6c9fb..55f8ba2d465 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -109,7 +109,10 @@ protected : /// assumes the super_node contains at least two samples int m_super_node_overhead; - /// assignes a value to m_super_node_overhead + /// size of a compacted conduit::Node that contains a single sample + int m_compacted_sample_size; + + /// assignes values to m_super_node_overhead and m_compacted_sample_size void compute_super_node_overhead(); /// ??? as our code currently stands (sun, 10 feb) this isn't necessary diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 15680306472..4049da18d75 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -112,21 +112,34 @@ void data_store_jag::setup_data_store_buffers() { // this gets called at the beginning of each epoch (except for epoch 0) // // Note: conduit has a very nice interface for communicating nodes -// in non-blocking scenarios. Unf, for blocking we need to +// in blocking scenarios. Unf, for non-blocking we need to // handle things ourselves. 
TODO: possible modify conduit to // handle non-blocking comms void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_size) { double tm1 = get_time(); - //======================================================================== //part 1: exchange the sizes of the data // m_send_buffer[j] is a conduit::Node that contains // all samples that this proc will send to P_j double tma = get_time(); + compute_super_node_overhead(); build_indices_i_will_send(current_pos, mb_size); + build_indices_i_will_recv(current_pos, mb_size); + + // debug block + debug << "supernode overhead: " << m_super_node_overhead << " compacted sample size: " << m_compacted_sample_size << "\n"; + for (int p=0; p global indices that P_x needs for this epoch, and - // which I own - // build map: owner -> set of indices I need that owner has double tma = get_time(); - std::vector> proc_to_indices(m_np); - // get indices that I need for this epoch; these correspond to - // samples that this proc receives from others - std::unordered_map> needed; - { - size_t j = 0; - for (auto i = current_pos; i < current_pos + mb_size; i++) { - auto index = (*m_shuffled_indices)[i]; - /// If this rank owns the index send it to the j'th rank - if (m_data.find(index) != m_data.end()) { - proc_to_indices[j].insert(index); - } - if(j == static_cast(m_rank)) { - int owner = m_owner[index]; - needed[owner].insert(index); - } - j = (j + 1) % m_np; - } - } + build_indices_i_will_send(current_pos, mb_size); + build_indices_i_will_recv(current_pos, mb_size); + // debug block + #if 0 int sample_size = 0; for (auto &t : m_data) { if(sample_size == 0) { @@ -342,6 +364,7 @@ double tma = get_time(); debug << "sample size: " << sample_size << " num samples: " << m_data.size() << "\n"; debug.close(); debug.open(b, std::ios::app); + #endif //======================================================================== @@ -352,7 +375,7 @@ tma = get_time(); // start sends for outgoing data size_t ss = 0; for (int p=0; 
p &indices = proc_to_indices[p]; + const std::unordered_set &indices = indices_i_will_send[p]; for (auto index : indices) { if (m_data.find(index) == m_data.end()) { LBANN_ERROR("failed to find data_id: " + std::to_string(index) + " to be sent to " + std::to_string(p) + " in m_data"); @@ -360,7 +383,7 @@ tma = get_time(); //const void *s = m_send_buffer[ss].data_ptr(); const void *s = m_data[index].data_ptr(); - MPI_Isend(s, sample_size, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_send_requests[ss++]); + MPI_Isend(s, m_compacted_sample_size, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_send_requests[ss++]); //MPI_Isend(s, m_outgoing_msg_sizes[p], MPI_BYTE, p, 1, MPI_COMM_WORLD, &m_send_requests[p]); } } @@ -384,7 +407,7 @@ tma = get_time(); debug << "starting " << indices.size() << " recvs from " << p << "\n"; for (auto index : indices) { m_recv_buffer[ss].set(conduit::DataType::uint8(sample_size)); - MPI_Irecv(m_recv_buffer[ss].data_ptr(), sample_size, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_recv_requests[ss]); + MPI_Irecv(m_recv_buffer[ss].data_ptr(), m_compacted_sample_size,, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_recv_requests[ss]); m_index_to_data_id[index] = ss; ++ss; } @@ -446,38 +469,27 @@ debug.close(); debug.open(b, std::ios::app); #endif } -#if 0 -// fills in m_ds_indices and m_owner -void data_store_jag::build_ds_indices() { - m_owner.clear(); - m_ds_indices.clear(); - m_ds_indices.resize(m_np); - - std::vector> proc_to_indices(m_np); +void data_store_jag::build_indices_i_will_recv(int current_pos, int mb_size) { + m_indices_to_recv.clear(); + m_indices_to_recv.resize(m_np); size_t j = 0; - for (size_t i = 0; i < m_shuffled_indices->size(); i++) { + for (int i=current_pos; i< current_pos + mb_size; ++i) { auto index = (*m_shuffled_indices)[i]; - m_ds_indices[j].insert(index); - m_owner[index] = j; + // if (m_data.find(index) != m_data.end()) { + if (index % m_np == m_rank) { + int owner = m_owner[index]; + m_indices_to_recv[owner].insert(index); + } j = (j + 1) % 
m_np; } -} -#endif -void data_store_jag::build_indices_i_will_recv(int current_pos, int mb_size) { - for (size_t j=m_rank; jsize(); j += m_np) { - auto index = (*m_shuffled_indices)[j]; - int owner = m_owner[index]; - m_indices_to_recv[owner].insert(index); - j = (j + 1) % m_np; - } } void data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { m_indices_to_send.clear(); m_indices_to_send.resize(m_np); size_t j = 0; - for (auto i = current_pos; i < current_pos + mb_size; i++) { + for (int i = current_pos; i < current_pos + mb_size; i++) { auto index = (*m_shuffled_indices)[i]; /// If this rank owns the index send it to the j'th rank if (m_data.find(index) != m_data.end()) { @@ -487,6 +499,24 @@ void data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { } } +#if 0 +// fills in m_ds_indices and m_owner +void data_store_jag::build_ds_indices() { + m_owner.clear(); + m_ds_indices.clear(); + m_ds_indices.resize(m_np); + + std::vector> proc_to_indices(m_np); + size_t j = 0; + for (size_t i = 0; i < m_shuffled_indices->size(); i++) { + auto index = (*m_shuffled_indices)[i]; + m_ds_indices[j].insert(index); + m_owner[index] = j; + j = (j + 1) % m_np; + } +} +#endif + void data_store_jag::build_owner_map() { m_owner.clear(); size_t j = 0; @@ -514,7 +544,11 @@ void data_store_jag::compute_super_node_overhead() { first = n3.total_bytes_compact(); } else { m_super_node_overhead = 2*first - n3.total_bytes_compact(); - if (m_master) std::cerr << "m_super_node_overhead: " << m_super_node_overhead << "\n"; + m_compacted_sample_size = first - m_super_node_overhead; + if (m_master) { + std::cerr << "m_super_node_overhead: " << m_super_node_overhead + << " m_compacted_sample_size: " << m_compacted_sample_size << "\n"; + } return; } } From dd1dc0574eb9653b3e0ffa48b46e31a489132a09 Mon Sep 17 00:00:00 2001 From: "Brian C. 
Van Essen" Date: Mon, 11 Feb 2019 09:49:32 -0800 Subject: [PATCH 029/443] Starting development on v0.99.0 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index eee3e65bf73..b5e5e610a3c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,8 +48,8 @@ endif () # set(LBANN_VERSION_MAJOR 0) -set(LBANN_VERSION_MINOR 98) -set(LBANN_VERSION_PATCH 1) +set(LBANN_VERSION_MINOR 99) +set(LBANN_VERSION_PATCH 0) set(LBANN_VERSION "${LBANN_VERSION_MAJOR}.${LBANN_VERSION_MINOR}.${LBANN_VERSION_PATCH}") From 5d786887858afa3c7e540236db1b16f3d0e47cfe Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Mon, 11 Feb 2019 09:58:52 -0800 Subject: [PATCH 030/443] update required H version to 1.2.0 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b5e5e610a3c..70fd2ce4839 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -167,12 +167,12 @@ set(LBANN_HAS_CEREAL ${CEREAL_FOUND}) # The imported target is just called "cereal". Super. # Setup the linear algebra library -find_package(Hydrogen 1.1.0 NO_MODULE QUIET +find_package(Hydrogen 1.2.0 NO_MODULE QUIET HINTS ${Hydrogen_DIR} ${HYDROGEN_DIR} $ENV{Hydrogen_DIR} $ENV{HYDROGEN_DIR} PATH_SUFFIXES lib/cmake/hydrogen NO_DEFAULT_PATH) if (NOT Hydrogen_FOUND) - find_package(Hydrogen 1.1.0 NO_MODULE QUIET REQUIRED) + find_package(Hydrogen 1.2.0 NO_MODULE QUIET REQUIRED) endif () message(STATUS "Found Hydrogen: ${Hydrogen_DIR}") set(LBANN_HAS_HYDROGEN ${Hydrogen_FOUND}) From 3baa7d9522c82b35d08a7779496886680f827f56 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Mon, 11 Feb 2019 11:48:38 -0800 Subject: [PATCH 031/443] Added version checking for the Aluminum library. Set LBANN to require v0.2.0 for Aluminum. 
--- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 70fd2ce4839..18b655e669b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -209,13 +209,13 @@ endif () if (LBANN_WITH_ALUMINUM) # Aluminum may have already been found by Hydrogen if (NOT Aluminum_FOUND) - find_package(Aluminum NO_MODULE QUIET + find_package(Aluminum 0.2.0 NO_MODULE QUIET HINTS ${Aluminum_DIR} ${ALUMINUM_DIR} ${AL_DIR} $ENV{Aluminum_DIR} $ENV{ALUMINUM_DIR} $ENV{AL_DIR} PATH_SUFFIXES lib64/cmake/aluminum lib/cmake/aluminum NO_DEFAULT_PATH) if (NOT Aluminum_FOUND) - find_package(Aluminum NO_MODULE QUIET) + find_package(Aluminum 0.2.0 NO_MODULE QUIET) endif () endif () set(LBANN_HAS_ALUMINUM ${Aluminum_FOUND}) From a7d5cd1ae90e36761362da8f41a5abcb816c94ec Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Mon, 11 Feb 2019 13:33:07 -0800 Subject: [PATCH 032/443] disable NVPROF annotations if no CUDA --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 18b655e669b..fbe752f6da6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -305,7 +305,7 @@ if (LBANN_WITH_VTUNE) endif (VTune_FOUND) endif (LBANN_WITH_VTUNE) -if (LBANN_WITH_NVPROF) +if (LBANN_WITH_CUDA AND LBANN_WITH_NVPROF) set(LBANN_NVPROF TRUE) endif () From 0fbf182e8cea116a1e318ae685dc81cb465563b6 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Tue, 12 Feb 2019 13:36:40 -0800 Subject: [PATCH 033/443] Adding directions for how an LBANN user can install with Spack. --- docs/BuildingLBANN.md | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/docs/BuildingLBANN.md b/docs/BuildingLBANN.md index 4643c7d3354..9b35dfa9405 100644 --- a/docs/BuildingLBANN.md +++ b/docs/BuildingLBANN.md @@ -96,8 +96,25 @@ The following third-party packages are **optional**. ### Building & Installing LBANN as a user -This section is work in progress. 
For now, follow the developer -instructions below. We are working to simplify this process. +Now that spack is setup and installed into your path, it can be used +to install the LBANN executables. This approach is appropriate for +users that just want to train new or existing models using the python +front end. Note that if your model requires custom layers or data +readers you may need to install LBANN as a developer, which would +allow you to modify and recompile the source code. + +- Building with the latest released versions and GPU support: + `spack install lbann +gpu` + +- Building with the head of develop branch for lbann, hydrogen and + aluminum with GPU support: + `spack install lbann@develop +gpu ^hydrogen@develop ^aluminum@master` + +Note that there are a number of options for all of these packages and +can be viewed via commands such as `spack info lbann`. To specify the +compiler you can add options such as `%gcc@7.3.0`. For further +information about specifying dependencies like the MPI library please +consult the [Spack documentation](https://spack.readthedocs.io/). ### Building & Installing LBANN as a developer From da506aa321a8d5942fb2e569255ca929c8c212e1 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Tue, 12 Feb 2019 14:28:33 -0800 Subject: [PATCH 034/443] Added a Spack environment for users to use when building on LLNL LC systems. Updated the documentation to reflect new user build options. --- docs/BuildingLBANN.md | 21 ++++- spack_environments/externals_llnl_lc_cz.yaml | 14 +-- .../users/llnl_lc/x86_64_cuda/spack.yaml | 88 +++++++++++++++++++ 3 files changed, 113 insertions(+), 10 deletions(-) create mode 100644 spack_environments/users/llnl_lc/x86_64_cuda/spack.yaml diff --git a/docs/BuildingLBANN.md b/docs/BuildingLBANN.md index 9b35dfa9405..3c82dcc78a6 100644 --- a/docs/BuildingLBANN.md +++ b/docs/BuildingLBANN.md @@ -101,14 +101,29 @@ to install the LBANN executables. 
This approach is appropriate for users that just want to train new or existing models using the python front end. Note that if your model requires custom layers or data readers you may need to install LBANN as a developer, which would -allow you to modify and recompile the source code. +allow you to modify and recompile the source code. Here are three +easy ways to install LBANN: - Building with the latest released versions and GPU support: - `spack install lbann +gpu` + ```bash + spack install lbann +gpu +nccl + ml load spack + ``` - Building with the head of develop branch for lbann, hydrogen and aluminum with GPU support: - `spack install lbann@develop +gpu ^hydrogen@develop ^aluminum@master` + ```bash + spack install lbann@develop +gpu +nccl ^hydrogen@develop ^aluminum@master + ml load spack + ``` + +- Using the Spack environment method, (e.g. for an x86_64 LLNL LC system with GPU support): + ```bash + cd /spack_environments/users/llnl_lc/x86_64_gpu/ + spack install + spack env loads + source loads + ``` Note that there are a number of options for all of these packages and can be viewed via commands such as `spack info lbann`. 
To specify the diff --git a/spack_environments/externals_llnl_lc_cz.yaml b/spack_environments/externals_llnl_lc_cz.yaml index 14ed4837809..9e0498bcdb6 100644 --- a/spack_environments/externals_llnl_lc_cz.yaml +++ b/spack_environments/externals_llnl_lc_cz.yaml @@ -8,45 +8,45 @@ packages: modules: {} compiler: [gcc@7.3.0 arch=linux-rhel7-x86_64, gcc@7.3.1 arch=linux-rhel7-ppc64le] - cmake: + cmake:: variants: ~openssl ~ncurses paths: cmake@3.12.1 arch=linux-rhel7-x86_64: /usr/tce/packages/cmake/cmake-3.12.1 - mvapich2: + mvapich2:: buildable: True version: [2.3] paths: mvapich2@2.3%gcc@7.3.0 arch=linux-rhel7-x86_64: /usr/tce/packages/mvapich2/mvapich2-2.3-gcc-7.3.0/ - hwloc: + hwloc:: buildable: False version: [2.0.2] paths: hwloc@2.0.2 arch=linux-rhel7-x86_64: /usr/lib64/libhwloc.so - cuda: + cuda:: buildable: False version: [9.2.88, 10.0.130] paths: cuda@10.0.130 arch=linux-rhel7-x86_64: /usr/tce/packages/cuda/cuda-10.0.130 cuda@9.2.88 arch=linux-rhel7-ppc64le: /usr/tce/packages/cuda/cuda-9.2.88/ - cudnn: + cudnn:: buildable: False version: [7.4.2] paths: cudnn@7.4.2 arch=linux-rhel7-x86_64: /usr/workspace/wsb/brain/cudnn/cudnn-7.4.2/cuda-10.0_x86_64 cudnn@7.4.2 arch=linux-rhel7-ppc64le: /usr/workspace/wsb/brain/cudnn/cudnn-7.4.2/cuda-9.2_ppc64le - nccl: + nccl:: buildable: False version: [2.3] paths: nccl@2.3 arch=linux-rhel7-x86_64: /usr/workspace/wsb/brain/nccl2/nccl_2.3.7-1+cuda10.0_x86_64 nccl@2.3 arch=linux-rhel7-ppc64le: /usr/workspace/wsb/brain/nccl2/nccl_2.2.13-1+cuda9.2_ppc64le - spectrum-mpi: + spectrum-mpi:: buildable: False version: [rolling-release] paths: diff --git a/spack_environments/users/llnl_lc/x86_64_cuda/spack.yaml b/spack_environments/users/llnl_lc/x86_64_cuda/spack.yaml new file mode 100644 index 00000000000..76d4455437e --- /dev/null +++ b/spack_environments/users/llnl_lc/x86_64_cuda/spack.yaml @@ -0,0 +1,88 @@ +# This is a Spack Environment file. +# +# It describes a set of packages to be installed, along with +# configuration settings. 
+spack: + # add package specs to the `specs` list + specs: + - lbann@develop+conduit+docs+gpu+nccl+opencv ^aluminum@master ^hydrogen@develop + mirrors: {} + modules: + enable: [] + repos: [] + config: {} +################################################################################ +# Include paths to standard compilers and packages on LLNL LC systems +# Remove and/or replace these with your site specific packages and paths +################################################################################ +# include: +# - externals_llnl_lc_cz.yaml + packages: + all: + providers: + mpi: [mvapich2@2.3 arch=linux-rhel7-x86_64] + buildable: true + version: [] + paths: {} + modules: {} + compiler: [gcc@7.3.0 arch=linux-rhel7-x86_64] + 'cmake:': + variants: ~openssl ~ncurses + paths: + cmake@3.12.1 arch=linux-rhel7-x86_64: /usr/tce/packages/cmake/cmake-3.12.1 + + buildable: true + version: [] + providers: {} + modules: {} + compiler: [] + 'mvapich2:': + buildable: true + version: [2.3] + paths: + mvapich2@2.3%gcc@7.3.0 arch=linux-rhel7-x86_64: /usr/tce/packages/mvapich2/mvapich2-2.3-gcc-7.3.0/ + + providers: {} + modules: {} + compiler: [] + 'hwloc:': + buildable: false + version: [2.0.2] + paths: + hwloc@2.0.2 arch=linux-rhel7-x86_64: /usr/lib64/libhwloc.so + + providers: {} + modules: {} + compiler: [] + 'cuda:': + buildable: false + version: [10.0.130] + paths: + cuda@10.0.130 arch=linux-rhel7-x86_64: /usr/tce/packages/cuda/cuda-10.0.130 + + providers: {} + modules: {} + compiler: [] + 'cudnn:': + buildable: false + version: [7.4.2] + paths: + cudnn@7.4.2 arch=linux-rhel7-x86_64: /usr/workspace/wsb/brain/cudnn/cudnn-7.4.2/cuda-10.0_x86_64 + + providers: {} + modules: {} + compiler: [] + compilers: + - compiler: + environment: {} + extra_rpaths: [] + flags: {} + modules: [] + operating_system: rhel7 + paths: + cc: /usr/tce/packages/gcc/gcc-7.3.0/bin/gcc + cxx: /usr/tce/packages/gcc/gcc-7.3.0/bin/g++ + f77: /usr/tce/packages/gcc/gcc-7.3.0/bin/gfortran + fc: 
/usr/tce/packages/gcc/gcc-7.3.0/bin/gfortran + spec: gcc@7.3.0 + target: x86_64 From 7509148828fe2103f45be474a29158fdf42da87e Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Tue, 12 Feb 2019 18:31:10 -0800 Subject: [PATCH 035/443] Added the CORAL ppc64le user environment. --- ReleaseNotes.txt | 2 + .../users/llnl_lc/ppc64le_cuda/spack.yaml | 95 +++++++++++++++++++ .../users/llnl_lc/x86_64_cuda/spack.yaml | 7 ++ 3 files changed, 104 insertions(+) create mode 100644 spack_environments/users/llnl_lc/ppc64le_cuda/spack.yaml diff --git a/ReleaseNotes.txt b/ReleaseNotes.txt index fea20150a3f..7152df49fd2 100644 --- a/ReleaseNotes.txt +++ b/ReleaseNotes.txt @@ -14,6 +14,8 @@ Internal features: I/O & data readers: Build system: + - Added documentation for how users can use Spack to install LBANN + either directly or via environments Retired features: diff --git a/spack_environments/users/llnl_lc/ppc64le_cuda/spack.yaml b/spack_environments/users/llnl_lc/ppc64le_cuda/spack.yaml new file mode 100644 index 00000000000..a14908a90cb --- /dev/null +++ b/spack_environments/users/llnl_lc/ppc64le_cuda/spack.yaml @@ -0,0 +1,95 @@ +# This is a Spack Environment file. +# +# It describes a set of packages to be installed, along with +# configuration settings. 
+spack: + # add package specs to the `specs` list + specs: + - lbann@develop+conduit+docs+gpu+nccl+opencv ^aluminum@master ^hydrogen@develop + mirrors: {} + modules: + enable: [] + repos: [] + config: {} +################################################################################ +# Include paths to standard compilers and packages on LLNL LC systems +# Remove and/or replace these with your site specific packages and paths +################################################################################ +# include: +# - externals_llnl_lc_cz.yaml + packages: + all: + providers: + mpi: [spectrum-mpi@rolling-release arch=linux-rhel7-ppc64le] + buildable: true + version: [] + paths: {} + modules: {} + compiler: [gcc@7.3.1 arch=linux-rhel7-ppc64le] + 'nccl:': + buildable: true + version: [] + providers: {} + paths: {} + modules: {} + compiler: [] + cmake: + variants: ~openssl ~ncurses + paths: + cmake@3.12.1 arch=linux-rhel7-ppc64le: /usr/tce/packages/cmake/cmake-3.12.1 + + buildable: true + version: [] + providers: {} + modules: {} + compiler: [] + hwloc: + buildable: false + version: [2.0.2] + paths: + hwloc@2.0.2 arch=linux-rhel7-ppc64le: /usr/lib64/libhwloc.so + + providers: {} + modules: {} + compiler: [] + cuda: + buildable: false + version: [9.2.88] + paths: + cuda@9.2.88 arch=linux-rhel7-ppc64le: /usr/tce/packages/cuda/cuda-9.2.88/ + + providers: {} + modules: {} + compiler: [] + cudnn: + buildable: false + version: [7.4.2] + paths: + cudnn@7.4.2 arch=linux-rhel7-ppc64le: /usr/workspace/wsb/brain/cudnn/cudnn-7.4.2/cuda-9.2_ppc64le + + providers: {} + modules: {} + compiler: [] + spectrum-mpi: + buildable: false + version: [rolling-release] + paths: + spectrum-mpi@rolling-release %gcc@7.3.1 arch=linux-rhel7-ppc64le: /usr/tce/packages/spectrum-mpi/spectrum-mpi-rolling-release-gcc-7.3.1 + + providers: {} + modules: {} + compiler: [] + compilers: + - compiler: + environment: {} + extra_rpaths: [] + flags: {} + modules: [] + operating_system: rhel7 + paths: + cc: 
/usr/tce/packages/gcc/gcc-7.3.1/bin/gcc + cxx: /usr/tce/packages/gcc/gcc-7.3.1/bin/g++ + f77: /usr/tce/packages/gcc/gcc-7.3.1/bin/gfortran + fc: /usr/tce/packages/gcc/gcc-7.3.1/bin/gfortran + spec: gcc@7.3.1 + target: ppc64le diff --git a/spack_environments/users/llnl_lc/x86_64_cuda/spack.yaml b/spack_environments/users/llnl_lc/x86_64_cuda/spack.yaml index 76d4455437e..012c4e233a4 100644 --- a/spack_environments/users/llnl_lc/x86_64_cuda/spack.yaml +++ b/spack_environments/users/llnl_lc/x86_64_cuda/spack.yaml @@ -26,6 +26,13 @@ spack: paths: {} modules: {} compiler: [gcc@7.3.0 arch=linux-rhel7-x86_64] + 'nccl:': + buildable: true + version: [] + providers: {} + paths: {} + modules: {} + compiler: [] 'cmake:': variants: ~openssl ~ncurses paths: From 232ba21a7f1d4a55e97e9f073a9a4dd0f945787a Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Wed, 13 Feb 2019 05:34:07 -0800 Subject: [PATCH 036/443] reverting to exchanging data sizes in by_super_node --- src/data_store/data_store_jag.cpp | 100 ++++++++++++------------------ 1 file changed, 41 insertions(+), 59 deletions(-) diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 4049da18d75..8eb3be63156 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -96,6 +96,7 @@ void data_store_jag::setup() { } void data_store_jag::setup_data_store_buffers() { + debug << "\nstarting data_store_jag::setup_data_store_buffers\n"; // allocate buffers that are used in exchange_data() m_send_buffer.resize(m_np); m_send_buffer_2.resize(m_np); @@ -119,28 +120,13 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s double tm1 = get_time(); //======================================================================== - //part 1: exchange the sizes of the data - // m_send_buffer[j] is a conduit::Node that contains - // all samples that this proc will send to P_j + //part 1: construct the super_nodes double tma = get_time(); - 
compute_super_node_overhead(); build_indices_i_will_send(current_pos, mb_size); build_indices_i_will_recv(current_pos, mb_size); - // debug block - debug << "supernode overhead: " << m_super_node_overhead << " compacted sample size: " << m_compacted_sample_size << "\n"; - for (int p=0; p"<< t.name() <<"<\n"; +} +*/ + node_out.reset(); conduit::Schema s_data_compact; if( node_in.is_compact() && node_in.is_contiguous()) { @@ -472,15 +446,16 @@ debug.close(); debug.open(b, std::ios::app); void data_store_jag::build_indices_i_will_recv(int current_pos, int mb_size) { m_indices_to_recv.clear(); m_indices_to_recv.resize(m_np); - size_t j = 0; + // size_t j = 0; for (int i=current_pos; i< current_pos + mb_size; ++i) { auto index = (*m_shuffled_indices)[i]; // if (m_data.find(index) != m_data.end()) { - if (index % m_np == m_rank) { + if (i % m_np == m_rank) { + //if (index % m_np == m_rank) { int owner = m_owner[index]; m_indices_to_recv[owner].insert(index); } - j = (j + 1) % m_np; +// j = (j + 1) % m_np; } } @@ -494,6 +469,13 @@ void data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { /// If this rank owns the index send it to the j'th rank if (m_data.find(index) != m_data.end()) { m_indices_to_send[j].insert(index); + + // Sanity check + if (m_owner[index] != m_rank) { + std::stringstream s; + s << "error for i: "< Date: Wed, 13 Feb 2019 05:43:00 -0800 Subject: [PATCH 037/443] chnged size of m_reconstituted array from m_data.size() to m_np. 
--- src/data_store/data_store_jag.cpp | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 8eb3be63156..a1ecf41ebc9 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -96,7 +96,6 @@ void data_store_jag::setup() { } void data_store_jag::setup_data_store_buffers() { - debug << "\nstarting data_store_jag::setup_data_store_buffers\n"; // allocate buffers that are used in exchange_data() m_send_buffer.resize(m_np); m_send_buffer_2.resize(m_np); @@ -106,15 +105,14 @@ void data_store_jag::setup_data_store_buffers() { m_outgoing_msg_sizes.resize(m_np); m_incoming_msg_sizes.resize(m_np); m_recv_buffer.resize(m_np); - - m_reconstituted.resize(m_data.size()); + m_reconstituted.resize(m_np); } // this gets called at the beginning of each epoch (except for epoch 0) // // Note: conduit has a very nice interface for communicating nodes // in blocking scenarios. Unf, for non-blocking we need to -// handle things ourselves. TODO: possible modify conduit to +// handle things ourselves. TODO: possibly modify conduit to // handle non-blocking comms void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_size) { double tm1 = get_time(); @@ -191,8 +189,6 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s gen.walk(rcv_schema); n_buff_ptr += n_msg["schema"].total_bytes_compact(); n_msg["data"].set_external(rcv_schema,n_buff_ptr); - //nd.reset(); - //nd.update_external(n_msg["data"]); m_reconstituted[p].reset(); m_reconstituted[p].update_external(n_msg["data"]); const std::vector &names = m_reconstituted[p].child_names(); From 4e246e130a1df8d76e2ff1a58561c2d945c1f0b7 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 13 Feb 2019 10:22:08 -0800 Subject: [PATCH 038/443] Updated and fixed a number of bugs in the per sample data exchange in the data store. 
Additionally, applied several optimizations from the super_node exchange to minimize data movement. The per-sample exchange now provides correct functionality and is measurably faster than the super_node exchange. --- include/lbann/data_store/data_store_jag.hpp | 14 +- src/data_store/data_store_jag.cpp | 200 ++++++++++---------- 2 files changed, 113 insertions(+), 101 deletions(-) diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index 55f8ba2d465..aabccb0feb9 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -83,8 +83,8 @@ protected : void exchange_data_by_sample(size_t current_pos, size_t mb_size); - // when m_super_node = false - std::unordered_map m_index_to_data_id; + /// Contains the list of data IDs that will be received + std::vector m_recv_data_ids; /// contains the Nodes that this processor owns; /// maps data_id to conduit::Node @@ -131,15 +131,17 @@ protected : /// this proc needs to send. (formerly called "proc_to_indices) std::vector> m_indices_to_send; - /// fills in m_indices_to_send - void build_indices_i_will_send(int current_pos, int mb_size); + /// fills in m_indices_to_send and returns the number of samples + /// that will be sent + int build_indices_i_will_send(int current_pos, int mb_size); /// maps processor id -> set of indices (whose associated samples) /// this proc needs to recv from others. 
(formerly called "needed") std::vector> m_indices_to_recv; - /// fills in m_indices_to_recv - void build_indices_i_will_recv(int current_pos, int mb_size); + /// fills in m_indices_to_recv and returns the number of samples + /// that will be received + int build_indices_i_will_recv(int current_pos, int mb_size); }; diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index a1ecf41ebc9..e116c57d42a 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -45,7 +45,8 @@ data_store_jag::data_store_jag( generic_data_reader *reader, model *m) : generic_data_store(reader, m), m_super_node(false), - m_super_node_overhead(0) { + m_super_node_overhead(0), + m_compacted_sample_size(0) { set_name("data_store_jag"); } @@ -209,10 +210,25 @@ void data_store_jag::set_conduit_node(int data_id, conduit::Node &node) { } if (! m_super_node) { - //@TODO fix, so we don't need to do a deep copy - conduit::Node n2; - build_node_for_sending(node, n2); - m_data[data_id] = n2; + build_node_for_sending(node, m_data[data_id]); + const conduit::Node& n2 = m_data[data_id]; + if(m_compacted_sample_size == 0) { + m_compacted_sample_size = n2.total_bytes_compact(); + }else if(m_compacted_sample_size != n2.total_bytes_compact()) { + LBANN_ERROR("Conduit node being added data_id: " + std::to_string(data_id) + + " is not the same size as existing nodes in the data_store " + + std::to_string(m_compacted_sample_size) + " != " + + std::to_string(n2.total_bytes_compact())); + } + if(!m_data[data_id].is_contiguous()) { + LBANN_ERROR("m_data[" + std::to_string(data_id) + "] does not have a contiguous layout"); + } + if(m_data[data_id].data_ptr() == nullptr) { + LBANN_ERROR("m_data[" + std::to_string(data_id) + "] does not have a valid data pointer"); + } + if(m_data[data_id].contiguous_data_ptr() == nullptr) { + LBANN_ERROR("m_data[" + std::to_string(data_id) + "] does not have a valid contiguous data pointer"); + } } else { @@ -227,7 +243,11 
@@ void data_store_jag::set_conduit_node(int data_id, conduit::Node &node) { const conduit::Node & data_store_jag::get_conduit_node(int data_id) const { std::unordered_map::const_iterator t = m_data.find(data_id); if (t != m_data.end()) { - return t->second; + if(m_super_node) { + return t->second; + }else { + return t->second["data"]; + } } std::unordered_map::const_iterator t2 = m_minibatch_data.find(data_id); @@ -280,109 +300,99 @@ while (t.has_next()) { node_out.set(s_msg_compact); node_out["schema"].set(snd_schema_json); node_out["data"].update(node_in); + + if(!node_out.is_contiguous()) { + LBANN_ERROR("node_out does not have a contiguous layout"); + } + if(node_out.data_ptr() == nullptr) { + LBANN_ERROR("node_out does not have a valid data pointer"); + } + if(node_out.contiguous_data_ptr() == nullptr) { + LBANN_ERROR("node_out does not have a valid contiguous data pointer"); + } } void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) { -#if 0 - double tm1 = get_time(); - - debug.open(b, std::ios::app); - debug << "\n============================================================\n" - <<"starting exchange_data_by_sample; epoch: "<get_cur_epoch()<< " data size: "<get_cur_epoch()<< " data size: "<get_cur_epoch()<< " data size: "< &indices = indices_i_will_send[p]; + const std::unordered_set &indices = m_indices_to_send[p]; + // std::cout << "I am going to be sending data for p " << p << std::endl; for (auto index : indices) { if (m_data.find(index) == m_data.end()) { LBANN_ERROR("failed to find data_id: " + std::to_string(index) + " to be sent to " + std::to_string(p) + " in m_data"); } - - //const void *s = m_send_buffer[ss].data_ptr(); - const void *s = m_data[index].data_ptr(); - MPI_Isend(s, m_compacted_sample_size, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_send_requests[ss++]); - //MPI_Isend(s, m_outgoing_msg_sizes[p], MPI_BYTE, p, 1, MPI_COMM_WORLD, &m_send_requests[p]); + const conduit::Node& n = m_data[index]; + const void *s = 
n.data_ptr(); + if(!n.is_contiguous()) { + LBANN_ERROR("data_id: " + std::to_string(index) + " does not have a contiguous layout"); + } + if(n.data_ptr() == nullptr) { + LBANN_ERROR("data_id: " + std::to_string(index) + " does not have a valid data pointer"); + } + if(n.contiguous_data_ptr() == nullptr) { + LBANN_ERROR("data_id: " + std::to_string(index) + " does not have a valid contiguous data pointer"); + } + // MPI_Isend(s, m_compacted_sample_size, MPI_BYTE, p, index, comm->get_world_comm(), &m_send_requests[ss++]); + MPI_Isend(s, m_compacted_sample_size, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_send_requests.at(ss++)); } } - LBANN_ERROR("Stopping"); // sanity checks if (ss != m_send_requests.size()) { - LBANN_ERROR("ss != m_send_requests.size; ss: " + std::to_string(ss) + " m_send_requests`.size: " + std::to_string(m_send_requests.size())); + LBANN_ERROR("ss != m_send_requests.size; ss: " + std::to_string(ss) + " m_send_requests.size: " + std::to_string(m_send_requests.size())); } MPI_Barrier(MPI_COMM_WORLD); - if (m_master) std::cerr << "\nSENDS STARTED\n\n"; - debug << "\nSENDS STARTED\n\n"; - MPI_Barrier(MPI_COMM_WORLD); + // if (m_master) std::cerr << "\nSENDS STARTED\n\n"; + // debug << "\nSENDS STARTED\n\n"; + // MPI_Barrier(MPI_COMM_WORLD); // start recvs for incoming data ss = 0; for (int p=0; p &indices = needed[p]; -debug << "starting " << indices.size() << " recvs from " << p << "\n"; + const std::unordered_set &indices = m_indices_to_recv[p]; +// debug << "starting " << indices.size() << " recvs from " << p << "\n"; for (auto index : indices) { - m_recv_buffer[ss].set(conduit::DataType::uint8(sample_size)); - MPI_Irecv(m_recv_buffer[ss].data_ptr(), m_compacted_sample_size,, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_recv_requests[ss]); - m_index_to_data_id[index] = ss; + m_recv_buffer[ss].set(conduit::DataType::uint8(m_compacted_sample_size)); + MPI_Irecv(m_recv_buffer[ss].data_ptr(), m_compacted_sample_size, MPI_BYTE, p, index, MPI_COMM_WORLD, 
&m_recv_requests[ss]); + m_recv_data_ids[ss] = index; ++ss; } } + // if(m_master) std::cout << "\nRECV COMPLETE\n\n"; // sanity checks if (ss != m_recv_buffer.size()) { LBANN_ERROR("ss != m_recv_buffer.size; ss: " + std::to_string(ss) + " m_recv_buffer.size: " + std::to_string(m_recv_buffer.size())); @@ -393,18 +403,19 @@ debug << "starting " << indices.size() << " recvs from " << p << "\n"; // wait for all msgs to complete MPI_Waitall(m_send_requests.size(), m_send_requests.data(), m_status.data()); + m_status.clear(); MPI_Waitall(m_recv_requests.size(), m_recv_requests.data(), m_status.data()); -debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; -debug.close(); -debug.open(b, std::ios::app); +// debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; +// debug.close(); +// debug.open(b, std::ios::app); -tma = get_time(); +// tma = get_time(); //======================================================================== //part 3: construct the Nodes needed by me for the current minibatch -double tmw = get_time(); +// double tmw = get_time(); conduit::Node nd; m_minibatch_data.clear(); @@ -420,46 +431,43 @@ double tmw = get_time(); n_buff_ptr += n_msg["schema"].total_bytes_compact(); n_msg["data"].set_external(rcv_schema,n_buff_ptr); - // this is inefficent @TODO - nd.reset(); - nd.update(n_msg["data"]); - m_minibatch_data[nd["id"].value()] = nd; + int data_id = m_recv_data_ids[j]; + // m_minibatch_data[data_id].set(n_msg["data"]); + m_minibatch_data[data_id].set_external(n_msg["data"]); } -for (auto &t : m_minibatch_data) { - debug << t.first << " "; -} -debug << "\n"; +// for (auto &t : m_minibatch_data) { +// debug << t.first << " "; +// } +// debug << "\n"; -debug << "TOTAL Time to unpack incoming data: " << get_time() - tmw << "\n"; +// debug << "TOTAL Time to unpack incoming data: " << get_time() - tmw << "\n"; - if (m_master) std::cout << "data_store_jag::exchange_data Time: " << get_time() - tm1 << "\n"; 
+// if (m_master) std::cout << "data_store_jag::exchange_data Time: " << get_time() - tm1 << "\n"; - debug << "TOTAL exchange_data Time: " << get_time() - tm1 << "\n"; -debug.close(); debug.open(b, std::ios::app); -#endif +// debug << "TOTAL exchange_data Time: " << get_time() - tm1 << "\n"; +// debug.close(); debug.open(b, std::ios::app); } -void data_store_jag::build_indices_i_will_recv(int current_pos, int mb_size) { +int data_store_jag::build_indices_i_will_recv(int current_pos, int mb_size) { m_indices_to_recv.clear(); m_indices_to_recv.resize(m_np); - // size_t j = 0; + int k = 0; for (int i=current_pos; i< current_pos + mb_size; ++i) { auto index = (*m_shuffled_indices)[i]; - // if (m_data.find(index) != m_data.end()) { if (i % m_np == m_rank) { - //if (index % m_np == m_rank) { int owner = m_owner[index]; m_indices_to_recv[owner].insert(index); + k++; } -// j = (j + 1) % m_np; } - + return k; } -void data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { +int data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { m_indices_to_send.clear(); m_indices_to_send.resize(m_np); size_t j = 0; + int k = 0; for (int i = current_pos; i < current_pos + mb_size; i++) { auto index = (*m_shuffled_indices)[i]; /// If this rank owns the index send it to the j'th rank @@ -472,9 +480,11 @@ void data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { s << "error for i: "< Date: Wed, 13 Feb 2019 10:45:23 -0800 Subject: [PATCH 039/443] Changed the padding of the sample index to use a macro. Removed an unnecessary barrier from the per-sample data exchange. 
--- src/data_readers/data_reader_jag_conduit.cpp | 10 ++++++---- src/data_store/data_store_jag.cpp | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index 17b1d645865..eb04fb41899 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -55,6 +55,8 @@ #include #include +#define SAMPLE_ID_PAD 7 + // This macro may be moved to a global scope #define _THROW_LBANN_EXCEPTION_(_CLASS_NAME_,_MSG_) { \ std::stringstream _err; \ @@ -1084,7 +1086,7 @@ data_reader_jag_conduit::get_image_data(const size_t sample_id, conduit::Node& s for (const auto& emi_tag : m_emi_image_keys) { const std::string conduit_field = m_output_image_prefix + emi_tag; - const std::string conduit_obj = '/' + pad(std::to_string(sample_id), 5, '0') + '/' + conduit_field; + const std::string conduit_obj = '/' + pad(std::to_string(sample_id), SAMPLE_ID_PAD, '0') + '/' + conduit_field; if(sample[conduit_obj].schema().dtype().is_empty()) { if (data_store_active()) { LBANN_ERROR("Unable to find field " + conduit_obj @@ -1207,7 +1209,7 @@ std::vector data_reader_jag_conduit::get_scal for(const auto key: m_scalar_keys) { std::string conduit_field = m_output_scalar_prefix + key; - std::string conduit_obj = '/' + pad(std::to_string(sample_id), 5, '0') + '/' + conduit_field; + std::string conduit_obj = '/' + pad(std::to_string(sample_id), SAMPLE_ID_PAD, '0') + '/' + conduit_field; if(sample[conduit_obj].schema().dtype().is_empty()) { if (data_store_active()) { LBANN_ERROR("Unable to find field " + conduit_obj @@ -1238,7 +1240,7 @@ std::vector data_reader_jag_conduit::get_input // avoid some overhead by taking advantage of the fact that all the variables are of the same type for(const auto key: m_input_keys) { const std::string conduit_field = m_input_prefix + key; - const std::string conduit_obj = '/' + pad(std::to_string(sample_id), 5, '0') + '/' + 
conduit_field; + const std::string conduit_obj = '/' + pad(std::to_string(sample_id), SAMPLE_ID_PAD, '0') + '/' + conduit_field; if(sample[conduit_obj].schema().dtype().is_empty()) { if (data_store_active()) { LBANN_ERROR("Unable to find field " + conduit_obj @@ -1256,7 +1258,7 @@ std::vector data_reader_jag_conduit::get_input } else { for(const auto key: m_input_keys) { const std::string conduit_field = m_input_prefix + key; - const std::string conduit_obj = '/' + pad(std::to_string(sample_id), 5, '0') + '/' + conduit_field; + const std::string conduit_obj = '/' + pad(std::to_string(sample_id), SAMPLE_ID_PAD, '0') + '/' + conduit_field; if(sample[conduit_obj].schema().dtype().is_empty()) { if (data_store_active()) { LBANN_ERROR("Unable to find field " + conduit_obj diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index e116c57d42a..6aef21915d5 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -373,7 +373,7 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) LBANN_ERROR("ss != m_send_requests.size; ss: " + std::to_string(ss) + " m_send_requests.size: " + std::to_string(m_send_requests.size())); } - MPI_Barrier(MPI_COMM_WORLD); + // MPI_Barrier(MPI_COMM_WORLD); // if (m_master) std::cerr << "\nSENDS STARTED\n\n"; // debug << "\nSENDS STARTED\n\n"; // MPI_Barrier(MPI_COMM_WORLD); From 1c9b9518941852517c6b36c0ff86862b21ec2e88 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 13 Feb 2019 12:53:14 -0800 Subject: [PATCH 040/443] Replacing AbsMat::operator*= with El::Scale. This was causing strange problems in the image preprocessor, although I'm not sure why. 
--- include/lbann/layers/regularizers/dropout.hpp | 2 +- src/data_readers/image_preprocessor.cpp | 8 ++++---- src/utils/statistics.cpp | 4 ++-- src/weights/weights.cpp | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/lbann/layers/regularizers/dropout.hpp b/include/lbann/layers/regularizers/dropout.hpp index 82a7d310791..3fa652b07da 100644 --- a/include/lbann/layers/regularizers/dropout.hpp +++ b/include/lbann/layers/regularizers/dropout.hpp @@ -184,7 +184,7 @@ class dropout : public regularizer_layer { m_mask->Resize(height, width); #ifdef LBANN_DETERMINISTIC bernoulli_fill_procdet(*m_mask, height, width, DataType(m_keep_prob)); - *m_mask *= scale; + El::Scale(scale, *m_mask); #else El::EntrywiseMap(*m_mask, (std::function) diff --git a/src/data_readers/image_preprocessor.cpp b/src/data_readers/image_preprocessor.cpp index 38a825d879b..8069f36b8da 100644 --- a/src/data_readers/image_preprocessor.cpp +++ b/src/data_readers/image_preprocessor.cpp @@ -192,22 +192,22 @@ void lbann_image_preprocessor::unit_scale(Mat& pixels, unsigned num_channels) { // Pixels are in range [0, 255], normalize using that. // Channels are not relevant here. 
- pixels *= DataType(1) / 255; + El::Scale(DataType(1) / 255, pixels); } -void lbann_image_preprocessor::pixel_noise(Mat& pixels) +void lbann_image_preprocessor::pixel_noise(Mat& pixels) { if(m_noise_factor){ Mat X_noise; El::Gaussian(X_noise, pixels.Height(), pixels.Width(), DataType(0), DataType(1)); El::Axpy(m_noise_factor,X_noise,pixels); //@todo - clip to min and max of input entry - auto clip = [](const DataType& z) { + auto clip = [](const DataType& z) { return std::max(DataType(0), std::min(z,DataType(1))); }; EntrywiseMap(pixels, El::MakeFunction(clip)); - } + } } void lbann_image_preprocessor::z_score(Mat& pixels, diff --git a/src/utils/statistics.cpp b/src/utils/statistics.cpp index 72135e14a79..6a0de434834 100644 --- a/src/utils/statistics.cpp +++ b/src/utils/statistics.cpp @@ -412,8 +412,8 @@ void columnwise_covariance(const AbsDistMat& data1, } local_covs(0, col) = sum; } - AllReduce(covs, covs.RedundantComm(), El::mpi::SUM); - local_covs *= DataType(1) / height; + El::AllReduce(covs, covs.RedundantComm(), El::mpi::SUM); + El::Scale(DataType(1) / height, local_covs); } diff --git a/src/weights/weights.cpp b/src/weights/weights.cpp index 6b4c2d7557c..171c92fcaec 100644 --- a/src/weights/weights.cpp +++ b/src/weights/weights.cpp @@ -397,7 +397,7 @@ void weights::set_value(DataType value, int row, int col) { void weights::reconcile_values() { auto& values = get_values(); if (values.RedundantSize() > 1) { - values *= DataType(1) / values.RedundantSize(); + El::Scale(DataType(1) / values.RedundantSize(), values); m_comm->allreduce(values, values.RedundantComm()); } } @@ -405,7 +405,7 @@ void weights::reconcile_values() { void weights::reconcile_values(Al::request& req) { auto& values = get_values(); if (values.RedundantSize() > 1) { - values *= DataType(1) / values.RedundantSize(); + El::Scale(DataType(1) / values.RedundantSize(), values); m_comm->nb_allreduce(values, values.RedundantComm(), req); } } From 290f0543077fbf3da75ca5dc6c9ccb1240364aea Mon Sep 
17 00:00:00 2001 From: Tim Moon Date: Wed, 13 Feb 2019 13:15:43 -0800 Subject: [PATCH 041/443] Partitioned data reader is now the default. All model zoo prototexts have been updated to use the default. --- .../models/alexnet/model_alexnet.prototext | 4 +- .../model_autoencoder_chem_ecfp.prototext | 1 - ...er_chem_ecfp_200x150x100x100x100.prototext | 1 - ...utoencoder_chem_ecfp_500x250x100.prototext | 4 +- .../model_autoencoder_chem_sigmoid.prototext | 1 - .../model_dnn_chem_ecfp.prototext | 4 +- .../model_autoencoder_cifar10.prototext | 4 +- .../model_conv_autoencoder_cifar10.prototext | 4 +- .../model_conv_autoencoder_imagenet.prototext | 4 +- .../model_autoencoder_mnist.prototext | 4 +- .../model_conv_autoencoder_mnist.prototext | 4 +- .../autoencoder_mnist/vae_mnist.prototext | 4 +- .../candle/pilot1/ae_nodeselect_gdc.prototext | 1 - .../models/candle/pilot1/combo.prototext | 1 - .../cosmoflow/model_cosmoflow.prototext | 1 - .../gan/jags/cycle_gan/cycgan_m1.prototext | 4 +- .../gan/jags/cycle_gan/cycgan_m2.prototext | 4 +- .../gan/jags/cycle_gan/cycgan_m3.prototext | 4 +- .../gan/jags/cycle_gan/generate_cycgan_m1.py | 30 ++++++------- .../gan/jags/cycle_gan/generate_cycgan_m2.py | 22 +++++----- .../gan/jags/cycle_gan/generate_cycgan_m3.py | 22 +++++----- .../gan/mnist/adversarial_model.prototext | 4 +- .../gan/mnist/discriminator_model.prototext | 4 +- .../jag/ae_cycle_gan/3models/ae.prototext | 2 - .../jag/ae_cycle_gan/3models/ae_cyc.prototext | 1 - .../ae_cycle_gan/3models/ae_cyc2.prototext | 1 - .../ae_cycle_gan/3models/cycle_gan.prototext | 1 - .../jag/ae_cycle_gan/cycgan_m1.prototext | 1 - .../jag/ae_cycle_gan/cycgan_m2.prototext | 1 - .../jag/ae_cycle_gan/cycgan_m3.prototext | 1 - .../models/jag/ae_cycle_gan/vae1.prototext | 2 - .../models/jag/ae_cycle_gan/vae_cyc.prototext | 1 - .../models/jag/cycle_gan/cycgan_m1.prototext | 1 - .../models/jag/cycle_gan/cycgan_m2.prototext | 1 - .../models/jag/cycle_gan/cycgan_m3.prototext | 1 - 
.../jag/cycle_gan/generate_cycgan_m1.py | 32 +++++++------- .../jag/cycle_gan/generate_cycgan_m2.py | 22 +++++----- .../jag/cycle_gan/generate_cycgan_m3.py | 22 +++++----- .../jag/gan/cyclic/cyclic_gan_model.prototext | 1 - .../models/jag/gan/cyclic/generate_model.py | 44 +++++++++---------- .../models/jag/gan/vanilla/gan.prototext | 1 - .../models/jag/gan/vanilla/generate_gan.py | 26 +++++------ model_zoo/models/jag/vae_fcn.prototext | 1 - model_zoo/models/jag/wae.prototext | 1 - .../lenet_mnist/model_lenet_mnist.prototext | 4 +- ...onv_molecular_autoencoder_pilot2.prototext | 4 +- ...olecular_bead_autoencoder_pilot2.prototext | 4 +- ...del_molecular_autoencoder_pilot2.prototext | 4 +- model_zoo/models/python/keras/kerbann.py | 2 - .../models/python/keras/mnist_cnn.prototext | 4 +- .../models/resnet50/model_resnet50.prototext | 4 +- .../siamese/finetune-cub/model_cub.prototext | 1 - .../model_cub_batchnorm.prototext | 4 +- ...batchnorm_transferred_and_frozen.prototext | 4 +- ..._alexnet_batchnorm_dag_frozen_bn.prototext | 4 +- .../model_mnist_simple_1.prototext | 4 +- .../model_mnist_simple_2.prototext | 4 +- model_zoo/models/vram/generate_dram.py | 17 ++++--- .../model_channelwise_mean.prototext | 4 +- .../tests/layer_tests/model_clamp.prototext | 4 +- .../layer_tests/model_covariance.prototext | 4 +- .../tests/layer_tests/model_elu.prototext | 4 +- .../layer_tests/model_identity.prototext | 4 +- .../tests/layer_tests/model_l1_norm.prototext | 4 +- .../layer_tests/model_l2_norm2.prototext | 4 +- .../layer_tests/model_leaky_relu.prototext | 4 +- .../layer_tests/model_log_sigmoid.prototext | 4 +- .../layer_tests/model_log_softmax.prototext | 4 +- .../model_mean_absolute_error.prototext | 4 +- .../tests/layer_tests/model_relu.prototext | 4 +- .../tests/layer_tests/model_selu.prototext | 4 +- .../tests/layer_tests/model_sigmoid.prototext | 4 +- .../tests/layer_tests/model_softmax.prototext | 4 +- .../layer_tests/model_softplus.prototext | 4 +- 
.../layer_tests/model_softsign.prototext | 4 +- .../model_squared_difference.prototext | 4 +- .../layer_tests/model_tessellate.prototext | 4 +- .../layer_tests/model_variance.prototext | 4 +- .../tests/model_lenet_mnist_ckpt.prototext | 4 +- .../model_lenet_mnist_dist_ckpt.prototext | 4 +- .../model_lenet_mnist_lbann2ckpt.prototext | 4 +- .../tests/model_mnist_conv_graph.prototext | 4 +- .../model_mnist_distributed_io.prototext | 4 +- .../model_mnist_partitioned_io.prototext | 4 +- .../model_mnist_ridge_regression.prototext | 2 +- .../model_mnist_softmax_classifier.prototext | 2 +- src/proto/factories/layer_factory.cpp | 4 +- src/proto/lbann.proto | 6 +-- 88 files changed, 169 insertions(+), 313 deletions(-) diff --git a/model_zoo/models/alexnet/model_alexnet.prototext b/model_zoo/models/alexnet/model_alexnet.prototext index b3cd4dc1a23..58e8edfb22d 100644 --- a/model_zoo/models/alexnet/model_alexnet.prototext +++ b/model_zoo/models/alexnet/model_alexnet.prototext @@ -65,9 +65,7 @@ model { name: "data" children: "image label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { name: "image" diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp.prototext index 4faacb06bc4..3bb56aef475 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp.prototext @@ -53,7 +53,6 @@ model { children: "data dummy" data_layout: "model_parallel" input { - io_buffer: "distributed" target_mode: "reconstruction" } } diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_200x150x100x100x100.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_200x150x100x100x100.prototext index d81eb16d276..b82cc9e95ef 100644 --- 
a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_200x150x100x100x100.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_200x150x100x100x100.prototext @@ -53,7 +53,6 @@ model { children: "data dummy" data_layout: "model_parallel" input { - io_buffer: "distributed" target_mode: "reconstruction" } } diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_500x250x100.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_500x250x100.prototext index 3917087b639..5bbede7797f 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_500x250x100.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_500x250x100.prototext @@ -52,9 +52,7 @@ model { name: "input" children: "data dummy" data_layout: "model_parallel" - input { - io_buffer: "distributed" - } + input {} } layer { parents: "input" diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_sigmoid.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_sigmoid.prototext index 449d499471e..222f638a62a 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_sigmoid.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_sigmoid.prototext @@ -49,7 +49,6 @@ model { children: "data dummy" data_layout: "model_parallel" input { - io_buffer: "distributed" target_mode: "reconstruction" } } diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext index e617cc79afc..e302f1f2b39 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext @@ -56,9 +56,7 @@ model { name: "data" children: "finetunedata label" data_layout: "model_parallel" - input { - io_buffer: 
"distributed" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/autoencoder_cifar10/model_autoencoder_cifar10.prototext b/model_zoo/models/autoencoder_cifar10/model_autoencoder_cifar10.prototext index c6fd5fcd20b..4afc128da9d 100644 --- a/model_zoo/models/autoencoder_cifar10/model_autoencoder_cifar10.prototext +++ b/model_zoo/models/autoencoder_cifar10/model_autoencoder_cifar10.prototext @@ -45,9 +45,7 @@ model { name: "data" children: "image dummy" data_layout: "model_parallel" - input { - io_buffer: "distributed" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/autoencoder_cifar10/model_conv_autoencoder_cifar10.prototext b/model_zoo/models/autoencoder_cifar10/model_conv_autoencoder_cifar10.prototext index 0b5abd7f425..c716bdb9752 100644 --- a/model_zoo/models/autoencoder_cifar10/model_conv_autoencoder_cifar10.prototext +++ b/model_zoo/models/autoencoder_cifar10/model_conv_autoencoder_cifar10.prototext @@ -54,9 +54,7 @@ model { name: "data" children: "image dummy" data_layout: "data_parallel" - input { - io_buffer: "distributed" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/autoencoder_imagenet/model_conv_autoencoder_imagenet.prototext b/model_zoo/models/autoencoder_imagenet/model_conv_autoencoder_imagenet.prototext index 1ac12c65832..97d0ee18f3f 100644 --- a/model_zoo/models/autoencoder_imagenet/model_conv_autoencoder_imagenet.prototext +++ b/model_zoo/models/autoencoder_imagenet/model_conv_autoencoder_imagenet.prototext @@ -47,9 +47,7 @@ model { name: "data" children: "image dummy" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/autoencoder_mnist/model_autoencoder_mnist.prototext b/model_zoo/models/autoencoder_mnist/model_autoencoder_mnist.prototext index b55d4eba3c0..dc908c6cee3 100644 --- a/model_zoo/models/autoencoder_mnist/model_autoencoder_mnist.prototext +++ 
b/model_zoo/models/autoencoder_mnist/model_autoencoder_mnist.prototext @@ -34,9 +34,7 @@ model { name: "data" children: "image dummy" data_layout: "model_parallel" - input { - io_buffer: "distributed" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/autoencoder_mnist/model_conv_autoencoder_mnist.prototext b/model_zoo/models/autoencoder_mnist/model_conv_autoencoder_mnist.prototext index 52e667c096e..fb75fc165ec 100644 --- a/model_zoo/models/autoencoder_mnist/model_conv_autoencoder_mnist.prototext +++ b/model_zoo/models/autoencoder_mnist/model_conv_autoencoder_mnist.prototext @@ -47,9 +47,7 @@ model { name: "data" children: "image dummy" data_layout: "data_parallel" - input { - io_buffer: "distributed" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/autoencoder_mnist/vae_mnist.prototext b/model_zoo/models/autoencoder_mnist/vae_mnist.prototext index 7cbf5e52e41..bcba455a50b 100644 --- a/model_zoo/models/autoencoder_mnist/vae_mnist.prototext +++ b/model_zoo/models/autoencoder_mnist/vae_mnist.prototext @@ -51,9 +51,7 @@ model { name: "data" children: "image dummy" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext b/model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext index 9de1859dc9f..5de896c9931 100644 --- a/model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext +++ b/model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext @@ -50,7 +50,6 @@ model { #children: "encode1 recon_data" data_layout: "model_parallel" input { - io_buffer: "distributed" target_mode: "N/A" } } diff --git a/model_zoo/models/candle/pilot1/combo.prototext b/model_zoo/models/candle/pilot1/combo.prototext index 0cbeb722d05..91324eb0aa6 100644 --- a/model_zoo/models/candle/pilot1/combo.prototext +++ b/model_zoo/models/candle/pilot1/combo.prototext @@ -53,7 +53,6 @@ model { children: "data response" data_layout: 
"model_parallel" input { - io_buffer: "distributed" target_mode: "regression" } } diff --git a/model_zoo/models/cosmoflow/model_cosmoflow.prototext b/model_zoo/models/cosmoflow/model_cosmoflow.prototext index 4c1e04dbf3c..f4b6829a637 100644 --- a/model_zoo/models/cosmoflow/model_cosmoflow.prototext +++ b/model_zoo/models/cosmoflow/model_cosmoflow.prototext @@ -24,7 +24,6 @@ model { children: "DARK_MATTER SECRETS_OF_THE_UNIVERSE" data_layout: "data_parallel" input { - io_buffer: "partitioned" target_mode: "regression" } } diff --git a/model_zoo/models/gan/jags/cycle_gan/cycgan_m1.prototext b/model_zoo/models/gan/jags/cycle_gan/cycgan_m1.prototext index 9739bb4db50..26e83f01d6f 100644 --- a/model_zoo/models/gan/jags/cycle_gan/cycgan_m1.prototext +++ b/model_zoo/models/gan/jags/cycle_gan/cycgan_m1.prototext @@ -23,9 +23,7 @@ model { num_epochs: 1 data_layout: "data_parallel" layer { - input { - io_buffer: "partitioned" - } + input {} name: "data" data_layout: "data_parallel" parents: " " diff --git a/model_zoo/models/gan/jags/cycle_gan/cycgan_m2.prototext b/model_zoo/models/gan/jags/cycle_gan/cycgan_m2.prototext index ebe4ff5ff3d..e188b803517 100644 --- a/model_zoo/models/gan/jags/cycle_gan/cycgan_m2.prototext +++ b/model_zoo/models/gan/jags/cycle_gan/cycgan_m2.prototext @@ -23,9 +23,7 @@ model { num_epochs: 1 data_layout: "data_parallel" layer { - input { - io_buffer: "partitioned" - } + input {} name: "data" data_layout: "data_parallel" parents: " " diff --git a/model_zoo/models/gan/jags/cycle_gan/cycgan_m3.prototext b/model_zoo/models/gan/jags/cycle_gan/cycgan_m3.prototext index a354a2d02ef..ee04a006ed8 100644 --- a/model_zoo/models/gan/jags/cycle_gan/cycgan_m3.prototext +++ b/model_zoo/models/gan/jags/cycle_gan/cycgan_m3.prototext @@ -23,9 +23,7 @@ model { num_epochs: 1 data_layout: "data_parallel" layer { - input { - io_buffer: "partitioned" - } + input {} name: "data" data_layout: "data_parallel" parents: " " diff --git 
a/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m1.py b/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m1.py index d03c92ebd04..82cc659dcfe 100644 --- a/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m1.py +++ b/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m1.py @@ -62,7 +62,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t l.weights = w1 + 'linearity' l = new_layer(model, relu1, fc1,'relu') - + l = new_layer(model, fc2, relu1,'fully_connected') l.fully_connected.num_neurons = 16 @@ -71,7 +71,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') l = new_layer(model, fc3, relu2, 'fully_connected') @@ -81,13 +81,13 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - return fc3 + return fc3 #Generator #Weight frozen, no weight sharing def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, tag=''): - #different weights + #different weights fc1 = prefix+'fc1'+tag fc2 = prefix+'fc2'+tag fc3 = prefix+'fc3'+tag @@ -114,7 +114,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou l.freeze = freeze w = new_weights(model, fc2 + 'linearity', 'he_normal_initializer') l.weights = fc2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') next_parent = relu2 if(add_dropout): @@ -128,7 +128,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou l.freeze = freeze w = new_weights(model, fc3 + 'linearity', 'he_normal_initializer') l.weights = fc3 + 'linearity' - + l = new_layer(model, relu3, fc3, 'relu') l = new_layer(model, fc4, relu3, 'fully_connected') @@ -147,8 +147,7 @@ def 
configure_model(model): #####INPUT DATA (including Slices) ### Input data comes from merge features of image (Y) and param (X) l = new_layer(model,'data',' ', 'input') - l.input.io_buffer = 'partitioned' - + slice_points = [0,2500,2511] l = new_layer(model, 'slice_data','data', 'slice') l.children = 'image_data_dummy param_data_id' @@ -167,7 +166,7 @@ def configure_model(model): #ID parameter data (X) l = new_layer(model,'param_data_id','slice_data','identity') - + #D_Loss1 branch #Fake path #freeze generator = True @@ -175,12 +174,12 @@ def configure_model(model): g_sample = add_generator(model, 'param_data_id','gen1', 2500, True,True) #g_sample2= generator(y) g_sample2 = add_generator(model,'image_data_dummy','gen2', 11, True,False) - + #True path (share weights with fake path discriminator) #discriminator(y,x) #data = y + x D_real = add_discriminator(model, 'data','disc1',False, True, '_real') - #CONCAT + #CONCAT # Gsample + x # l = new_layer(model, 'concat_gsample_n_param','','concatenation') @@ -188,15 +187,15 @@ def configure_model(model): #discriminator false path #discriminator(g_sample,x) D_fake = add_discriminator(model,'concat_gsample_n_param','disc1',False, False, '_fake') - + #D_loss2 branch #Reconcatenate x+y l = new_layer(model, 'concat_param_n_img','param_data_id image_data_dummy','concatenation') - + #D_real2 = discriminator2(x,y) D_real2 = add_discriminator(model,'concat_param_n_img','disc2',False, True, '_real') - + #D_fake2 = discriminator2(G_sample2,y) l = new_layer(model, 'concat_gsample2_n_img',g_sample2+ ' image_data_dummy','concatenation') D_fake2 = add_discriminator(model,'concat_gsample2_n_img','disc2', False, False, '_fake') @@ -207,7 +206,7 @@ def configure_model(model): l = new_layer(model, 'disc1_fake_bce', [D_fake, zero.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'disc1_fake_eval','disc1_fake_bce', 'evaluation') - + l = new_layer(model, 'disc2_real_bce', [D_real2, one.name], 'sigmoid_binary_cross_entropy') l = 
new_layer(model, 'disc2_real_eval','disc2_real_bce', 'evaluation') @@ -248,4 +247,3 @@ def configure_model(model): # Export prototext with open(output_proto, 'w') as f: f.write(txtf.MessageToString(pb)) - diff --git a/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m2.py b/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m2.py index 6c28fe41b41..b0298098194 100644 --- a/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m2.py +++ b/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m2.py @@ -61,7 +61,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t l.weights = w1 + 'linearity' l = new_layer(model, relu1, fc1,'relu') - + l = new_layer(model, fc2, relu1,'fully_connected') l.fully_connected.num_neurons = 16 @@ -70,7 +70,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') l = new_layer(model, fc3, relu2, 'fully_connected') @@ -80,12 +80,12 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - return fc3 + return fc3 #Generator def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, add_weight=True, tag=''): - + w1 = prefix+'fc1' w2 = prefix+'fc2' w3 = prefix+'fc3' @@ -119,7 +119,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou if(add_weight): w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') next_parent = relu2 if(add_dropout): @@ -134,7 +134,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - + 
l = new_layer(model, relu3, fc3, 'relu') l = new_layer(model, fc4, relu3, 'fully_connected') @@ -154,8 +154,7 @@ def configure_model(model): #####INPUT DATA (including Slices) ### Input data comes from merge features of image (Y) and param (X) l = new_layer(model,'data',' ', 'input') - l.input.io_buffer = 'partitioned' - + slice_points = [0,2500,2511] l = new_layer(model, 'slice_data','data', 'slice') l.children = 'image_data_dummy param_data_id' @@ -166,13 +165,13 @@ def configure_model(model): #ID parameter data (X) l = new_layer(model,'param_data_id','slice_data','identity') - + #******************************************** #g_sample=generator(x) #do not freeze, train generator to confuse discriminator #_1 => first generator1 to be added, to solve problem of all generator1 having the same name g_sample = add_generator(model, 'param_data_id','gen1', 2500, False,True,True,'_1') - # g_adv1 = discriminator(g_sample,x) + # g_adv1 = discriminator(g_sample,x) l = new_layer(model, 'concat_gsample_n_param',g_sample+' param_data_id','concatenation') #freeze discriminator, fake it as real D_real = add_discriminator(model,'concat_gsample_n_param','disc1',True, True, '_real') @@ -182,7 +181,7 @@ def configure_model(model): one.constant.num_neurons = '1' l = new_layer(model, 'g_adv1_bce', [D_real, one.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'g_adv1_eval','g_adv1_bce', 'evaluation') - + #************************************************ #g_sample2= generator2(y) //freeze g_sample2 = add_generator(model,'image_data_dummy','gen2', 11, True,False,True,'_y') @@ -245,4 +244,3 @@ def configure_model(model): # Export prototext with open(output_proto, 'w') as f: f.write(txtf.MessageToString(pb)) - diff --git a/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m3.py b/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m3.py index 361e939edf8..73cd7f9c134 100644 --- a/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m3.py +++ 
b/model_zoo/models/gan/jags/cycle_gan/generate_cycgan_m3.py @@ -60,7 +60,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t l.weights = w1 + 'linearity' l = new_layer(model, relu1, fc1,'relu') - + l = new_layer(model, fc2, relu1,'fully_connected') l.fully_connected.num_neurons = 16 @@ -69,7 +69,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') l = new_layer(model, fc3, relu2, 'fully_connected') @@ -79,12 +79,12 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - return fc3 + return fc3 #Generator def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, add_weight=True, tag=''): - + w1 = prefix+'fc1' w2 = prefix+'fc2' w3 = prefix+'fc3' @@ -118,7 +118,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou if(add_weight): w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') next_parent = relu2 if(add_dropout): @@ -133,7 +133,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - + l = new_layer(model, relu3, fc3, 'relu') l = new_layer(model, fc4, relu3, 'fully_connected') @@ -153,8 +153,7 @@ def configure_model(model): #####INPUT DATA (including Slices) ### Input data comes from merge features of image (Y) and param (X) l = new_layer(model,'data',' ', 'input') - l.input.io_buffer = 'partitioned' - + slice_points = [0,2500,2511] l = new_layer(model, 'slice_data','data', 'slice') l.children = 'image_data_dummy 
param_data_id' @@ -165,13 +164,13 @@ def configure_model(model): #ID parameter data (X) l = new_layer(model,'param_data_id','slice_data','identity') - + #******************************************** #g_sample2=generator2(y) #do not freeze, train generator to confuse discriminator #_1 => first generator1 to be added, to solve problem of all generator2 having the same name g_sample2 = add_generator(model, 'image_data_dummy','gen2', 11, False,False,True,'_1') - # g_adv21 = discriminator2(g_sample2,y) + # g_adv21 = discriminator2(g_sample2,y) l = new_layer(model, 'concat_gsample2_n_img',g_sample2+' image_data_dummy','concatenation') #freeze discriminator, fake it as real D_real = add_discriminator(model,'concat_gsample2_n_img','disc2',True, True, '_real') @@ -181,7 +180,7 @@ def configure_model(model): one.constant.num_neurons = '1' l = new_layer(model, 'g_adv2_bce', [D_real, one.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'g_adv2_eval','g_adv2_bce', 'evaluation') - + #************************************************ #g_sample2= generator2(y) //train g_sample2 = add_generator(model,'image_data_dummy','gen2', 11, False,False,False,'_y') @@ -246,4 +245,3 @@ def configure_model(model): # Export prototext with open(output_proto, 'w') as f: f.write(txtf.MessageToString(pb)) - diff --git a/model_zoo/models/gan/mnist/adversarial_model.prototext b/model_zoo/models/gan/mnist/adversarial_model.prototext index 17b9ea9040b..c95dba69ad3 100644 --- a/model_zoo/models/gan/mnist/adversarial_model.prototext +++ b/model_zoo/models/gan/mnist/adversarial_model.prototext @@ -68,9 +68,7 @@ model { name: "input" children: "input label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "input" diff --git a/model_zoo/models/gan/mnist/discriminator_model.prototext b/model_zoo/models/gan/mnist/discriminator_model.prototext index dc19a6c8594..98e17c9b396 100644 --- a/model_zoo/models/gan/mnist/discriminator_model.prototext +++ 
b/model_zoo/models/gan/mnist/discriminator_model.prototext @@ -61,9 +61,7 @@ model { name: "input" children: "input label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "input" diff --git a/model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext b/model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext index 6e20a7f4e47..dff2f6b5185 100644 --- a/model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext @@ -72,14 +72,12 @@ model { # children: "encode1 reconstruction" # data_layout: "data_parallel" # input { - # io_buffer: "distributed" # target_mode: "reconstruction" # } #} layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc.prototext b/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc.prototext index 793354e53bb..16008c2b4f5 100644 --- a/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc.prototext @@ -58,7 +58,6 @@ model { #Layer from cycle GAN layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc2.prototext b/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc2.prototext index f3328566bfb..e7ae8d72727 100644 --- a/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc2.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc2.prototext @@ -54,7 +54,6 @@ model { #Layer from cycle GAN layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/ae_cycle_gan/3models/cycle_gan.prototext b/model_zoo/models/jag/ae_cycle_gan/3models/cycle_gan.prototext index 1f6856a9437..bd3d05794d0 100644 --- a/model_zoo/models/jag/ae_cycle_gan/3models/cycle_gan.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/3models/cycle_gan.prototext @@ -50,7 +50,6 @@ model { data_layout: "data_parallel" layer { 
input { - io_buffer: "partitioned" data_set_per_model: true target_mode: "N/A" } diff --git a/model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext b/model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext index 31ad29381dc..9e3195a1c65 100644 --- a/model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext @@ -28,7 +28,6 @@ model { #### Data space layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext b/model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext index 04a671b9d7c..e6b73ba7cdc 100644 --- a/model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext @@ -19,7 +19,6 @@ model { data_layout: "model_parallel" layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext b/model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext index ae28be52944..eaf041c41bd 100644 --- a/model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext @@ -19,7 +19,6 @@ model { data_layout: "model_parallel" layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/ae_cycle_gan/vae1.prototext b/model_zoo/models/jag/ae_cycle_gan/vae1.prototext index 3fe497acbaf..8e790c22092 100644 --- a/model_zoo/models/jag/ae_cycle_gan/vae1.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/vae1.prototext @@ -57,14 +57,12 @@ model { # children: "encode1 reconstruction" # data_layout: "model_parallel" # input { - # io_buffer: "distributed" # target_mode: "reconstruction" # } #} layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "input" diff --git a/model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext b/model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext index c377313ac69..7f271d3a4a6 100644 --- 
a/model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext @@ -61,7 +61,6 @@ model { #Layer from cycle GAN layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "input" diff --git a/model_zoo/models/jag/cycle_gan/cycgan_m1.prototext b/model_zoo/models/jag/cycle_gan/cycgan_m1.prototext index 7c0fb4c7ef4..574fa83ed20 100644 --- a/model_zoo/models/jag/cycle_gan/cycgan_m1.prototext +++ b/model_zoo/models/jag/cycle_gan/cycgan_m1.prototext @@ -24,7 +24,6 @@ model { data_layout: "data_parallel" layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/cycle_gan/cycgan_m2.prototext b/model_zoo/models/jag/cycle_gan/cycgan_m2.prototext index 5af382ab027..6fd5b2caa07 100644 --- a/model_zoo/models/jag/cycle_gan/cycgan_m2.prototext +++ b/model_zoo/models/jag/cycle_gan/cycgan_m2.prototext @@ -16,7 +16,6 @@ model { data_layout: "data_parallel" layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/cycle_gan/cycgan_m3.prototext b/model_zoo/models/jag/cycle_gan/cycgan_m3.prototext index b8e482078f1..6917f1767a1 100644 --- a/model_zoo/models/jag/cycle_gan/cycgan_m3.prototext +++ b/model_zoo/models/jag/cycle_gan/cycgan_m3.prototext @@ -16,7 +16,6 @@ model { data_layout: "data_parallel" layer { input { - io_buffer: "partitioned" target_mode: "N/A" } name: "data" diff --git a/model_zoo/models/jag/cycle_gan/generate_cycgan_m1.py b/model_zoo/models/jag/cycle_gan/generate_cycgan_m1.py index 2abc985a995..c089a2a49ca 100644 --- a/model_zoo/models/jag/cycle_gan/generate_cycgan_m1.py +++ b/model_zoo/models/jag/cycle_gan/generate_cycgan_m1.py @@ -62,7 +62,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t l.weights = w1 + 'linearity' l = new_layer(model, relu1, fc1,'relu') - + l = new_layer(model, fc2, relu1,'fully_connected') l.fully_connected.num_neurons = 16 @@ -71,7 +71,7 @@ def 
add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') l = new_layer(model, fc3, relu2, 'fully_connected') @@ -81,14 +81,14 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - return fc3 + return fc3 #Generator #Weight frozen, no weight sharing #todo, handle weight sharing def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, tag=''): - #different weights + #different weights fc1 = prefix+'fc1'+tag fc2 = prefix+'fc2'+tag fc3 = prefix+'fc3'+tag @@ -115,7 +115,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou l.freeze = freeze w = new_weights(model, fc2 + 'linearity', 'he_normal_initializer') l.weights = fc2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') next_parent = relu2 if(add_dropout): @@ -129,7 +129,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou l.freeze = freeze w = new_weights(model, fc3 + 'linearity', 'he_normal_initializer') l.weights = fc3 + 'linearity' - + l = new_layer(model, relu3, fc3, 'relu') l = new_layer(model, fc4, relu3, 'fully_connected') @@ -148,8 +148,7 @@ def configure_model(model): #####INPUT DATA (including Slices) ### Input data comes from merge features of image (Y) and param (X) l = new_layer(model,'data',' ', 'input') - l.input.io_buffer = 'partitioned' - + slice_points = [0,2500,2511] l = new_layer(model, 'slice_data','data', 'slice') l.children = 'image_data_dummy param_data_id' @@ -162,13 +161,13 @@ def configure_model(model): one = new_layer(model,'one','','constant') one.constant.value = 1.0 one.constant.num_neurons = '1' - + #ID Image (Y) data l = 
new_layer(model,'image_data_dummy','slice_data','identity') #ID parameter data (X) l = new_layer(model,'param_data_id','slice_data','identity') - + #D_Loss1 branch #Fake path #def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, tag=''): @@ -177,12 +176,12 @@ def configure_model(model): g_sample = add_generator(model, 'param_data_id','gen1', 2500, True,True) #g_sample2= generator(y) g_sample2 = add_generator(model,'image_data_dummy','gen2', 11, True,False) - + #True path (share weights with fake path discriminator) #discriminator(y,x) #data = y + x D_real = add_discriminator(model, 'data','disc1',False, True, '_real') - #CONCAT + #CONCAT # Gsample + x # l = new_layer(model, 'concat_gsample_n_param','','concatenation') @@ -191,16 +190,16 @@ def configure_model(model): #question: how to deal with weight sharing? #discriminator(g_sample,x) D_fake = add_discriminator(model,'concat_gsample_n_param','disc1',False, False, '_fake') - + #obectives here (D_real, D_fake) #D_loss2 branch #Reconcatenate x+y l = new_layer(model, 'concat_param_n_img','param_data_id image_data_dummy','concatenation') - + #D_real2 = discriminator2(x,y) D_real2 = add_discriminator(model,'concat_param_n_img','disc2',False, True, '_real') - + #D_fake2 = discriminator2(G_sample2,y) l = new_layer(model, 'concat_gsample2_n_img',g_sample2+ ' image_data_dummy','concatenation') D_fake2 = add_discriminator(model,'concat_gsample2_n_img','disc2', False, False, '_fake') @@ -211,7 +210,7 @@ def configure_model(model): l = new_layer(model, 'disc1_fake_bce', [D_fake, zero.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'disc1_fake_eval','disc1_fake_bce', 'evaluation') - + l = new_layer(model, 'disc2_real_bce', [D_real2, one.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'disc2_real_eval','disc2_real_bce', 'evaluation') @@ -252,4 +251,3 @@ def configure_model(model): # Export prototext with open(output_proto, 'w') as f: f.write(txtf.MessageToString(pb)) - 
diff --git a/model_zoo/models/jag/cycle_gan/generate_cycgan_m2.py b/model_zoo/models/jag/cycle_gan/generate_cycgan_m2.py index 80447d1329d..de8b704f877 100644 --- a/model_zoo/models/jag/cycle_gan/generate_cycgan_m2.py +++ b/model_zoo/models/jag/cycle_gan/generate_cycgan_m2.py @@ -62,7 +62,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t l.weights = w1 + 'linearity' l = new_layer(model, relu1, fc1,'relu') - + l = new_layer(model, fc2, relu1,'fully_connected') l.fully_connected.num_neurons = 16 @@ -71,7 +71,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') l = new_layer(model, fc3, relu2, 'fully_connected') @@ -81,14 +81,14 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - return fc3 + return fc3 #Generator #Weight frozen, no weight sharing #todo, handle weight sharing def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, add_weight=True, tag=''): - + w1 = prefix+'fc1' w2 = prefix+'fc2' w3 = prefix+'fc3' @@ -122,7 +122,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou if(add_weight): w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') next_parent = relu2 if(add_dropout): @@ -137,7 +137,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - + l = new_layer(model, relu3, fc3, 'relu') l = new_layer(model, fc4, relu3, 'fully_connected') @@ -157,8 +157,7 @@ def configure_model(model): #####INPUT DATA 
(including Slices) ### Input data comes from merge features of image (Y) and param (X) l = new_layer(model,'data',' ', 'input') - l.input.io_buffer = 'partitioned' - + slice_points = [0,2500,2511] l = new_layer(model, 'slice_data','data', 'slice') l.children = 'image_data_dummy param_data_id' @@ -169,13 +168,13 @@ def configure_model(model): #ID parameter data (X) l = new_layer(model,'param_data_id','slice_data','identity') - + #******************************************** #g_sample=generator(x) #do not freeze, train generator to confuse discriminator #_1 => first generator1 to be added, to solve problem of all generator1 having the same name g_sample = add_generator(model, 'param_data_id','gen1', 2500, False,True,True,'_1') - # g_adv1 = discriminator(g_sample,x) + # g_adv1 = discriminator(g_sample,x) l = new_layer(model, 'concat_gsample_n_param',g_sample+' param_data_id','concatenation') #freeze discriminator, fake it as real D_real = add_discriminator(model,'concat_gsample_n_param','disc1',True, True, '_real') @@ -185,7 +184,7 @@ def configure_model(model): one.constant.num_neurons = '1' l = new_layer(model, 'g_adv1_bce', [D_real, one.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'g_adv1_eval','g_adv1_bce', 'evaluation') - + #************************************************ #g_sample2= generator2(y) //freeze g_sample2 = add_generator(model,'image_data_dummy','gen2', 11, True,False,True,'_y') @@ -254,4 +253,3 @@ def configure_model(model): # Export prototext with open(output_proto, 'w') as f: f.write(txtf.MessageToString(pb)) - diff --git a/model_zoo/models/jag/cycle_gan/generate_cycgan_m3.py b/model_zoo/models/jag/cycle_gan/generate_cycgan_m3.py index ee0a9c7b198..3a14b8b6da5 100644 --- a/model_zoo/models/jag/cycle_gan/generate_cycgan_m3.py +++ b/model_zoo/models/jag/cycle_gan/generate_cycgan_m3.py @@ -62,7 +62,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t l.weights = w1 + 'linearity' l = new_layer(model, 
relu1, fc1,'relu') - + l = new_layer(model, fc2, relu1,'fully_connected') l.fully_connected.num_neurons = 16 @@ -71,7 +71,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') l = new_layer(model, fc3, relu2, 'fully_connected') @@ -81,14 +81,14 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - return fc3 + return fc3 #Generator #Weight frozen, no weight sharing #todo, handle weight sharing def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, add_weight=True, tag=''): - + w1 = prefix+'fc1' w2 = prefix+'fc2' w3 = prefix+'fc3' @@ -122,7 +122,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou if(add_weight): w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') next_parent = relu2 if(add_dropout): @@ -137,7 +137,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - + l = new_layer(model, relu3, fc3, 'relu') l = new_layer(model, fc4, relu3, 'fully_connected') @@ -157,8 +157,7 @@ def configure_model(model): #####INPUT DATA (including Slices) ### Input data comes from merge features of image (Y) and param (X) l = new_layer(model,'data',' ', 'input') - l.input.io_buffer = 'partitioned' - + slice_points = [0,2500,2511] l = new_layer(model, 'slice_data','data', 'slice') l.children = 'image_data_dummy param_data_id' @@ -169,13 +168,13 @@ def configure_model(model): #ID parameter data (X) l = new_layer(model,'param_data_id','slice_data','identity') - + 
#******************************************** #g_sample2=generator2(y) #do not freeze, train generator to confuse discriminator #_1 => first generator1 to be added, to solve problem of all generator2 having the same name g_sample2 = add_generator(model, 'image_data_dummy','gen2', 11, False,False,True,'_1') - # g_adv21 = discriminator2(g_sample2,y) + # g_adv21 = discriminator2(g_sample2,y) l = new_layer(model, 'concat_gsample2_n_img',g_sample2+' image_data_dummy','concatenation') #freeze discriminator, fake it as real D_real = add_discriminator(model,'concat_gsample2_n_img','disc2',True, True, '_real') @@ -185,7 +184,7 @@ def configure_model(model): one.constant.num_neurons = '1' l = new_layer(model, 'g_adv2_bce', [D_real, one.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'g_adv2_eval','g_adv2_bce', 'evaluation') - + #************************************************ #g_sample2= generator2(y) //train g_sample2 = add_generator(model,'image_data_dummy','gen2', 11, False,False,False,'_y') @@ -256,4 +255,3 @@ def configure_model(model): # Export prototext with open(output_proto, 'w') as f: f.write(txtf.MessageToString(pb)) - diff --git a/model_zoo/models/jag/gan/cyclic/cyclic_gan_model.prototext b/model_zoo/models/jag/gan/cyclic/cyclic_gan_model.prototext index 7a8f12f6cd3..235a00313d7 100644 --- a/model_zoo/models/jag/gan/cyclic/cyclic_gan_model.prototext +++ b/model_zoo/models/jag/gan/cyclic/cyclic_gan_model.prototext @@ -46,7 +46,6 @@ model { data_layout: "data_parallel" layer { input { - io_buffer: "partitioned" data_set_per_model: true target_mode: "N/A" } diff --git a/model_zoo/models/jag/gan/cyclic/generate_model.py b/model_zoo/models/jag/gan/cyclic/generate_model.py index 5c1e22c9c9d..1cd1032cd38 100644 --- a/model_zoo/models/jag/gan/cyclic/generate_model.py +++ b/model_zoo/models/jag/gan/cyclic/generate_model.py @@ -63,7 +63,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t l.weights = w1 + 'linearity' l = 
new_layer(model, relu1, fc1,'relu') - + l = new_layer(model, fc2, relu1,'fully_connected') l.fully_connected.num_neurons = 16 @@ -73,7 +73,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') l = new_layer(model, fc3, relu2, 'fully_connected') @@ -83,16 +83,16 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - return fc3 + return fc3 #Generator #Weight frozen, no weight sharing #todo, handle weight sharing #@todo, use default weight/bias, adding weights cause bad thing to happen with LTFB except you add/transfer both w and b -#@todo, generally automate manual editing made in debugging process +#@todo, generally automate manual editing made in debugging process def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, tag=''): - #different weights + #different weights fc1 = prefix+'fc1'+tag fc2 = prefix+'fc2'+tag fc3 = prefix+'fc3'+tag @@ -119,7 +119,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou l.freeze = freeze w = new_weights(model, fc2 + 'linearity', 'he_normal_initializer') l.weights = fc2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') next_parent = relu2 if(add_dropout): @@ -133,7 +133,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou l.freeze = freeze w = new_weights(model, fc3 + 'linearity', 'he_normal_initializer') l.weights = fc3 + 'linearity' - + l = new_layer(model, relu3, fc3, 'relu') l = new_layer(model, fc4, relu3, 'fully_connected') @@ -152,8 +152,7 @@ def configure_model(model): #####INPUT DATA (including Slices) ### Input data comes from merge features of image (Y) and param (X) l = 
new_layer(model,'data',' ', 'input') - l.input.io_buffer = 'partitioned' - + slice_points = [0,2500,2511] l = new_layer(model, 'slice_data','data', 'slice') l.children = 'image_data_dummy param_data_id' @@ -172,8 +171,8 @@ def configure_model(model): #ID parameter data (X) l = new_layer(model,'param_data_id','slice_data','identity') - - # Forward Model + + # Forward Model #D_Loss1 branch #Fake path #def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, tag=''): @@ -181,15 +180,15 @@ def configure_model(model): #forward generator x->y' #g_sample=generator1(x) g_sample = add_generator(model, 'param_data_id','gen1', 2500, False,True) - + #True path (share weights with fake path discriminator) #discriminator(y,x) #data = y + x #def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, tag=''): #forward_model D_real = add_discriminator(model, 'data','d1',False, True, '_real') - - #CONCAT + + #CONCAT # Gsample + x # l = new_layer(model, 'concat_gsample_n_param','','concatenation') @@ -197,17 +196,17 @@ def configure_model(model): l.children = 'd1_stop_gradient d2_dummy' #discriminator false path #question: how to deal with d1 weight sharing? //Dreal and Dfake weights are shared? 
- #And copied to discriminator (d2) on adversarial path at every iteration + #And copied to discriminator (d2) on adversarial path at every iteration #discriminator(g_sample,x) #add stop gradient, so gradient doesnt go to generator on Dfake path - l = new_layer(model, 'd1_stop_gradient','concat_gsample_n_param', 'stop_gradient') + l = new_layer(model, 'd1_stop_gradient','concat_gsample_n_param', 'stop_gradient') #D_fake = add_discriminator(model,'concat_gsample_n_param','disc1',False, False, '_fake') D_fake = add_discriminator(model,'d1_stop_gradient','d1',False, False, '_fake') #Objective term (and metric) layers here l = new_layer(model, 'disc1_real_bce', [D_real, one.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'disc1_fake_bce', [D_fake, zero.name], 'sigmoid_binary_cross_entropy') - + #Adversarial part #replicate discriminator (freeze it), weight will be copied through replace_layer callback, fake it as real #add identity/dummy layer that is a copy of concat @@ -222,11 +221,11 @@ def configure_model(model): l = new_layer(model, 'gsample_minus_y', ' ', 'weighted_sum') l.parents = g_sample+' image_data_dummy' l.weighted_sum.scaling_factors = '1 -1' - + l = new_layer(model,'l_l2_y', 'gsample_minus_y','l2_norm2') #####Inverse Model - + #inverse generator y->x' #g_sample2=generator2(y) g_sample2 = add_generator(model, 'image_data_dummy','gen2', 11, False,False) @@ -236,7 +235,7 @@ def configure_model(model): l.parents = 'param_data_id image_data_dummy' #l.children = ' ' D_inv_real = add_discriminator(model, 'concat_param_n_img','d1_inv',False, True, '_real') - #CONCAT + #CONCAT # Gsample2 (that is x') + y # l = new_layer(model, 'concat_gsample2_n_img','','concatenation') @@ -244,7 +243,7 @@ def configure_model(model): l.children = 'd1_inv_stop_gradient d2_inv_dummy' #discriminator(g_sample2,y) #add stop gradient, so gradient doesnt go to generator on this path - l = new_layer(model, 'd1_inv_stop_gradient','concat_gsample2_n_img', 'stop_gradient') + l = 
new_layer(model, 'd1_inv_stop_gradient','concat_gsample2_n_img', 'stop_gradient') D_inv_fake = add_discriminator(model,'d1_inv_stop_gradient','d1_inv',False, False, '_fake') #Objective term (and metric) layers here l = new_layer(model, 'disc1_inv_real_bce', [D_inv_real, one.name], 'sigmoid_binary_cross_entropy') @@ -260,7 +259,7 @@ def configure_model(model): l = new_layer(model, 'gsample2_minus_x', ' ', 'weighted_sum') l.parents = g_sample2+' param_data_id' l.weighted_sum.scaling_factors = '1 -1' - + l = new_layer(model,'l_l2_x', 'gsample2_minus_x','l2_norm2') if __name__ == "__main__": @@ -296,4 +295,3 @@ def configure_model(model): # Export prototext with open(output_proto, 'w') as f: f.write(txtf.MessageToString(pb)) - diff --git a/model_zoo/models/jag/gan/vanilla/gan.prototext b/model_zoo/models/jag/gan/vanilla/gan.prototext index df40cdcc1db..7bfa4e4d3d8 100644 --- a/model_zoo/models/jag/gan/vanilla/gan.prototext +++ b/model_zoo/models/jag/gan/vanilla/gan.prototext @@ -52,7 +52,6 @@ model { data_layout: "data_parallel" layer { input { - io_buffer: "partitioned" data_set_per_model: true target_mode: "N/A" } diff --git a/model_zoo/models/jag/gan/vanilla/generate_gan.py b/model_zoo/models/jag/gan/vanilla/generate_gan.py index 277f9063fe9..26eae2f3c1e 100644 --- a/model_zoo/models/jag/gan/vanilla/generate_gan.py +++ b/model_zoo/models/jag/gan/vanilla/generate_gan.py @@ -62,7 +62,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t l.weights = w1 + 'linearity' l = new_layer(model, relu1, fc1,'relu') - + l = new_layer(model, fc2, relu1,'fully_connected') l.fully_connected.num_neurons = 16 @@ -71,7 +71,7 @@ def add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w2 + 'linearity', 'he_normal_initializer') l.weights = w2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') l = new_layer(model, fc3, relu2, 'fully_connected') @@ -81,14 +81,14 @@ def 
add_discriminator(model,disc_input, prefix, freeze=False, add_weight=True, t if(add_weight) : w = new_weights(model, w3 + 'linearity', 'he_normal_initializer') l.weights = w3 + 'linearity' - return fc3 + return fc3 #Generator #Weight frozen, no weight sharing #todo, handle weight sharing def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, tag=''): - #different weights + #different weights fc1 = prefix+'fc1'+tag fc2 = prefix+'fc2'+tag fc3 = prefix+'fc3'+tag @@ -115,7 +115,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou l.freeze = freeze w = new_weights(model, fc2 + 'linearity', 'he_normal_initializer') l.weights = fc2 + 'linearity' - + l = new_layer(model, relu2, fc2,'relu') next_parent = relu2 if(add_dropout): @@ -129,7 +129,7 @@ def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropou l.freeze = freeze w = new_weights(model, fc3 + 'linearity', 'he_normal_initializer') l.weights = fc3 + 'linearity' - + l = new_layer(model, relu3, fc3, 'relu') l = new_layer(model, fc4, relu3, 'fully_connected') @@ -148,8 +148,7 @@ def configure_model(model): #####INPUT DATA (including Slices) ### Input data comes from merge features of image (Y) and param (X) l = new_layer(model,'data',' ', 'input') - l.input.io_buffer = 'partitioned' - + slice_points = [0,2500,2511] l = new_layer(model, 'slice_data','data', 'slice') l.children = 'image_data_dummy param_data_id' @@ -168,20 +167,20 @@ def configure_model(model): #ID parameter data (X) l = new_layer(model,'param_data_id','slice_data','identity') - + #D_Loss1 branch #Fake path #def add_generator(model, gen_input, prefix, output_dim, freeze=False, add_dropout=True, tag=''): #freeze generator = False #g_sample=generator1(x) g_sample = add_generator(model, 'param_data_id','gen1', 2500, False,True) - + #True path (share weights with fake path discriminator) #discriminator(y,x) #data = y + x #def add_discriminator(model,disc_input, prefix, 
freeze=False, add_weight=True, tag=''): D_real = add_discriminator(model, 'data','d1',False, True, '_real') - #CONCAT + #CONCAT # Gsample + x # l = new_layer(model, 'concat_gsample_n_param','','concatenation') @@ -191,7 +190,7 @@ def configure_model(model): #question: how to deal with weight sharing? #discriminator(g_sample,x) #add stop gradient, so gradient doesnt go to generator on this path - l = new_layer(model, 'd1_stop_gradient','concat_gsample_n_param', 'stop_gradient') + l = new_layer(model, 'd1_stop_gradient','concat_gsample_n_param', 'stop_gradient') #D_fake = add_discriminator(model,'concat_gsample_n_param','disc1',False, False, '_fake') D_fake = add_discriminator(model,'d1_stop_gradient','d1',False, False, '_fake') @@ -212,7 +211,7 @@ def configure_model(model): #fake as real l = new_layer(model, 'g_adv1_bce', [D_real2, one.name], 'sigmoid_binary_cross_entropy') l = new_layer(model, 'g_adv1_eval','g_adv1_bce', 'evaluation') - + if __name__ == "__main__": @@ -247,4 +246,3 @@ def configure_model(model): # Export prototext with open(output_proto, 'w') as f: f.write(txtf.MessageToString(pb)) - diff --git a/model_zoo/models/jag/vae_fcn.prototext b/model_zoo/models/jag/vae_fcn.prototext index 859e6503512..abaef603c1b 100644 --- a/model_zoo/models/jag/vae_fcn.prototext +++ b/model_zoo/models/jag/vae_fcn.prototext @@ -68,7 +68,6 @@ model { children: "data dummy" data_layout: "model_parallel" input { - io_buffer: "distributed" target_mode: "reconstruction" } } diff --git a/model_zoo/models/jag/wae.prototext b/model_zoo/models/jag/wae.prototext index 5ab9cb5c0ad..0c09e386d9e 100644 --- a/model_zoo/models/jag/wae.prototext +++ b/model_zoo/models/jag/wae.prototext @@ -38,7 +38,6 @@ model { data_layout: "data_parallel" layer { input { - io_buffer: "partitioned" data_set_per_model: true target_mode: "N/A" } diff --git a/model_zoo/models/lenet_mnist/model_lenet_mnist.prototext b/model_zoo/models/lenet_mnist/model_lenet_mnist.prototext index dc806c51748..99a763c52b6 
100644 --- a/model_zoo/models/lenet_mnist/model_lenet_mnist.prototext +++ b/model_zoo/models/lenet_mnist/model_lenet_mnist.prototext @@ -92,9 +92,7 @@ model { name: "data" children: "images labels" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { name: "images" diff --git a/model_zoo/models/molecular_autoencoder_candle_pilot2/model_conv_molecular_autoencoder_pilot2.prototext b/model_zoo/models/molecular_autoencoder_candle_pilot2/model_conv_molecular_autoencoder_pilot2.prototext index bd172e4864d..8b6bb672ceb 100644 --- a/model_zoo/models/molecular_autoencoder_candle_pilot2/model_conv_molecular_autoencoder_pilot2.prototext +++ b/model_zoo/models/molecular_autoencoder_candle_pilot2/model_conv_molecular_autoencoder_pilot2.prototext @@ -39,9 +39,7 @@ model { name: "input" children: "data label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "input" diff --git a/model_zoo/models/molecular_autoencoder_candle_pilot2/model_conv_molecular_bead_autoencoder_pilot2.prototext b/model_zoo/models/molecular_autoencoder_candle_pilot2/model_conv_molecular_bead_autoencoder_pilot2.prototext index 9fb642a23f6..2c503c625ab 100644 --- a/model_zoo/models/molecular_autoencoder_candle_pilot2/model_conv_molecular_bead_autoencoder_pilot2.prototext +++ b/model_zoo/models/molecular_autoencoder_candle_pilot2/model_conv_molecular_bead_autoencoder_pilot2.prototext @@ -35,9 +35,7 @@ model { name: "input" children: "data label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "input" diff --git a/model_zoo/models/molecular_autoencoder_candle_pilot2/model_molecular_autoencoder_pilot2.prototext b/model_zoo/models/molecular_autoencoder_candle_pilot2/model_molecular_autoencoder_pilot2.prototext index ffc2b49bf54..91810258bd0 100644 --- a/model_zoo/models/molecular_autoencoder_candle_pilot2/model_molecular_autoencoder_pilot2.prototext +++ 
b/model_zoo/models/molecular_autoencoder_candle_pilot2/model_molecular_autoencoder_pilot2.prototext @@ -35,9 +35,7 @@ model { name: "input" children: "data label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "input" diff --git a/model_zoo/models/python/keras/kerbann.py b/model_zoo/models/python/keras/kerbann.py index 2b471c496fd..ba304cc5526 100644 --- a/model_zoo/models/python/keras/kerbann.py +++ b/model_zoo/models/python/keras/kerbann.py @@ -28,7 +28,6 @@ def keras_to_lbann(model, num_classes, l = pb.model.layer.add() l.name = model.input_names[0] exec('l.input.SetInParent()') - l.input.io_buffer = "partitioned" setup_layers(model) # allow user to specify we need a reconstruciton target layer target_layer(model,target) @@ -115,7 +114,6 @@ def setup_callbacks(callbacks): # IO layers def input(keras_layer, pb_layer): exec('pb_layer.input.SetInParent()') - pb_layer.input.io_buffer = "partitioned" def target_layer(model, target): l = pb.model.layer.add() diff --git a/model_zoo/models/python/keras/mnist_cnn.prototext b/model_zoo/models/python/keras/mnist_cnn.prototext index b47899c70ad..f32940773dc 100644 --- a/model_zoo/models/python/keras/mnist_cnn.prototext +++ b/model_zoo/models/python/keras/mnist_cnn.prototext @@ -14,9 +14,7 @@ model { } data_layout: "data_parallel" layer { - input { - io_buffer: "partitioned" - } + input {} name: "conv2d_1_input" } layer { diff --git a/model_zoo/models/resnet50/model_resnet50.prototext b/model_zoo/models/resnet50/model_resnet50.prototext index e87b5bdf862..520bb8aa11a 100644 --- a/model_zoo/models/resnet50/model_resnet50.prototext +++ b/model_zoo/models/resnet50/model_resnet50.prototext @@ -63,9 +63,7 @@ model { name: "data" children: "images labels" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { name: "images" diff --git a/model_zoo/models/siamese/finetune-cub/model_cub.prototext 
b/model_zoo/models/siamese/finetune-cub/model_cub.prototext index 3a00b2ccc7e..6d9e9761e1f 100644 --- a/model_zoo/models/siamese/finetune-cub/model_cub.prototext +++ b/model_zoo/models/siamese/finetune-cub/model_cub.prototext @@ -319,7 +319,6 @@ model { children: "data_new label_new" data_layout: "data_parallel" input { - io_buffer: "partitioned" } } layer { diff --git a/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm.prototext b/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm.prototext index 25b06aac3cd..091ed4b5acd 100644 --- a/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm.prototext +++ b/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm.prototext @@ -349,9 +349,7 @@ model { name: "input_new" children: "data_new label_new" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "input_new" diff --git a/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm_transferred_and_frozen.prototext b/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm_transferred_and_frozen.prototext index 2c3f779404f..27d8cbe6f96 100644 --- a/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm_transferred_and_frozen.prototext +++ b/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm_transferred_and_frozen.prototext @@ -533,9 +533,7 @@ model { name: "input_new" children: "data_new label_new" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "input_new" diff --git a/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext b/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext index 2286e2dccfa..c1c959f41af 100644 --- a/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext +++ b/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext @@ -572,9 +572,7 @@ model { name: "input" children: "slice label" 
data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "input" diff --git a/model_zoo/models/simple_mnist/model_mnist_simple_1.prototext b/model_zoo/models/simple_mnist/model_mnist_simple_1.prototext index bb37f09b3bf..77a1c7ed256 100644 --- a/model_zoo/models/simple_mnist/model_mnist_simple_1.prototext +++ b/model_zoo/models/simple_mnist/model_mnist_simple_1.prototext @@ -56,9 +56,7 @@ model { name: "data" children: "image label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/simple_mnist/model_mnist_simple_2.prototext b/model_zoo/models/simple_mnist/model_mnist_simple_2.prototext index 23f42530e21..0be924650f9 100644 --- a/model_zoo/models/simple_mnist/model_mnist_simple_2.prototext +++ b/model_zoo/models/simple_mnist/model_mnist_simple_2.prototext @@ -56,9 +56,7 @@ model { name: "data" children: "image label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "data" diff --git a/model_zoo/models/vram/generate_dram.py b/model_zoo/models/vram/generate_dram.py index d0778b0846d..9dd68656da4 100755 --- a/model_zoo/models/vram/generate_dram.py +++ b/model_zoo/models/vram/generate_dram.py @@ -146,7 +146,7 @@ class LstmCell: input_fc = None output_fc = None cell_fc = None - + def __init__(self, name, size, model): self.name = name self.size = size @@ -216,7 +216,6 @@ def configure_model(model): # Initialize input data = new_layer(model, "data", [], "input", "cpu") - data.input.io_buffer = "partitioned" image = new_layer(model, "image", data, "split") label = new_layer(model, "label", data, "split") data.children = str_list([image.name, label.name]) @@ -267,7 +266,7 @@ def configure_model(model): # Classification network components class_network = FullyConnectedCell("class_prob", label_dims[0], model, "softmax", "glorot_normal_initializer", False) - + # Construct unrolled model for step in 
range(unroll_depth): @@ -320,13 +319,13 @@ def configure_model(model): glimpse3.pooling.pool_dims_i = 32 glimpse3.pooling.pool_strides_i = glimpse3.pooling.pool_dims_i glimpse3.pooling.pool_mode = "average" - glimpse = new_layer(model, "glimpse_step%d" % step, + glimpse = new_layer(model, "glimpse_step%d" % step, [glimpse1, glimpse2, glimpse3], "concatenation") - glimpse = new_layer(model, "glimpse_flat_step%d" % step, + glimpse = new_layer(model, "glimpse_flat_step%d" % step, glimpse, "reshape") glimpse.reshape.num_dims = 1 glimpse.reshape.dims = str_list([128 * 3]) - + # Recurrent network h1 = lstm1(glimpse) h2 = lstm2(h1) @@ -366,7 +365,7 @@ def configure_model(model): met.layer_metric.name = "top-5 categorical accuracy (step %d)" % step met.layer_metric.layer = acc5.name met.layer_metric.unit = "%" - + # Objective function class_obj = new_layer(model, "classification_cross_entropy_step%d" % step, [class_prob, label], "cross_entropy") @@ -383,8 +382,8 @@ def configure_model(model): obj = model.objective_function.layer_term.add() obj.scale_factor = 1.0 obj.layer = locy_obj.name - - + + if __name__ == "__main__": # Make sure protobuf Python implementation is built diff --git a/model_zoo/tests/layer_tests/model_channelwise_mean.prototext b/model_zoo/tests/layer_tests/model_channelwise_mean.prototext index b683d82023d..d530e311f1b 100644 --- a/model_zoo/tests/layer_tests/model_channelwise_mean.prototext +++ b/model_zoo/tests/layer_tests/model_channelwise_mean.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_clamp.prototext b/model_zoo/tests/layer_tests/model_clamp.prototext index bb27c22d471..b02fd5919ec 100644 --- a/model_zoo/tests/layer_tests/model_clamp.prototext +++ b/model_zoo/tests/layer_tests/model_clamp.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: 
"partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_covariance.prototext b/model_zoo/tests/layer_tests/model_covariance.prototext index 972ae7ea9d3..1324f945ec8 100644 --- a/model_zoo/tests/layer_tests/model_covariance.prototext +++ b/model_zoo/tests/layer_tests/model_covariance.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_elu.prototext b/model_zoo/tests/layer_tests/model_elu.prototext index e0366e63736..ce20c7cb110 100644 --- a/model_zoo/tests/layer_tests/model_elu.prototext +++ b/model_zoo/tests/layer_tests/model_elu.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_identity.prototext b/model_zoo/tests/layer_tests/model_identity.prototext index 1dbb0ab0f71..98eb617f70e 100644 --- a/model_zoo/tests/layer_tests/model_identity.prototext +++ b/model_zoo/tests/layer_tests/model_identity.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_l1_norm.prototext b/model_zoo/tests/layer_tests/model_l1_norm.prototext index b874c33e891..9192a686411 100644 --- a/model_zoo/tests/layer_tests/model_l1_norm.prototext +++ b/model_zoo/tests/layer_tests/model_l1_norm.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_l2_norm2.prototext b/model_zoo/tests/layer_tests/model_l2_norm2.prototext index 1c879529096..07c72d2ef85 100644 --- a/model_zoo/tests/layer_tests/model_l2_norm2.prototext +++ b/model_zoo/tests/layer_tests/model_l2_norm2.prototext @@ -49,9 +49,7 
@@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { diff --git a/model_zoo/tests/layer_tests/model_leaky_relu.prototext b/model_zoo/tests/layer_tests/model_leaky_relu.prototext index 53f4c7d6afc..cc6473695cb 100644 --- a/model_zoo/tests/layer_tests/model_leaky_relu.prototext +++ b/model_zoo/tests/layer_tests/model_leaky_relu.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_log_sigmoid.prototext b/model_zoo/tests/layer_tests/model_log_sigmoid.prototext index 882b3f905a6..b3e58f7fd15 100644 --- a/model_zoo/tests/layer_tests/model_log_sigmoid.prototext +++ b/model_zoo/tests/layer_tests/model_log_sigmoid.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_log_softmax.prototext b/model_zoo/tests/layer_tests/model_log_softmax.prototext index 1b4ef3099d9..12555305705 100644 --- a/model_zoo/tests/layer_tests/model_log_softmax.prototext +++ b/model_zoo/tests/layer_tests/model_log_softmax.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_mean_absolute_error.prototext b/model_zoo/tests/layer_tests/model_mean_absolute_error.prototext index d75d4b06432..beda327e807 100644 --- a/model_zoo/tests/layer_tests/model_mean_absolute_error.prototext +++ b/model_zoo/tests/layer_tests/model_mean_absolute_error.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_relu.prototext b/model_zoo/tests/layer_tests/model_relu.prototext index 
dbc49b704ac..db91a7ba590 100644 --- a/model_zoo/tests/layer_tests/model_relu.prototext +++ b/model_zoo/tests/layer_tests/model_relu.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_selu.prototext b/model_zoo/tests/layer_tests/model_selu.prototext index 19ce0f1c3f7..9e98a04ea17 100644 --- a/model_zoo/tests/layer_tests/model_selu.prototext +++ b/model_zoo/tests/layer_tests/model_selu.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_sigmoid.prototext b/model_zoo/tests/layer_tests/model_sigmoid.prototext index 3f53416f080..989c1fb4c5c 100644 --- a/model_zoo/tests/layer_tests/model_sigmoid.prototext +++ b/model_zoo/tests/layer_tests/model_sigmoid.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_softmax.prototext b/model_zoo/tests/layer_tests/model_softmax.prototext index 313165c17f8..b231ff7d179 100644 --- a/model_zoo/tests/layer_tests/model_softmax.prototext +++ b/model_zoo/tests/layer_tests/model_softmax.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_softplus.prototext b/model_zoo/tests/layer_tests/model_softplus.prototext index 30d84782bfd..fc4d06823b3 100644 --- a/model_zoo/tests/layer_tests/model_softplus.prototext +++ b/model_zoo/tests/layer_tests/model_softplus.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git 
a/model_zoo/tests/layer_tests/model_softsign.prototext b/model_zoo/tests/layer_tests/model_softsign.prototext index e008da2aa80..55e4e89cfc9 100644 --- a/model_zoo/tests/layer_tests/model_softsign.prototext +++ b/model_zoo/tests/layer_tests/model_softsign.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_squared_difference.prototext b/model_zoo/tests/layer_tests/model_squared_difference.prototext index 6ee5f1ce4fe..87b8a14c7c7 100644 --- a/model_zoo/tests/layer_tests/model_squared_difference.prototext +++ b/model_zoo/tests/layer_tests/model_squared_difference.prototext @@ -46,9 +46,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_tessellate.prototext b/model_zoo/tests/layer_tests/model_tessellate.prototext index fb9b6a56822..11440379413 100644 --- a/model_zoo/tests/layer_tests/model_tessellate.prototext +++ b/model_zoo/tests/layer_tests/model_tessellate.prototext @@ -46,9 +46,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/layer_tests/model_variance.prototext b/model_zoo/tests/layer_tests/model_variance.prototext index 9ad486c62d6..33d0ac06373 100644 --- a/model_zoo/tests/layer_tests/model_variance.prototext +++ b/model_zoo/tests/layer_tests/model_variance.prototext @@ -49,9 +49,7 @@ model { layer { name: "data" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } # Input data diff --git a/model_zoo/tests/model_lenet_mnist_ckpt.prototext b/model_zoo/tests/model_lenet_mnist_ckpt.prototext index 0a62ba260ec..e717e129366 100644 --- a/model_zoo/tests/model_lenet_mnist_ckpt.prototext +++ b/model_zoo/tests/model_lenet_mnist_ckpt.prototext @@ -70,9 +70,7 @@ 
model { name: "data" children: "image label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "data" diff --git a/model_zoo/tests/model_lenet_mnist_dist_ckpt.prototext b/model_zoo/tests/model_lenet_mnist_dist_ckpt.prototext index cf81201a4dc..8afa85edd18 100644 --- a/model_zoo/tests/model_lenet_mnist_dist_ckpt.prototext +++ b/model_zoo/tests/model_lenet_mnist_dist_ckpt.prototext @@ -72,9 +72,7 @@ model { name: "data" children: "image label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "data" diff --git a/model_zoo/tests/model_lenet_mnist_lbann2ckpt.prototext b/model_zoo/tests/model_lenet_mnist_lbann2ckpt.prototext index 00db10776b1..d8e7066afd5 100644 --- a/model_zoo/tests/model_lenet_mnist_lbann2ckpt.prototext +++ b/model_zoo/tests/model_lenet_mnist_lbann2ckpt.prototext @@ -69,9 +69,7 @@ model { name: "data" children: "image label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "data" diff --git a/model_zoo/tests/model_mnist_conv_graph.prototext b/model_zoo/tests/model_mnist_conv_graph.prototext index 1973cfdcab5..78a9dbafc4d 100644 --- a/model_zoo/tests/model_mnist_conv_graph.prototext +++ b/model_zoo/tests/model_mnist_conv_graph.prototext @@ -36,9 +36,7 @@ model { name: "data" children: "images labels" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { name: "images" diff --git a/model_zoo/tests/model_mnist_distributed_io.prototext b/model_zoo/tests/model_mnist_distributed_io.prototext index 90741394952..5067e394360 100644 --- a/model_zoo/tests/model_mnist_distributed_io.prototext +++ b/model_zoo/tests/model_mnist_distributed_io.prototext @@ -94,9 +94,7 @@ model { name: "1" children: "1a 1b" data_layout: "model_parallel" - input { - io_buffer: "distributed" - } + input {} } layer { parents: "1" diff --git a/model_zoo/tests/model_mnist_partitioned_io.prototext 
b/model_zoo/tests/model_mnist_partitioned_io.prototext index 0e17945ddb7..0096a9e72dc 100644 --- a/model_zoo/tests/model_mnist_partitioned_io.prototext +++ b/model_zoo/tests/model_mnist_partitioned_io.prototext @@ -106,9 +106,7 @@ model { name: "1" children: "1a 1b" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} } layer { parents: "1" diff --git a/model_zoo/tests/model_mnist_ridge_regression.prototext b/model_zoo/tests/model_mnist_ridge_regression.prototext index ca77151c085..173ea38fc71 100644 --- a/model_zoo/tests/model_mnist_ridge_regression.prototext +++ b/model_zoo/tests/model_mnist_ridge_regression.prototext @@ -43,7 +43,7 @@ model { name: "data" children: "image label" data_layout: "data_parallel" - input { io_buffer: "partitioned" } + input {} } layer { parents: "data" diff --git a/model_zoo/tests/model_mnist_softmax_classifier.prototext b/model_zoo/tests/model_mnist_softmax_classifier.prototext index b7c7d43ce8c..8bbd7fa5bc5 100644 --- a/model_zoo/tests/model_mnist_softmax_classifier.prototext +++ b/model_zoo/tests/model_mnist_softmax_classifier.prototext @@ -41,7 +41,7 @@ model { name: "data" children: "image label" data_layout: "data_parallel" - input { io_buffer: "partitioned" } + input {} } layer { parents: "data" diff --git a/src/proto/factories/layer_factory.cpp b/src/proto/factories/layer_factory.cpp index 1864b7183ce..9a6d1bbd0dc 100644 --- a/src/proto/factories/layer_factory.cpp +++ b/src/proto/factories/layer_factory.cpp @@ -60,13 +60,15 @@ std::unique_ptr construct_layer( if (mode_str == "regression") { target_mode = data_reader_target_mode::REGRESSION; } if (mode_str == "reconstruction") { target_mode = data_reader_target_mode::RECONSTRUCTION; } if (mode_str == "na" || mode_str == "NA" || mode_str == "N/A") { target_mode = data_reader_target_mode::NA; } - if (io_buffer == "partitioned") { + if (io_buffer == "partitioned" || io_buffer.empty()) { return lbann::make_unique>( comm, num_parallel_readers, 
data_readers, !params.data_set_per_model(), target_mode); + } else { + LBANN_ERROR("invalid IO buffer type (" + io_buffer + ")"); } } diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 230973ae05f..8d5cb67798e 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -1022,9 +1022,9 @@ message Dropout { // Input layers // ////////////////// message Input { - bool data_set_per_model = 1; //default: false - string io_buffer = 2; - string target_mode = 3; + bool data_set_per_model = 1; // Default: false + string io_buffer = 2; // Options: "partitioned" (default) + string target_mode = 3; // Options: "classification" (default), "regression", "reconstruction", "N/A" } ////////////////////// From 3c1f6df5a28c2723b8372c8a4c0ad35ce2bf0805 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 13 Feb 2019 13:35:30 -0800 Subject: [PATCH 042/443] Using default data reader in Python interface and Bamboo. --- .../prototext/model_mnist_simple_1.prototext | 50 +++++++++++++----- .../prototext/model_mnist_simple_2.prototext | 52 ++++++++++++++----- scripts/proto/README.md | 6 +-- scripts/proto/lbann/models/alexnet.py | 4 +- scripts/proto/lbann/models/resnet.py | 2 +- scripts/proto/lbann/onnx/o2l/__init__.py | 2 +- 6 files changed, 81 insertions(+), 35 deletions(-) diff --git a/bamboo/unit_tests/prototext/model_mnist_simple_1.prototext b/bamboo/unit_tests/prototext/model_mnist_simple_1.prototext index c2c6477837b..77a1c7ed256 100644 --- a/bamboo/unit_tests/prototext/model_mnist_simple_1.prototext +++ b/bamboo/unit_tests/prototext/model_mnist_simple_1.prototext @@ -1,19 +1,17 @@ model { - name: "sequential_model" data_layout: "data_parallel" mini_batch_size: 64 block_size: 256 num_epochs: 3 num_parallel_readers: 0 - procs_per_model: 0 - num_gpus: -1 + procs_per_trainer: 0 ################################################### # Objective function ################################################### objective_function { - cross_entropy {} + layer_term { layer: 
"cross_entropy" } l2_weight_regularization { scale_factor: 1e-4 } @@ -24,7 +22,11 @@ model { ################################################### metric { - categorical_accuracy {} + layer_metric { + name: "categorical accuracy" + layer: "accuracy" + unit: "%" + } } ################################################### @@ -52,13 +54,25 @@ model { layer { name: "data" + children: "image label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} + } + layer { + parents: "data" + name: "image" + data_layout: "data_parallel" + split {} + } + layer { + parents: "data" + name: "label" + data_layout: "data_parallel" + split {} } layer { + parents: "image" name: "ip1" data_layout: "model_parallel" fully_connected { @@ -68,12 +82,14 @@ model { } layer { + parents: "ip1" name: "relu1" data_layout: "model_parallel" relu {} } layer { + parents: "relu1" name: "ip2" data_layout: "model_parallel" fully_connected { @@ -83,18 +99,24 @@ model { } layer { + parents: "ip2" name: "prob" - data_layout: "model_parallel" + data_layout: "data_parallel" softmax {} } layer { - name: "target" + parents: "prob label" + name: "cross_entropy" data_layout: "data_parallel" - target { - io_buffer: "partitioned" - shared_data_reader: true - } + cross_entropy {} + } + + layer { + parents: "prob label" + name: "accuracy" + data_layout: "data_parallel" + categorical_accuracy {} } } diff --git a/bamboo/unit_tests/prototext/model_mnist_simple_2.prototext b/bamboo/unit_tests/prototext/model_mnist_simple_2.prototext index d265acf7276..c89c171566f 100644 --- a/bamboo/unit_tests/prototext/model_mnist_simple_2.prototext +++ b/bamboo/unit_tests/prototext/model_mnist_simple_2.prototext @@ -1,19 +1,17 @@ model { - name: "sequential_model" data_layout: "data_parallel" mini_batch_size: 64 block_size: 256 num_epochs: 3 num_parallel_readers: 0 - procs_per_model: 0 - num_gpus: -1 + procs_per_trainer: 0 ################################################### # Objective function 
################################################### objective_function { - cross_entropy {} + layer_term { layer: "cross_entropy" } l2_weight_regularization { scale_factor: 1e-4 } @@ -24,7 +22,11 @@ model { ################################################### metric { - categorical_accuracy {} + layer_metric { + name: "categorical accuracy" + layer: "accuracy" + unit: "%" + } } ################################################### @@ -52,13 +54,25 @@ model { layer { name: "data" + children: "image label" data_layout: "data_parallel" - input { - io_buffer: "partitioned" - } + input {} + } + layer { + parents: "data" + name: "image" + data_layout: "data_parallel" + split {} + } + layer { + parents: "data" + name: "label" + data_layout: "data_parallel" + split {} } layer { + parents: "image" name: "ip1" data_layout: "model_parallel" fully_connected { @@ -68,12 +82,14 @@ model { } layer { + parents: "ip1" name: "relu1" data_layout: "model_parallel" relu {} } layer { + parents: "relu1" name: "ip3" data_layout: "model_parallel" fully_connected { @@ -83,11 +99,13 @@ model { } layer { + parents: "ip3" name: "relu3" data_layout: "model_parallel" relu {} } layer { + parents: "relu3" name: "ip2" data_layout: "model_parallel" fully_connected { @@ -97,18 +115,24 @@ model { } layer { + parents: "ip2" name: "prob" - data_layout: "model_parallel" + data_layout: "data_parallel" softmax {} } layer { - name: "target" + parents: "prob label" + name: "cross_entropy" data_layout: "data_parallel" - target { - io_buffer: "partitioned" - shared_data_reader: true - } + cross_entropy {} + } + + layer { + parents: "prob label" + name: "accuracy" + data_layout: "data_parallel" + categorical_accuracy {} } } diff --git a/scripts/proto/README.md b/scripts/proto/README.md index c0d45c7044f..d26f83bf01a 100644 --- a/scripts/proto/README.md +++ b/scripts/proto/README.md @@ -109,9 +109,9 @@ import lbann.proto as lp # Input data. # Note: Order matters for the children of the input layer! 
-input = lp.Input(io_buffer='partitioned') # Interacts with data reader. -images = lp.Identity(input) # NCHW image tensor. -labels = lp.Identity(input) # One-hot vector. +input = lp.Input() # Interacts with data reader. +images = lp.Identity(input) # NCHW image tensor. +labels = lp.Identity(input) # One-hot vector. # Simple convolutional network. conv = lp.Convolution( diff --git a/scripts/proto/lbann/models/alexnet.py b/scripts/proto/lbann/models/alexnet.py index d53b7b747f5..c89df25c2b9 100644 --- a/scripts/proto/lbann/models/alexnet.py +++ b/scripts/proto/lbann/models/alexnet.py @@ -15,7 +15,7 @@ class AlexNet(lm.Module): E. Hinton. "ImageNet classification with deep convolutional neural networks." In Advances in Neural Information Processing Systems, pp. 1097-1105. 2012. - + Note that there is very little consistency in the implementation of AlexNet across frameworks. If a particular variant is needed, you should implement it yourself. @@ -114,7 +114,7 @@ def forward(self, x): args = parser.parse_args() # Construct layer graph. - input = lp.Input(io_buffer='partitioned') + input = lp.Input() images = lp.Identity(input) labels = lp.Identity(input) preds = AlexNet(args.num_labels)(images) diff --git a/scripts/proto/lbann/models/resnet.py b/scripts/proto/lbann/models/resnet.py index 46a25ebbb59..d274766f559 100644 --- a/scripts/proto/lbann/models/resnet.py +++ b/scripts/proto/lbann/models/resnet.py @@ -514,7 +514,7 @@ def __init__(self, output_size, bn_stats_aggregation=args.bn_stats_aggregation) # Construct layer graph. 
- input = lp.Input(io_buffer='partitioned') + input = lp.Input() images = lp.Identity(input) labels = lp.Identity(input) softmax = lp.Softmax(resnet(images)) diff --git a/scripts/proto/lbann/onnx/o2l/__init__.py b/scripts/proto/lbann/onnx/o2l/__init__.py index 8890c98d9f2..a826b1b391e 100644 --- a/scripts/proto/lbann/onnx/o2l/__init__.py +++ b/scripts/proto/lbann/onnx/o2l/__init__.py @@ -55,7 +55,7 @@ def onnxToLbannLayers(o, lbannInputNames, l2oInputMap, dataLayout="auto"): layers.append(lbann_pb2.Layer(name=inputLayerName, children=lbann.onnx.util.list2LbannList(lbannInputNames), data_layout="data_parallel", - input=lbann_pb2.Input(io_buffer="partitioned"))) + input=lbann_pb2.Input())) for i in lbannInputNames: layers.append(lbann_pb2.Layer(name=i, parents=lbann.onnx.util.list2LbannList([inputLayerName]), From 0ce9a03fb90553d32f2a8bb0dd6f217a0a0a0590 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Wed, 13 Feb 2019 13:36:05 -0800 Subject: [PATCH 043/443] fix issue where comms are moved erroneously --- include/lbann/comm.hpp | 74 +++++++++++++++++++++--------------------- src/comm.cpp | 8 ++--- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/include/lbann/comm.hpp b/include/lbann/comm.hpp index c99b4bb4462..799388787f6 100644 --- a/include/lbann/comm.hpp +++ b/include/lbann/comm.hpp @@ -272,7 +272,7 @@ class lbann_comm { // Default to cpu memory template void broadcast(const int root, T* data, const int count, const El::mpi::Comm& c) { - broadcast(root, data, count, std::move(c), El::SyncInfo{}); + broadcast(root, data, count, c, El::SyncInfo{}); } template < typename T, El::Device D, bool S = is_instantiated_El_mpi_type::value > @@ -319,7 +319,7 @@ class lbann_comm { size_t resize(const int root, std::vector &data, const El::mpi::Comm& c) { auto const rank_c = El::mpi::Rank(c); size_t count = data.size(); - El::mpi::Broadcast(&count, 1, root, std::move(c), El::SyncInfo{}); + El::mpi::Broadcast(&count, 1, root, c, El::SyncInfo{}); 
count_bytes_broadcast(sizeof(size_t), rank_c, root); data.resize(count); return count; @@ -335,7 +335,7 @@ class lbann_comm { if (count <= 0) { return; } - broadcast(root, data.data(), count, std::move(c), El::SyncInfo{}); + broadcast(root, data.data(), count, c, El::SyncInfo{}); } /// Broadcast vector<> to world. template @@ -371,13 +371,13 @@ class lbann_comm { /** Allgather over an arbitrary communicator */ template void all_gather(const T* src, int src_count, T* rcv, int rcv_count, const El::mpi::Comm& c) { - all_gather(src, src_count, rcv, rcv_count, std::move(c), + all_gather(src, src_count, rcv, rcv_count, c, El::SyncInfo{}); } template void all_gather(const T* src, int src_count, T* rcv, int rcv_count, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { - El::mpi::AllGather(src, src_count, rcv, rcv_count, std::move(c), syncInfo); + El::mpi::AllGather(src, src_count, rcv, rcv_count, c, syncInfo); } /** @@ -393,7 +393,7 @@ class lbann_comm { << "this doesn't work!"; lbann_comm_abort(err.str()); } - El::mpi::AllGather(src.data(), src.size(), rcs.data(), rcv_counts.data(), rcv_disp.data(), std::move(c), El::SyncInfo{}); + El::mpi::AllGather(src.data(), src.size(), rcs.data(), rcv_counts.data(), rcv_disp.data(), c, El::SyncInfo{}); } /** * Allgatherv over a trainer communicator; @@ -409,7 +409,7 @@ class lbann_comm { */ template void all_gather(T &src, std::vector &data, const El::mpi::Comm& c) { - El::mpi::AllGather(&src, 1, data.data(), 1, std::move(c), + El::mpi::AllGather(&src, 1, data.data(), 1, c, El::SyncInfo{}); } /** @@ -481,7 +481,7 @@ class lbann_comm { template void gather(T snd, int root, const El::mpi::Comm& c) { bytes_sent += sizeof(T); - El::mpi::Gather(&snd, 1, (T*) nullptr, 0, root, std::move(c), + El::mpi::Gather(&snd, 1, (T*) nullptr, 0, root, c, El::SyncInfo{}); } /** Scalar gather (for root processes). 
*/ @@ -489,47 +489,47 @@ class lbann_comm { void gather(T snd, T *rcv, const El::mpi::Comm& c) { auto const size_c = El::mpi::Size(c); auto const rank_c = El::mpi::Rank(c); - El::mpi::Gather(&snd, 1, rcv, 1, rank_c, std::move(c), + El::mpi::Gather(&snd, 1, rcv, 1, rank_c, c, El::SyncInfo{}); bytes_received += sizeof(T) * (size_c - 1); } /** Scalar gather (for root processes). */ template void gather(T snd, std::vector& rcv, const El::mpi::Comm& c) { - gather(snd, rcv.data(), std::move(c)); + gather(snd, rcv.data(), c); } /** Scalar-array gather (for non-root processes). */ template void gather(T *snd, int count, int root, const El::mpi::Comm& c) { - gather(snd, count, root, std::move(c), + gather(snd, count, root, c, El::SyncInfo{}); } template void gather(T *snd, int count, int root, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { bytes_sent += sizeof(T) * count; - El::mpi::Gather(snd, count, (T*) nullptr, 0, root, std::move(c), + El::mpi::Gather(snd, count, (T*) nullptr, 0, root, c, syncInfo); } /** Scalar-array gather (for root processes). */ template void gather(T *snd, int count, T *rcv, const El::mpi::Comm& c) { - gather(snd, count, rcv, std::move(c), El::SyncInfo{}); + gather(snd, count, rcv, c, El::SyncInfo{}); } template void gather(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { auto const size_c = El::mpi::Size(c); auto const rank_c = El::mpi::Rank(c); - El::mpi::Gather(snd, count, rcv, count, rank_c, std::move(c), syncInfo); + El::mpi::Gather(snd, count, rcv, count, rank_c, c, syncInfo); bytes_received += sizeof(T) * count * (size_c - 1); } /** Scalar scatter (for non-root processes). 
*/ template T scatter(int root, const El::mpi::Comm& c) { T val = {}; - El::mpi::Scatter((T*) nullptr, 1, &val, 1, root, std::move(c), + El::mpi::Scatter((T*) nullptr, 1, &val, 1, root, c, El::SyncInfo{}); bytes_received += sizeof(T); return val; @@ -540,7 +540,7 @@ class lbann_comm { bytes_sent += sizeof(T) * (El::mpi::Size(c) - 1); T val = {}; auto root = El::mpi::Rank(c); - El::mpi::Scatter(snd, 1, &val, 1, root, std::move(c), + El::mpi::Scatter(snd, 1, &val, 1, root, c, El::SyncInfo{}); return val; } @@ -578,7 +578,7 @@ class lbann_comm { template void reduce(T snd, int root, const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM) { bytes_sent += sizeof(T); - El::mpi::Reduce(&snd, (T*) NULL, 1, op, root, std::move(c), + El::mpi::Reduce(&snd, (T*) NULL, 1, op, root, c, El::SyncInfo{}); } /** Scalar reduce (for root processes). */ @@ -587,7 +587,7 @@ class lbann_comm { T val = {}; auto const size_c = El::mpi::Size(c); auto const rank_c = El::mpi::Rank(c); - El::mpi::Reduce(&snd, &val, 1, op, rank_c, std::move(c), + El::mpi::Reduce(&snd, &val, 1, op, rank_c, c, El::SyncInfo{}); bytes_received += sizeof(T) * (size_c - 1); return val; @@ -597,43 +597,43 @@ class lbann_comm { // Op is "SUM" template void reduce(T *snd, int count, int root, const El::mpi::Comm& c) { - reduce(snd, count, root, std::move(c), El::mpi::SUM, + reduce(snd, count, root, c, El::mpi::SUM, El::SyncInfo{}); } template void reduce(T *snd, int count, int root, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { - reduce(snd, count, root, std::move(c), El::mpi::SUM, syncInfo); + reduce(snd, count, root, c, El::mpi::SUM, syncInfo); } template void reduce(T *snd, int count, int root, const El::mpi::Comm& c, El::mpi::Op op) { - reduce(snd, count, root, std::move(c), op, El::SyncInfo{}); + reduce(snd, count, root, c, op, El::SyncInfo{}); } template void reduce(T *snd, int count, int root, const El::mpi::Comm& c, El::mpi::Op op, El::SyncInfo const& syncInfo) { bytes_sent += sizeof(T) * count; - 
El::mpi::Reduce(snd, (T*) NULL, count, op, root, std::move(c), syncInfo); + El::mpi::Reduce(snd, (T*) NULL, count, op, root, c, syncInfo); } /** Scalar-array reduce (for root processes). */ template void reduce(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::SyncInfo const& syncInfo) { - reduce(snd, count, rcv, std::move(c), El::mpi::SUM, syncInfo); + reduce(snd, count, rcv, c, El::mpi::SUM, syncInfo); } template void reduce(T *snd, int count, T *rcv, const El::mpi::Comm& c) { - reduce(snd, count, rcv, std::move(c), El::mpi::SUM, El::SyncInfo{}); + reduce(snd, count, rcv, c, El::mpi::SUM, El::SyncInfo{}); } template void reduce(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::mpi::Op op) { - reduce(snd, count, rcv, std::move(c), op, El::SyncInfo{}); + reduce(snd, count, rcv, c, op, El::SyncInfo{}); } template void reduce(T *snd, int count, T *rcv, const El::mpi::Comm& c, El::mpi::Op op, El::SyncInfo const& syncInfo) { if (snd == rcv) { snd = (T*)MPI_IN_PLACE; } auto const rank_c = El::mpi::Rank(c); auto const size_c = El::mpi::Size(c); - El::mpi::Reduce(snd, rcv, count, op, rank_c, std::move(c), syncInfo); + El::mpi::Reduce(snd, rcv, count, op, rank_c, c, syncInfo); bytes_received += sizeof(T) * count * (size_c - 1); } /** Inter-trainer all-reduce. 
*/ @@ -656,7 +656,7 @@ class lbann_comm { T allreduce(T snd, const El::mpi::Comm& c, El::mpi::Op op = El::mpi::SUM) { auto const size_c = El::mpi::Size(c); bytes_sent += sizeof(T); - allreduce(&snd, 1, std::move(c), op); + allreduce(&snd, 1, c, op); bytes_received += sizeof(T) * (size_c - 1); return snd; } @@ -677,7 +677,7 @@ class lbann_comm { ::Al::Allreduce<::Al::MPIBackend>( snd, rcv, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(El::SyncInfo{}), algo); #else - El::mpi::AllReduce(snd, rcv, count, op, std::move(c), + El::mpi::AllReduce(snd, rcv, count, op, c, El::SyncInfo{}); #endif bytes_received += count * sizeof(T) * (size_c - 1); @@ -696,7 +696,7 @@ class lbann_comm { ::Al::Allreduce<::Al::MPIBackend>( data, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(El::SyncInfo{}), algo); #else - El::mpi::AllReduce(data, count, op, std::move(c), + El::mpi::AllReduce(data, count, op, c, El::SyncInfo{}); #endif bytes_received += count * sizeof(T) * (size_c - 1); @@ -740,7 +740,7 @@ class lbann_comm { data, count, mpi_op_to_al_op(op), c.template GetComm<::Al::MPIBackend>(El::SyncInfo{}), req.mpi_req); bytes_received += count * sizeof(T) * (El::mpi::Size(c) - 1); #else - allreduce(data, count, std::move(c), op); + allreduce(data, count, c, op); #endif // LBANN_HAS_ALUMINUM } @@ -804,7 +804,7 @@ class lbann_comm { void nb_tagged_send(const T *data, int count, int rank, int tag, El::mpi::Request& req, const El::mpi::Comm& c) { bytes_sent += sizeof(T) * count; - El::mpi::TaggedISend(data, count, rank, tag, std::move(c), req); + El::mpi::TaggedISend(data, count, rank, tag, c, req); } template void nb_send(const T *data, int count, int trainer, El::mpi::Request& req) { @@ -867,7 +867,7 @@ class lbann_comm { template void nb_tagged_recv( T *data, int count, int rank, int tag, El::mpi::Request& req, const El::mpi::Comm& c) { - El::mpi::TaggedIRecv(data, count, rank, tag, std::move(c), req); + El::mpi::TaggedIRecv(data, count, rank, tag, c, 
req); bytes_received += sizeof(T) * count; } @@ -1073,22 +1073,22 @@ void lbann_comm::broadcast(int root, T& val, const El::mpi::Comm& c) { if (S) { // Avoid linking error from uninstantiated El::mpi routine if !S by converting T to El::byte using TT = typename interpret_as_byte_if_needed::type; - broadcast_native(root, reinterpret_cast(val), std::move(c)); + broadcast_native(root, reinterpret_cast(val), c); } else { - broadcast_custom(root, val, std::move(c)); + broadcast_custom(root, val, c); } count_bytes_broadcast(sizeof(T), rank_c, root); } template void lbann_comm::broadcast_native(int root, T& val, const El::mpi::Comm& c) const { - El::mpi::Broadcast(val, root, std::move(c), El::SyncInfo{}); + El::mpi::Broadcast(val, root, c, El::SyncInfo{}); } template void lbann_comm::broadcast_custom(int root, T& val, const El::mpi::Comm& c) const { const int bytes = static_cast(sizeof(T)); - El::mpi::Broadcast(reinterpret_cast(&val), bytes, root, std::move(c), + El::mpi::Broadcast(reinterpret_cast(&val), bytes, root, c, El::SyncInfo{}); } @@ -1098,7 +1098,7 @@ void lbann_comm::broadcast(const int root, T* data, const int count, const El::m const int size = static_cast(S? 
count : sizeof(T)*count); // Avoid linking error from uninstantiated El::mpi routine if !S by converting T to El::byte using TT = typename interpret_as_byte_if_needed::type; - El::mpi::Broadcast(reinterpret_cast(data), size, root, std::move(c), syncInfo); + El::mpi::Broadcast(reinterpret_cast(data), size, root, c, syncInfo); count_bytes_broadcast(sizeof(T)*count, rank_c, root); } diff --git a/src/comm.cpp b/src/comm.cpp index 04da25ade19..f03a8352691 100644 --- a/src/comm.cpp +++ b/src/comm.cpp @@ -217,7 +217,7 @@ void lbann_comm::allreduce(AbsMat& m, void lbann_comm::allreduce(AbsDistMat& m, const El::mpi::Comm& c, El::mpi::Op op) { - allreduce(m.Matrix(), std::move(c), op); + allreduce(m.Matrix(), c, op); } void lbann_comm::nb_allreduce(AbsMat& m, @@ -301,7 +301,7 @@ void lbann_comm::nb_allreduce(AbsMat& m, #endif // AL_HAS_MPI_CUDA bytes_received += sizeof(DataType) * local_size * (El::mpi::Size(c) - 1); #else - allreduce(m, std::move(c), op); + allreduce(m, c, op); #endif // LBANN_HAS_ALUMINUM } @@ -309,7 +309,7 @@ void lbann_comm::nb_allreduce(AbsDistMat& m, const El::mpi::Comm& c, Al::request& req, El::mpi::Op op) { - nb_allreduce(m.Matrix(), std::move(c), req, op); + nb_allreduce(m.Matrix(), c, req, op); } void lbann_comm::wait(Al::request& req) { @@ -363,7 +363,7 @@ void lbann_comm::intertrainer_broadcast_matrix(AbsDistMat& mat, int root) { template<> void lbann_comm::broadcast(const int root, std::string& str, const El::mpi::Comm& c) { std::vector data(str.begin(), str.end()); - broadcast(root, data, std::move(c)); + broadcast(root, data, c); str.assign(data.begin(), data.end()); } From 69d2863d41a08be426c31b2aac50e035f55992f1 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 13 Feb 2019 13:47:35 -0800 Subject: [PATCH 044/443] Removed unit test comparing partitioned and distributed data readers. 
--- .../test_integration_io_buffers.py | 125 --------- containers/README.md | 28 +- .../model_mnist_distributed_io.prototext | 252 ----------------- .../model_mnist_partitioned_io.prototext | 264 ------------------ tests/test_distributed_io_mnist.sh | 41 --- 5 files changed, 14 insertions(+), 696 deletions(-) delete mode 100644 bamboo/integration_tests/test_integration_io_buffers.py delete mode 100644 model_zoo/tests/model_mnist_distributed_io.prototext delete mode 100644 model_zoo/tests/model_mnist_partitioned_io.prototext delete mode 100755 tests/test_distributed_io_mnist.sh diff --git a/bamboo/integration_tests/test_integration_io_buffers.py b/bamboo/integration_tests/test_integration_io_buffers.py deleted file mode 100644 index 9132b36ba83..00000000000 --- a/bamboo/integration_tests/test_integration_io_buffers.py +++ /dev/null @@ -1,125 +0,0 @@ -import sys -sys.path.insert(0, '../common_python') -import tools -import pytest -import os, sys -import common_code - -def skeleton_io_buffers(cluster, dir_name, executables, compiler_name, weekly): - if not weekly: - pytest.skip('Not doing weekly testing') - if cluster == 'surface': - pytest.skip('skeleton_io_buffers does not run on surface') - if compiler_name not in executables: - pytest.skip('default_exes[%s] does not exist' % compiler_name) - max_mb = 300 - # Printing output from 6*6*2=72 runs of LBANN makes the logs too slow. - # Output from run_lbann is still printed - if there is a failure. 
- should_log = False - partitioned = 'mnist_partitioned_io' - distributed = 'mnist_distributed_io' - model_names = [partitioned, distributed] - accuracies = {} - errors = [] - all_values = [] - fatal_errors = [] - overall_min_partitioned_accuracy = float('inf') - overall_min_distributed_accuracy = float('inf') - for mini_batch_size in [300, 150, 100, 75, 60, 50]: - num_models = max_mb / mini_batch_size - for procs_per_model in [1, 2, 3, 4, 5, 6]: - num_ranks = procs_per_model * num_models - for model_name in model_names: - output_file_name = '%s/bamboo/integration_tests/output/%s_%d_%d_output.txt' % (dir_name, model_name, mini_batch_size, procs_per_model) - error_file_name = '%s/bamboo/integration_tests/error/%s_%d_%d_error.txt' % (dir_name, model_name, mini_batch_size, procs_per_model) - command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=2, - num_processes=num_ranks, dir_name=dir_name, - data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST', - data_reader_name='mnist', mini_batch_size=mini_batch_size, - model_folder='tests', model_name=model_name, num_epochs=5, - optimizer_name='adagrad', - processes_per_model=procs_per_model, - output_file_name=output_file_name, error_file_name=error_file_name) - try: - common_code.run_lbann(command, model_name, output_file_name, error_file_name, should_log) # Don't need return value - accuracy_dict = common_code.extract_data(output_file_name, ['test_accuracy'], should_log) - accuracies[model_name] = accuracy_dict['test_accuracy'] - except Exception: - # We want to keep running to see if any other mini_batch_size & procs_per_model combination crashes. - # However, it is now pointless to compare accuracies. 
- fatal_errors.append('Crashed running %s with mini_batch_size=%d, procs_per_model=%d' % (model_name, mini_batch_size, procs_per_model)) - # End model name loop - if fatal_errors == []: - partitioned_num_models = len(accuracies[partitioned].keys()) - distributed_num_models = len(accuracies[distributed].keys()) - assert partitioned_num_models == distributed_num_models - - min_partitioned_accuracy = float('inf') - min_distributed_accuracy = float('inf') - for model_num in sorted(accuracies[partitioned].keys()): - partitioned_accuracy = accuracies[partitioned][model_num]['overall'] - distributed_accuracy = accuracies[distributed][model_num]['overall'] - if partitioned_accuracy < min_partitioned_accuracy: - min_partitioned_accuracy = partitioned_accuracy - if distributed_accuracy < min_distributed_accuracy: - min_distributed_accuracy = distributed_accuracy - tolerance = 0.05 - # Are we within tolerance * expected_value? - if abs(partitioned_accuracy - distributed_accuracy) > abs(tolerance * min(partitioned_accuracy, distributed_accuracy)): - errors.append('partitioned = %f != %f = distributed; model_num=%s mini_batch_size=%d procs_per_model=%d' % (partitioned_accuracy, distributed_accuracy, model_num, mini_batch_size, procs_per_model)) - all_values.append('partitioned = %f, %f = distributed; model_num=%s mini_batch_size=%d procs_per_model=%d' % (partitioned_accuracy, distributed_accuracy, model_num, mini_batch_size, procs_per_model)) - # End model_num loop - if min_partitioned_accuracy < overall_min_partitioned_accuracy: - overall_min_partitioned_accuracy = min_partitioned_accuracy - if min_distributed_accuracy < overall_min_distributed_accuracy: - overall_min_distributed_accuracy = min_distributed_accuracy - # End fatal_errors == [] block - # End procs_per_model loop - # End mini_batch_size loop - for fatal_error in fatal_errors: - print(fatal_error) - assert fatal_errors == [] - # If there were no fatal errors, archive the accuracies. 
- if os.environ['LOGNAME'] == 'lbannusr': - key = 'bamboo_planKey' - if key in os.environ: - plan = os.environ[key] - if plan in ['LBANN-NIGHTD', 'LBANN-WD']: - archive_file = '/usr/workspace/wsb/lbannusr/archives/%s/%s/%s/io_buffers.txt' % (plan, cluster, compiler_name) - with open(archive_file, 'a') as archive: - archive.write('%s, %f, %f\n' % (os.environ['bamboo_buildNumber'], overall_min_partitioned_accuracy, overall_min_distributed_accuracy)) - else: - print('The plan %s does not have archiving activated' % plan) - else: - print('%s is not in os.environ' % key) - else: - print('os.environ["LOGNAME"]=%s' % os.environ['LOGNAME']) - - print('Errors for: partitioned_and_distributed %s (%d)' % (compiler_name, len(errors))) - for error in errors: - print(error) - if should_log: - print('All values for: partitioned_and_distributed %s (%d)' % (compiler_name, len(all_values))) - for value in all_values: - print(value) - assert errors == [] - -def test_integration_io_buffers_clang4(cluster, dirname, exes, weekly): - skeleton_io_buffers(cluster, dirname, exes, 'clang4', weekly) - -def test_integration_io_buffers_gcc4(cluster, dirname, exes, weekly): - skeleton_io_buffers(cluster, dirname, exes, 'gcc4', weekly) - -def test_integration_io_buffers_gcc7(cluster, dirname, exes, weekly): - skeleton_io_buffers(cluster, dirname, exes, 'gcc7', weekly) - -def test_integration_io_buffers_intel18(cluster, dirname, exes, weekly): - skeleton_io_buffers(cluster, dirname, exes, 'intel18', weekly) - -# Run with python -m pytest -s test_integration_io_buffers.py -k 'test_integration_io_buffers_exe' --exe= -def test_integration_performance_io_buffers_exe(cluster, dirname, exe): - if exe == None: - pytest.skip('Non-local testing') - exes = {'exe' : exe} - skeleton_io_buffers(cluster, dirname, exes, 'exe', True) diff --git a/containers/README.md b/containers/README.md index 224510e9156..e405a25f0f4 100644 --- a/containers/README.md +++ b/containers/README.md @@ -1,42 +1,42 @@ ## Singularity 
- + [Singularity](http://singularity.lbl.gov/) - + First build a Singularity container with the lbann.def file: ``` sudo singularity build --writable lbann.img lbann.def ``` *Note: Building the image requires root access.* - + *Note: --writable allows users to make changes inside the container (Required for LC).* -This will create a container called lbann.img which can be used to invoke lbann on any system with singularity and openmpi installed. +This will create a container called lbann.img which can be used to invoke lbann on any system with singularity and openmpi installed. ### Customizing Configuration in lbann.def -Singularity is designed to take advantage of underlying HPC resources. The lbann.def file in this directory specifically installs packages necessary for infiniband interconnects (lines 15-19). It builds openmpi outside of the spack step to ensure it is built with infiniband support (lines 37-55). Experienced users should modify these sections to match with the underlying resources they intend to run on. This defintion file also builds gcc version 4.9.3, and uses it to build openmpi and lbann (lines 33-35). This is also customized to run on specific LC resources, and can be modified depending on the users system. +Singularity is designed to take advantage of underlying HPC resources. The lbann.def file in this directory specifically installs packages necessary for infiniband interconnects (lines 15-19). It builds openmpi outside of the spack step to ensure it is built with infiniband support (lines 37-55). Experienced users should modify these sections to match with the underlying resources they intend to run on. This defintion file also builds gcc version 4.9.3, and uses it to build openmpi and lbann (lines 33-35). This is also customized to run on specific LC resources, and can be modified depending on the users system. 
### Running LBANN with Singualrity To run LBANN use mpirun and singularity's execute command: ``` salloc -N2 - mpirun -np 4 singularity exec -B /p:/p lbann.img /lbann/spack_builds/singularity_optimizied_test/model_zoo/lbann mpirun -np 4 singularity exec -B /p:/p lbann.img /lbann/spack_builds/singularity/model_zoo/lbann --model=/lbann/model_zoo/tests/model_mnist_distributed_io.prototext --reader=/lbann/model_zoo/data_readers/data_reader_mnist.prototext --optimizer=/lbann/ model_zoo/optimizers/opt_adagrad.prototext + mpirun -np 4 singularity exec -B /p:/p lbann.img /lbann/spack_builds/singularity_optimizied_test/model_zoo/lbann mpirun -np 4 singularity exec -B /p:/p lbann.img /lbann/spack_builds/singularity/model_zoo/lbann --model=/lbann/model_zoo/tests/model_mnist_partitioned_io.prototext --reader=/lbann/model_zoo/data_readers/data_reader_mnist.prototext --optimizer=/lbann/ model_zoo/optimizers/opt_adagrad.prototext ``` *Note: The -B singularity command, binds directories from the surrounding filesystem to the container. Be sure to include any necessary files using this command (i.e model prototext files, datasets, etc). Alternatively, system admins are capable of allowing a singularity container to utilize the host's filesystem. This is done by changing MOUNT HOSTFS in the singularity config file.* ## Docker - + [Docker](https://www.docker.com/) - + First build a Docker image with the Dockerfile. From whichever directory contains the Dockerfile: ``` docker build -t dockban . ``` - + *Note: The -t flag specifies an identifying tag for this image. "dockban" can be changed to any desired tag.* - + ### Customizing Configuration in Dockerfile - The Dockerfile container defintion is less complicated than its Singularity counterpart. gcc 7.1.0 is built and registered with spack in lines 19-21. Users can change this, as well as LBANN specific build options in spack (line 22). For instance, to add gpu support a user can add "+gpu" to this line. 
- + The Dockerfile container defintion is less complicated than its Singularity counterpart. gcc 7.1.0 is built and registered with spack in lines 19-21. Users can change this, as well as LBANN specific build options in spack (line 22). For instance, to add gpu support a user can add "+gpu" to this line. + ### Running LBANN with Docker -This LBANN build also uses openmpi, so lbann can be launched with mpirun here as well. However, this example will just show the single process invocation. +This LBANN build also uses openmpi, so lbann can be launched with mpirun here as well. However, this example will just show the single process invocation. Start a docker container from the previously created image, and attach to it. Make sure to bind any necessary directories using -v: ``` @@ -44,5 +44,5 @@ docker run -it -v $HOME/MNIST:/MNIST dockban ``` Run LBANN as you would outside of a container: ``` -./spack_build/docker_build/model_zoo/lbann --model=model_zoo/models/lenet_mnist/model_lenet_mnist.prototext --reader=model_zoo/data_readers/data_reader_mnist.prototext --optimizer=model_zoo/optimizers/opt_sgd.prototext +./spack_build/docker_build/model_zoo/lbann --model=model_zoo/models/lenet_mnist/model_lenet_mnist.prototext --reader=model_zoo/data_readers/data_reader_mnist.prototext --optimizer=model_zoo/optimizers/opt_sgd.prototext ``` diff --git a/model_zoo/tests/model_mnist_distributed_io.prototext b/model_zoo/tests/model_mnist_distributed_io.prototext deleted file mode 100644 index 5067e394360..00000000000 --- a/model_zoo/tests/model_mnist_distributed_io.prototext +++ /dev/null @@ -1,252 +0,0 @@ -model { - data_layout: "model_parallel" - mini_batch_size: 10 - block_size: 256 - num_epochs: 20 - num_parallel_readers: 0 - procs_per_trainer: 1 - - ################################################### - # Objective function - ################################################### - - objective_function { - layer_term { layer: "cross_entropy" } - l2_weight_regularization { - 
scale_factor: 1e-4 - } - } - - ################################################### - # Metrics - ################################################### - - metric { - layer_metric { - name: "categorical accuracy" - layer: "accuracy" - unit: "%" - } - } - - ################################################### - # Callbacks - ################################################### - callback { - print { - interval: 1 - } - } - callback { - timer { - } - } - callback { - summary { - dir: "." - batch_interval: 1 - mat_interval: 25 - } - } - # callback { - # debug { - # phase: "test" - # } - # } - # callback { - # debug_io { - # phase: "test" - # lvl: 1 - # } - # } - callback { - adaptive_learning_rate { - patience: 4 - amt: 0.1 - } - } - callback { - imcomm { - intertrainer_comm_method: "normal" - all_optimizers: true - } - } - # callback { - # dump_mb_indices { - # basename: "debug_alexnet/" - # interval: 1 - # } - # } - # callback { - # disp_io_stats { - # layers: "1" - # } - # } - - ################################################### - # start of layers - ################################################### - - - # INPUT 1 - ###################### - layer { - name: "1" - children: "1a 1b" - data_layout: "model_parallel" - input {} - } - layer { - parents: "1" - name: "1a" - data_layout: "model_parallel" - split {} - } - layer { - parents: "1" - name: "1b" - data_layout: "model_parallel" - split {} - } - - # FULLY_CONNECTED 2 - ###################### - layer { - parents: "1a" - name: "2" - data_layout: "model_parallel" - fully_connected { - num_neurons: 1024 - weight_initialization: "glorot_uniform" - has_bias: true - } - } - - # RELU 3 - ###################### - layer { - parents: "2" - name: "3" - data_layout: "model_parallel" - relu { - } - } - - # DROPOUT 4 - ###################### - layer { - parents: "3" - name: "4" - data_layout: "model_parallel" - dropout { - keep_prob: -1 - } - } - - # FULLY_CONNECTED 5 - ###################### - layer { - parents: "4" - name: "5" - 
data_layout: "model_parallel" - fully_connected { - num_neurons: 1024 - weight_initialization: "glorot_uniform" - has_bias: true - } - } - - # RELU 6 - ###################### - layer { - parents: "5" - name: "6" - data_layout: "model_parallel" - relu { - } - } - - # DROPOUT 7 - ###################### - layer { - parents: "6" - name: "7" - data_layout: "model_parallel" - dropout { - keep_prob: -1 - } - } - - # FULLY_CONNECTED 8 - ###################### - layer { - parents: "7" - name: "8" - data_layout: "model_parallel" - fully_connected { - num_neurons: 1024 - weight_initialization: "glorot_uniform" - has_bias: true - } - } - - # RELU 9 - ###################### - layer { - parents: "8" - name: "9" - data_layout: "model_parallel" - relu { - } - } - - # DROPOUT 10 - ###################### - layer { - parents: "9" - name: "10" - data_layout: "model_parallel" - dropout { - keep_prob: -1 - } - } - - # FULLY_CONNECTED 11 - ###################### - layer { - parents: "10" - name: "11" - data_layout: "model_parallel" - fully_connected { - num_neurons: 10 - weight_initialization: "glorot_uniform" - has_bias: false - } - } - - # SOFTMAX 12 - ###################### - layer { - parents: "11" - name: "12" - data_layout: "model_parallel" - softmax { - } - } - - # Evaluation - ###################### - layer { - parents: "12 1b" - name: "cross_entropy" - data_layout: "model_parallel" - cross_entropy {} - } - layer { - parents: "12 1b" - name: "accuracy" - data_layout: "model_parallel" - categorical_accuracy {} - } - -} diff --git a/model_zoo/tests/model_mnist_partitioned_io.prototext b/model_zoo/tests/model_mnist_partitioned_io.prototext deleted file mode 100644 index 0096a9e72dc..00000000000 --- a/model_zoo/tests/model_mnist_partitioned_io.prototext +++ /dev/null @@ -1,264 +0,0 @@ -model { - data_layout: "model_parallel" - mini_batch_size: 10 - block_size: 256 - num_epochs: 20 - num_parallel_readers: 0 - procs_per_trainer: 1 - - ################################################### 
- # Objective function - ################################################### - - objective_function { - layer_term { layer: "cross_entropy" } - l2_weight_regularization { - scale_factor: 1e-4 - } - } - - ################################################### - # Metrics - ################################################### - - metric { - layer_metric { - name: "categorical accuracy" - layer: "accuracy" - unit: "%" - } - } - - ################################################### - # Callbacks - ################################################### - callback { - print { - interval: 1 - } - } - callback { - timer { - } - } - callback { - summary { - dir: "." - batch_interval: 1 - mat_interval: 25 - } - } - # callback { - # debug { - # phase: "train" - # } - # } - callback { - adaptive_learning_rate { - patience: 4 - amt: 0.1 - } - } - callback { - imcomm { - intertrainer_comm_method: "normal" - all_optimizers: true - } - } - # callback { - # dump_mb_indices { - # basename: "debug_mnist/" - # interval: 1 - # } - # } - # callback { - # disp_io_stats { - # layers: "1" - # } - # } - # callback { - # checkpoint { - # checkpoint_dir: "test" - # checkpoint_epochs: 1 - # checkpoint_steps: 1 - # #checkpoint_secs: 7 - # } - # } - - # callback { - # dump_weights { - # basename: "debug/" - # } - # } - # callback { - # dump_gradients { - # basename: "debug/" - # } - # } - ################################################### - # start of layers - ################################################### - - - # INPUT 1 - ###################### - layer { - name: "1" - children: "1a 1b" - data_layout: "data_parallel" - input {} - } - layer { - parents: "1" - name: "1a" - data_layout: "data_parallel" - split {} - } - layer { - parents: "1" - name: "1b" - data_layout: "data_parallel" - split {} - } - - # FULLY_CONNECTED 2 - ###################### - layer { - parents: "1" - name: "2" - data_layout: "model_parallel" - fully_connected { - num_neurons: 1024 - weight_initialization: "glorot_uniform" - 
has_bias: true - } - } - - # RELU 3 - ###################### - layer { - parents: "2" - name: "3" - data_layout: "model_parallel" - relu { - } - } - - # DROPOUT 4 - ###################### - layer { - parents: "3" - name: "4" - data_layout: "model_parallel" - dropout { - keep_prob: -1 - } - } - - # FULLY_CONNECTED 5 - ###################### - layer { - parents: "4" - name: "5" - data_layout: "model_parallel" - fully_connected { - num_neurons: 1024 - weight_initialization: "glorot_uniform" - has_bias: true - } - } - - # RELU 6 - ###################### - layer { - parents: "5" - name: "6" - data_layout: "model_parallel" - relu { - } - } - - # DROPOUT 7 - ###################### - layer { - parents: "6" - name: "7" - data_layout: "model_parallel" - dropout { - keep_prob: -1 - } - } - - # FULLY_CONNECTED 8 - ###################### - layer { - parents: "7" - name: "8" - data_layout: "model_parallel" - fully_connected { - num_neurons: 1024 - weight_initialization: "glorot_uniform" - has_bias: true - } - } - - # RELU 9 - ###################### - layer { - parents: "8" - name: "9" - data_layout: "model_parallel" - relu { - } - } - - # DROPOUT 10 - ###################### - layer { - parents: "9" - name: "10" - data_layout: "model_parallel" - dropout { - keep_prob: -1 - } - } - - # FULLY_CONNECTED 11 - ###################### - layer { - parents: "10" - name: "11" - data_layout: "model_parallel" - fully_connected { - num_neurons: 10 - weight_initialization: "glorot_uniform" - has_bias: false - } - } - - # SOFTMAX 12 - ###################### - layer { - parents: "11" - name: "12" - data_layout: "model_parallel" - softmax { - } - } - - # Evaluation - ###################### - layer { - parents: "12 1b" - name: "cross_entropy" - data_layout: "model_parallel" - cross_entropy {} - } - layer { - parents: "12 1b" - name: "accuracy" - data_layout: "model_parallel" - categorical_accuracy {} - } - -} diff --git a/tests/test_distributed_io_mnist.sh b/tests/test_distributed_io_mnist.sh 
deleted file mode 100755 index 3c84976b2af..00000000000 --- a/tests/test_distributed_io_mnist.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh - -# You can submit this with something like: -# sbatch -Abrain -N16 --enable-hyperthreads -t 1440 --clear-ssd --msr-safe --output="slurm-lbann--test-%j.out" tests/.sh -# and can include the working directory with: -# --workdir=/g/g19/vanessen/DeepLearning/lbann.git/lbann_v0.2/examples - -#SBATCH --time=1440 - -TESTDIR=`dirname $0` -DIRNAME=`dirname $TESTDIR` - -FULLSCRIPT=. -# Figure out which cluster we are on -CLUSTER=`hostname | sed 's/\([a-zA-Z][a-zA-Z]*\)[0-9]*/\1/g'` -# Look for the binary in the cluster specific build directory -SCRIPT="build/${CLUSTER}.llnl.gov/model_zoo/lbann" - -if [ -e "${DIRNAME}/${SCRIPT}" ] ; then - FULLSCRIPT="${DIRNAME}/${SCRIPT}" -elif [ ! -z "$SLURM_SUBMIT_DIR" ] ; then - if [ -e "${SLURM_SUBMIT_DIR}/${SCRIPT}" ] ; then - FULLSCRIPT="${SLURM_SUBMIT_DIR}/${SCRIPT}" - fi -fi - -echo "Executing script $0 -> ${SLURM_JOB_NAME}" -echo "Clearing /l/ssd for batch execution" -srun -N${SLURM_NNODES} --clear-ssd hostname - -MAX_MB=300 -STD_OPTS="--model=../model_zoo/tests/model_mnist_distributed_io.prototext --reader=../model_zoo/data_readers/data_reader_mnist.prototext --optimizer=../model_zoo/optimizers/opt_adagrad.prototext" -echo "################################################################################" -for b in 300 150 100 75 60 50; do - for k in 1 2 3 4 5 6; do - CMD="srun -n$((${k}*${MAX_MB}/${b})) ${FULLSCRIPT} ${STD_OPTS} --mini_batch_size=${b} --num_epochs=5 --procs_per_model=${k}" - echo "${CMD}" - ${CMD} - echo "################################################################################" - done -done From fb1818dd87d418a40f154de9b66370fa6407bd91 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 13 Feb 2019 14:14:18 -0800 Subject: [PATCH 045/443] Bugfixes for FC autoencoder with MNIST and CIFAR-10. 
--- .../model_autoencoder_cifar10.prototext | 12 +++++------- .../model_conv_autoencoder_cifar10.prototext | 4 ++-- .../model_autoencoder_mnist.prototext | 11 +++++------ .../model_conv_autoencoder_mnist.prototext | 2 +- 4 files changed, 13 insertions(+), 16 deletions(-) diff --git a/model_zoo/models/autoencoder_cifar10/model_autoencoder_cifar10.prototext b/model_zoo/models/autoencoder_cifar10/model_autoencoder_cifar10.prototext index 4afc128da9d..c8f1e32b091 100644 --- a/model_zoo/models/autoencoder_cifar10/model_autoencoder_cifar10.prototext +++ b/model_zoo/models/autoencoder_cifar10/model_autoencoder_cifar10.prototext @@ -44,19 +44,19 @@ model { layer { name: "data" children: "image dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" input {} } layer { parents: "data" name: "image" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "data" name: "dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" dummy {} } @@ -69,7 +69,6 @@ model { data_layout: "model_parallel" fully_connected { num_neurons: 1000 - weight_initialization: "glorot_uniform" has_bias: true } } @@ -103,9 +102,8 @@ model { parents: "dropout1" name: "decode1" data_layout: "model_parallel" - num_neurons_from_data_reader: true + hint_layer: "image" fully_connected { - weight_initialization: "glorot_uniform" has_bias: true } } @@ -137,7 +135,7 @@ model { layer { parents: "dropout2 image" name: "mean_squared_error" - data_layout: "model_parallel" + data_layout: "data_parallel" mean_squared_error {} } diff --git a/model_zoo/models/autoencoder_cifar10/model_conv_autoencoder_cifar10.prototext b/model_zoo/models/autoencoder_cifar10/model_conv_autoencoder_cifar10.prototext index c716bdb9752..1107d1f2bfd 100644 --- a/model_zoo/models/autoencoder_cifar10/model_conv_autoencoder_cifar10.prototext +++ b/model_zoo/models/autoencoder_cifar10/model_conv_autoencoder_cifar10.prototext @@ -91,7 +91,7 @@ model { # RELU 1 ###### layer { - image: 
"conv1" + parents: "conv1" name: "relu1" data_layout: "data_parallel" relu { @@ -341,7 +341,7 @@ model { parents: "relu6" name: "decode1" data_layout: "data_parallel" - num_neurons_from_data_reader: true + hint_layer: "image" fully_connected { num_neurons: 784 has_bias: true diff --git a/model_zoo/models/autoencoder_mnist/model_autoencoder_mnist.prototext b/model_zoo/models/autoencoder_mnist/model_autoencoder_mnist.prototext index dc908c6cee3..81d11fbce37 100644 --- a/model_zoo/models/autoencoder_mnist/model_autoencoder_mnist.prototext +++ b/model_zoo/models/autoencoder_mnist/model_autoencoder_mnist.prototext @@ -33,19 +33,19 @@ model { layer { name: "data" children: "image dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" input {} } layer { parents: "data" name: "image" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "data" name: "dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" dummy {} } @@ -208,7 +208,7 @@ model { parents: "relu6" name: "decode1" data_layout: "model_parallel" - num_neurons_from_data_reader: true + hint_layer: "image" fully_connected { weight_initialization: "glorot_uniform" has_bias: true @@ -226,14 +226,13 @@ model { } } - ################# # RECONSTRUCTION ################# layer { parents: "reconstruction image" name: "mean_squared_error" - data_layout: "model_parallel" + data_layout: "data_parallel" mean_squared_error {} } diff --git a/model_zoo/models/autoencoder_mnist/model_conv_autoencoder_mnist.prototext b/model_zoo/models/autoencoder_mnist/model_conv_autoencoder_mnist.prototext index fb75fc165ec..0bd522e79a7 100644 --- a/model_zoo/models/autoencoder_mnist/model_conv_autoencoder_mnist.prototext +++ b/model_zoo/models/autoencoder_mnist/model_conv_autoencoder_mnist.prototext @@ -334,7 +334,7 @@ model { parents: "relu6" name: "decode1" data_layout: "data_parallel" - num_neurons_from_data_reader: true + hint_layer: "image" fully_connected { has_bias: true } 
From 484138f157be1782b6218b54d79cd121ae53bde3 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 13 Feb 2019 14:27:05 -0800 Subject: [PATCH 046/443] Partitioned data readers require input layers in data-parallel layout. --- .../model_autoencoder_chem_ecfp.prototext | 6 +++--- ...odel_autoencoder_chem_ecfp_200x150x100x100x100.prototext | 6 +++--- .../model_autoencoder_chem_ecfp_500x250x100.prototext | 6 +++--- .../model_autoencoder_chem_sigmoid.prototext | 6 +++--- .../autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext | 6 +++--- model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext | 4 ++-- model_zoo/models/candle/pilot1/combo.prototext | 6 +++--- model_zoo/models/jag/vae_fcn.prototext | 6 +++--- 8 files changed, 23 insertions(+), 23 deletions(-) diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp.prototext index 3bb56aef475..6e40b9f3328 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp.prototext @@ -51,7 +51,7 @@ model { layer { name: "input" children: "data dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" input { target_mode: "reconstruction" } @@ -59,13 +59,13 @@ model { layer { parents: "input" name: "data" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "input" name: "dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" dummy {} } diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_200x150x100x100x100.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_200x150x100x100x100.prototext index b82cc9e95ef..c25231de172 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_200x150x100x100x100.prototext +++ 
b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_200x150x100x100x100.prototext @@ -51,7 +51,7 @@ model { layer { name: "input" children: "data dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" input { target_mode: "reconstruction" } @@ -59,13 +59,13 @@ model { layer { parents: "input" name: "data" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "input" name: "dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" dummy {} } diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_500x250x100.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_500x250x100.prototext index 5bbede7797f..576d5a3c402 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_500x250x100.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_ecfp_500x250x100.prototext @@ -51,19 +51,19 @@ model { layer { name: "input" children: "data dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" input {} } layer { parents: "input" name: "data" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "input" name: "dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" dummy {} } diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_sigmoid.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_sigmoid.prototext index 222f638a62a..374ed07ec4c 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_sigmoid.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_autoencoder_chem_sigmoid.prototext @@ -47,7 +47,7 @@ model { layer { name: "input" children: "data dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" input { target_mode: "reconstruction" } @@ -55,13 +55,13 @@ model { layer { parents: "input" name: "data" - data_layout: 
"model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "input" name: "dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" dummy {} } diff --git a/model_zoo/models/autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext b/model_zoo/models/autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext index e302f1f2b39..47e375a2e25 100644 --- a/model_zoo/models/autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext +++ b/model_zoo/models/autoencoder_candle_pilot1/model_dnn_chem_ecfp.prototext @@ -55,19 +55,19 @@ model { layer { name: "data" children: "finetunedata label" - data_layout: "model_parallel" + data_layout: "data_parallel" input {} } layer { parents: "data" name: "finetunedata" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "data" name: "label" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } diff --git a/model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext b/model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext index 5de896c9931..93509871d2a 100644 --- a/model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext +++ b/model_zoo/models/candle/pilot1/ae_nodeselect_gdc.prototext @@ -48,7 +48,7 @@ model { layer { name: "data" #children: "encode1 recon_data" - data_layout: "model_parallel" + data_layout: "data_parallel" input { target_mode: "N/A" } @@ -57,7 +57,7 @@ model { layer { name: "recon_data" parents: "data" - data_layout: "model_parallel" + data_layout: "data_parallel" identity { } } diff --git a/model_zoo/models/candle/pilot1/combo.prototext b/model_zoo/models/candle/pilot1/combo.prototext index 91324eb0aa6..0b5a5ac5535 100644 --- a/model_zoo/models/candle/pilot1/combo.prototext +++ b/model_zoo/models/candle/pilot1/combo.prototext @@ -51,7 +51,7 @@ model { layer { name: "input" children: "data response" - data_layout: "model_parallel" + data_layout: "data_parallel" input { target_mode: "regression" } @@ -59,13 +59,13 @@ model { layer { 
parents: "input" name: "data" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "input" name: "response" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } diff --git a/model_zoo/models/jag/vae_fcn.prototext b/model_zoo/models/jag/vae_fcn.prototext index abaef603c1b..8f2528984df 100644 --- a/model_zoo/models/jag/vae_fcn.prototext +++ b/model_zoo/models/jag/vae_fcn.prototext @@ -66,7 +66,7 @@ model { layer { name: "input" children: "data dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" input { target_mode: "reconstruction" } @@ -74,13 +74,13 @@ model { layer { parents: "input" name: "data" - data_layout: "model_parallel" + data_layout: "data_parallel" split {} } layer { parents: "input" name: "dummy" - data_layout: "model_parallel" + data_layout: "data_parallel" dummy {} } From 8d59255ebe5604ef9824ee20517536627ae5b3f0 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 13 Feb 2019 14:39:27 -0800 Subject: [PATCH 047/443] Replacing deleted model in Singularity README. --- containers/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/README.md b/containers/README.md index e405a25f0f4..c0bceafbb54 100644 --- a/containers/README.md +++ b/containers/README.md @@ -17,7 +17,7 @@ Singularity is designed to take advantage of underlying HPC resources. 
The lbann To run LBANN use mpirun and singularity's execute command: ``` salloc -N2 - mpirun -np 4 singularity exec -B /p:/p lbann.img /lbann/spack_builds/singularity_optimizied_test/model_zoo/lbann mpirun -np 4 singularity exec -B /p:/p lbann.img /lbann/spack_builds/singularity/model_zoo/lbann --model=/lbann/model_zoo/tests/model_mnist_partitioned_io.prototext --reader=/lbann/model_zoo/data_readers/data_reader_mnist.prototext --optimizer=/lbann/ model_zoo/optimizers/opt_adagrad.prototext +mpirun -np 4 singularity exec -B /p:/p lbann.img /lbann/spack_builds/singularity_optimizied_test/model_zoo/lbann mpirun -np 4 singularity exec -B /p:/p lbann.img /lbann/spack_builds/singularity/model_zoo/lbann --model=/lbann/model_zoo/models/lenet_mnist/model_lenet_mnist.prototext --reader=/lbann/model_zoo/data_readers/data_reader_mnist.prototext --optimizer=/lbann/ model_zoo/optimizers/opt_adagrad.prototext ``` *Note: The -B singularity command, binds directories from the surrounding filesystem to the container. Be sure to include any necessary files using this command (i.e model prototext files, datasets, etc). Alternatively, system admins are capable of allowing a singularity container to utilize the host's filesystem. This is done by changing MOUNT HOSTFS in the singularity config file.* From c0854ab50fcd0fd9ec70724df026906848e12e3a Mon Sep 17 00:00:00 2001 From: "David A. 
Hysom" Date: Wed, 13 Feb 2019 15:12:34 -0800 Subject: [PATCH 048/443] anged native mpu calls to use our comm class --- src/data_store/data_store_jag.cpp | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index a1ecf41ebc9..3a8828f600e 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -138,17 +138,23 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s //======================================================================== //part 1.5: exchange super_node sizes + // these can eventually be in data_store_jag.hpp + std::vector> send_requests(m_np); + std::vector> recv_requests(m_np); + for (int p=0; p(&m_outgoing_msg_sizes[p]); + m_comm->nb_send(s, sizeof(int), m_comm->get_trainer_rank(), p, send_requests[p]); } for (int p=0; p(&m_incoming_msg_sizes[p]); + m_comm->nb_recv(s, sizeof(int), m_comm->get_trainer_rank(), p, recv_requests[p]); + //MPI_Irecv((void*)&m_incoming_msg_sizes[p], 1, MPI_INT, p, 0, MPI_COMM_WORLD, &m_recv_requests[p]); - MPI_Waitall(m_np, m_send_requests.data(), m_status.data()); - MPI_Waitall(m_np, m_recv_requests.data(), m_status.data()); + m_comm->wait_all(send_requests); + m_comm->wait_all(recv_requests); //======================================================================== //part 2: exchange the actual data @@ -157,19 +163,20 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s // start sends for outgoing data for (int p=0; p(m_send_buffer_2[p].data_ptr()); + m_comm->nb_send(s, m_outgoing_msg_sizes[p], m_comm->get_trainer_rank(), p, send_requests[p]); } // start recvs for incoming data + std::vector> recv_requests_2(m_np); for (int p=0; pnb_recv((unsigned char*)m_recv_buffer[p].data_ptr(), m_incoming_msg_sizes[p], m_comm->get_trainer_rank(), p, recv_requests_2[p]); } // wait for all msgs to complete - MPI_Waitall(m_np, 
m_recv_requests.data(), m_status.data()); - MPI_Waitall(m_np, m_send_requests.data(), m_status.data()); + m_comm->wait_all(send_requests); + m_comm->wait_all(recv_requests); debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; //======================================================================== From 4312008ef088cb625ac9af5a784d93b2e20da12a Mon Sep 17 00:00:00 2001 From: Sam Ade Jacobs Date: Wed, 13 Feb 2019 15:31:40 -0800 Subject: [PATCH 049/443] add batch interval option to callback during evaluation. Useful for large-scale validation/testing. Dont expect this to break since default is maintained if user did not provide batch interval --- src/models/model.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/models/model.cpp b/src/models/model.cpp index 09774714e7b..e92ca4f2e11 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -1242,7 +1242,9 @@ void model::do_batch_begin_cbs(execution_mode mode) { break; case execution_mode::validation: case execution_mode::testing: - cb->on_batch_evaluate_begin(this); + if (get_cur_step() % cb->get_batch_interval() == 0) { + cb->on_batch_evaluate_begin(this); + } break; default: std::stringstream err; From 3a2da0da091e88d5d82ba455d4c45e75cddcd152 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 13 Feb 2019 15:42:28 -0800 Subject: [PATCH 050/443] Updated the per-sample data exchange to use the LBANN comm object. Changed message data types to El::byte from unsigned char. 
--- include/lbann/data_store/data_store_jag.hpp | 5 +- src/data_store/data_store_jag.cpp | 61 +++++++-------------- 2 files changed, 21 insertions(+), 45 deletions(-) diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index aabccb0feb9..0ea4a76bef3 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -97,9 +97,8 @@ protected : /// work space; used in exchange_data std::vector m_send_buffer; std::vector m_send_buffer_2; - std::vector m_send_requests; - std::vector m_recv_requests; - std::vector m_status; + std::vector> m_send_requests; + std::vector> m_recv_requests; std::vector m_recv_buffer; std::vector m_outgoing_msg_sizes; std::vector m_incoming_msg_sizes; diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 261be4c473e..06858fe1079 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -102,7 +102,6 @@ void data_store_jag::setup_data_store_buffers() { m_send_buffer_2.resize(m_np); m_send_requests.resize(m_np); m_recv_requests.resize(m_np); - m_status.resize(m_np); m_outgoing_msg_sizes.resize(m_np); m_incoming_msg_sizes.resize(m_np); m_recv_buffer.resize(m_np); @@ -139,23 +138,18 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s //======================================================================== //part 1.5: exchange super_node sizes - // these can eventually be in data_store_jag.hpp - std::vector> send_requests(m_np); - std::vector> recv_requests(m_np); - for (int p=0; p(&m_outgoing_msg_sizes[p]); - m_comm->nb_send(s, sizeof(int), m_comm->get_trainer_rank(), p, send_requests[p]); + El::byte *s = reinterpret_cast(&m_outgoing_msg_sizes[p]); + m_comm->nb_send(s, sizeof(int), m_comm->get_trainer_rank(), p, m_send_requests[p]); } for (int p=0; p(&m_incoming_msg_sizes[p]); - m_comm->nb_recv(s, sizeof(int), m_comm->get_trainer_rank(), p, 
recv_requests[p]); - //MPI_Irecv((void*)&m_incoming_msg_sizes[p], 1, MPI_INT, p, 0, MPI_COMM_WORLD, &m_recv_requests[p]); - - m_comm->wait_all(send_requests); - m_comm->wait_all(recv_requests); + El::byte *s = reinterpret_cast(&m_incoming_msg_sizes[p]); + m_comm->nb_recv(s, sizeof(int), m_comm->get_trainer_rank(), p, m_recv_requests[p]); + } + m_comm->wait_all(m_send_requests); + m_comm->wait_all(m_recv_requests); //======================================================================== //part 2: exchange the actual data @@ -164,20 +158,20 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s // start sends for outgoing data for (int p=0; p(m_send_buffer_2[p].data_ptr()); - m_comm->nb_send(s, m_outgoing_msg_sizes[p], m_comm->get_trainer_rank(), p, send_requests[p]); + const El::byte *s = reinterpret_cast(m_send_buffer_2[p].data_ptr()); + m_comm->nb_send(s, m_outgoing_msg_sizes[p], m_comm->get_trainer_rank(), p, m_send_requests[p]); } // start recvs for incoming data - std::vector> recv_requests_2(m_np); + std::vector> recv_requests_2(m_np); for (int p=0; pnb_recv((unsigned char*)m_recv_buffer[p].data_ptr(), m_incoming_msg_sizes[p], m_comm->get_trainer_rank(), p, recv_requests_2[p]); + m_comm->nb_recv((El::byte*)m_recv_buffer[p].data_ptr(), m_incoming_msg_sizes[p], m_comm->get_trainer_rank(), p, recv_requests_2[p]); } // wait for all msgs to complete - m_comm->wait_all(send_requests); - m_comm->wait_all(recv_requests); + m_comm->wait_all(m_send_requests); + m_comm->wait_all(m_recv_requests); debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; //======================================================================== @@ -269,14 +263,6 @@ const conduit::Node & data_store_jag::get_conduit_node(int data_id) const { // conduit/src/libs/relay/conduit_relay_mpi.cpp void data_store_jag::build_node_for_sending(const conduit::Node &node_in, conduit::Node &node_out) { -#if 0 -if (m_master) { - std::cout << " 
\"======================================\n"; - node_in.print(); -} - - MPI_Barrier(MPI_COMM_WORLD); -#endif /* size_t i = node_in.number_of_children(); debug << "num children: " << i << "\n"; @@ -338,12 +324,10 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) int num_send_req = build_indices_i_will_send(current_pos, mb_size); int num_recv_req = build_indices_i_will_recv(current_pos, mb_size); - //m_send_buffer.resize(sz); m_send_requests.resize(num_send_req); m_recv_requests.resize(num_recv_req); m_recv_buffer.resize(num_recv_req); m_recv_data_ids.resize(num_recv_req); - m_status.resize(std::max(num_send_req, num_recv_req)); //======================================================================== //part 2: exchange the actual data @@ -354,13 +338,12 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) size_t ss = 0; for (int p=0; p &indices = m_indices_to_send[p]; - // std::cout << "I am going to be sending data for p " << p << std::endl; for (auto index : indices) { if (m_data.find(index) == m_data.end()) { LBANN_ERROR("failed to find data_id: " + std::to_string(index) + " to be sent to " + std::to_string(p) + " in m_data"); } const conduit::Node& n = m_data[index]; - const void *s = n.data_ptr(); + const El::byte *s = reinterpret_cast(n.data_ptr()); if(!n.is_contiguous()) { LBANN_ERROR("data_id: " + std::to_string(index) + " does not have a contiguous layout"); } @@ -370,8 +353,7 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) if(n.contiguous_data_ptr() == nullptr) { LBANN_ERROR("data_id: " + std::to_string(index) + " does not have a valid contiguous data pointer"); } - // MPI_Isend(s, m_compacted_sample_size, MPI_BYTE, p, index, comm->get_world_comm(), &m_send_requests[ss++]); - MPI_Isend(s, m_compacted_sample_size, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_send_requests.at(ss++)); + m_comm->nb_tagged_send(s, m_compacted_sample_size, p, index, 
m_send_requests[ss++], m_comm->get_trainer_comm()); } } @@ -380,11 +362,6 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) LBANN_ERROR("ss != m_send_requests.size; ss: " + std::to_string(ss) + " m_send_requests.size: " + std::to_string(m_send_requests.size())); } - // MPI_Barrier(MPI_COMM_WORLD); - // if (m_master) std::cerr << "\nSENDS STARTED\n\n"; - // debug << "\nSENDS STARTED\n\n"; - // MPI_Barrier(MPI_COMM_WORLD); - // start recvs for incoming data ss = 0; @@ -393,7 +370,8 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) // debug << "starting " << indices.size() << " recvs from " << p << "\n"; for (auto index : indices) { m_recv_buffer[ss].set(conduit::DataType::uint8(m_compacted_sample_size)); - MPI_Irecv(m_recv_buffer[ss].data_ptr(), m_compacted_sample_size, MPI_BYTE, p, index, MPI_COMM_WORLD, &m_recv_requests[ss]); + El::byte *r = reinterpret_cast(m_recv_buffer[ss].data_ptr()); + m_comm->nb_tagged_recv(r, m_compacted_sample_size, p, index, m_recv_requests[ss], m_comm->get_trainer_comm()); m_recv_data_ids[ss] = index; ++ss; } @@ -409,9 +387,8 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) } // wait for all msgs to complete - MPI_Waitall(m_send_requests.size(), m_send_requests.data(), m_status.data()); - m_status.clear(); - MPI_Waitall(m_recv_requests.size(), m_recv_requests.data(), m_status.data()); + m_comm->wait_all(m_send_requests); + m_comm->wait_all(m_recv_requests); // debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; // debug.close(); From 151a5eb3697d36888e2ace196029b58161900fc4 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 13 Feb 2019 15:56:12 -0800 Subject: [PATCH 051/443] Fixed bug where a set of receive requests were not being waited on. 
--- src/data_store/data_store_jag.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 06858fe1079..536785cc711 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -163,10 +163,9 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s } // start recvs for incoming data - std::vector> recv_requests_2(m_np); for (int p=0; pnb_recv((El::byte*)m_recv_buffer[p].data_ptr(), m_incoming_msg_sizes[p], m_comm->get_trainer_rank(), p, recv_requests_2[p]); + m_comm->nb_recv((El::byte*)m_recv_buffer[p].data_ptr(), m_incoming_msg_sizes[p], m_comm->get_trainer_rank(), p, m_recv_requests[p]); } // wait for all msgs to complete From fc0a3c01f84d0c3003c9bf622f7bf5138619f8e2 Mon Sep 17 00:00:00 2001 From: Naoya Maruyama Date: Wed, 13 Feb 2019 17:24:57 -0800 Subject: [PATCH 052/443] Update the CUB version to 1.8.0 --- superbuild/cub/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/superbuild/cub/CMakeLists.txt b/superbuild/cub/CMakeLists.txt index a503314ffe5..9391aa726c1 100644 --- a/superbuild/cub/CMakeLists.txt +++ b/superbuild/cub/CMakeLists.txt @@ -9,7 +9,7 @@ else () CACHE STRING "The URL from which to clone CUB.") endif () -set(CUB_TAG "1.5.2" CACHE STRING "The git tag or hash to checkout for CUB") +set(CUB_TAG "1.8.0" CACHE STRING "The git tag or hash to checkout for CUB") # Where to install CUB set(CUB_CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}" From 04e5e4877ca2b7d592155a38abb0b247c8a0207e Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 13 Feb 2019 17:39:27 -0800 Subject: [PATCH 053/443] Commenting out debugging code and removed dead code. 
--- src/data_store/data_store_jag.cpp | 32 +++++++------------------------ 1 file changed, 7 insertions(+), 25 deletions(-) diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 536785cc711..81e64bc854e 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -115,12 +115,12 @@ void data_store_jag::setup_data_store_buffers() { // handle things ourselves. TODO: possibly modify conduit to // handle non-blocking comms void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_size) { - double tm1 = get_time(); + // double tm1 = get_time(); //======================================================================== //part 1: construct the super_nodes - double tma = get_time(); + // double tma = get_time(); build_indices_i_will_send(current_pos, mb_size); build_indices_i_will_recv(current_pos, mb_size); @@ -154,7 +154,7 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s //======================================================================== //part 2: exchange the actual data - tma = get_time(); + // tma = get_time(); // start sends for outgoing data for (int p=0; pwait_all(m_send_requests); m_comm->wait_all(m_recv_requests); - debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; + // debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; //======================================================================== //part 3: construct the Nodes needed by me for the current minibatch - double tmw = get_time(); + // double tmw = get_time(); m_minibatch_data.clear(); for (int p=0; p> proc_to_indices(m_np); - size_t j = 0; - for (size_t i = 0; i < m_shuffled_indices->size(); i++) { - auto index = (*m_shuffled_indices)[i]; - m_ds_indices[j].insert(index); - m_owner[index] = j; - j = (j + 1) % m_np; - } -} -#endif - void data_store_jag::build_owner_map() { m_owner.clear(); size_t j = 0; From 
9d9a9618e1ec03938865468a5788f90fc565f72e Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 13 Feb 2019 17:53:52 -0800 Subject: [PATCH 054/443] Removed debugging code. --- src/data_store/data_store_jag.cpp | 63 +------------------------------ 1 file changed, 1 insertion(+), 62 deletions(-) diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 81e64bc854e..06101ea511d 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -38,9 +38,6 @@ namespace lbann { -std::ofstream debug; -char b[1024]; - data_store_jag::data_store_jag( generic_data_reader *reader, model *m) : generic_data_store(reader, m), @@ -50,9 +47,7 @@ data_store_jag::data_store_jag( set_name("data_store_jag"); } -data_store_jag::~data_store_jag() { - debug.close(); -} +data_store_jag::~data_store_jag() {} void data_store_jag::setup() { double tm1 = get_time(); @@ -79,9 +74,6 @@ void data_store_jag::setup() { } } - sprintf(b, "debug.%d", m_rank); - debug.open(b); - if (m_master) { std::cout << "num shuffled_indices: " << m_shuffled_indices->size() << "\n"; } @@ -172,7 +164,6 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s m_comm->wait_all(m_send_requests); m_comm->wait_all(m_recv_requests); - // debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; //======================================================================== //part 3: construct the Nodes needed by me for the current minibatch @@ -198,10 +189,6 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s m_minibatch_data[atoi(t.c_str())][t].update_external(m_reconstituted[p][t]); } } - - // debug << "TOTAL Time to unpack and break up all incoming data: " << get_time() - tmw << "\n"; - - // debug << "TOTAL exchange_data Time: " << get_time() - tm1 << "\n"; } void data_store_jag::set_conduit_node(int data_id, conduit::Node &node) { @@ -261,16 +248,6 @@ const conduit::Node & 
data_store_jag::get_conduit_node(int data_id) const { // code in the following method is a modification of code from // conduit/src/libs/relay/conduit_relay_mpi.cpp void data_store_jag::build_node_for_sending(const conduit::Node &node_in, conduit::Node &node_out) { - -/* -size_t i = node_in.number_of_children(); -debug << "num children: " << i << "\n"; -conduit::NodeConstIterator t = node_in.children(); -while (t.has_next()) { - debug << ">"<< t.name() <<"<\n"; -} -*/ - node_out.reset(); conduit::Schema s_data_compact; if( node_in.is_compact() && node_in.is_contiguous()) { @@ -306,20 +283,6 @@ while (t.has_next()) { void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) { - // double tm1 = get_time(); - - // debug.open(b, std::ios::app); - // debug << "\n============================================================\n" - // <<"starting exchange_data_by_sample; epoch: "<get_cur_epoch()<< " data size: "<get_cur_epoch()<< " data size: "< &indices = m_indices_to_recv[p]; -// debug << "starting " << indices.size() << " recvs from " << p << "\n"; for (auto index : indices) { m_recv_buffer[ss].set(conduit::DataType::uint8(m_compacted_sample_size)); El::byte *r = reinterpret_cast(m_recv_buffer[ss].data_ptr()); @@ -376,7 +336,6 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) } } - // if(m_master) std::cout << "\nRECV COMPLETE\n\n"; // sanity checks if (ss != m_recv_buffer.size()) { LBANN_ERROR("ss != m_recv_buffer.size; ss: " + std::to_string(ss) + " m_recv_buffer.size: " + std::to_string(m_recv_buffer.size())); @@ -389,17 +348,9 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) m_comm->wait_all(m_send_requests); m_comm->wait_all(m_recv_requests); -// debug << "TOTAL Time to exchange the actual data: " << get_time() - tma << "\n"; -// debug.close(); -// debug.open(b, std::ios::app); - -// tma = get_time(); - //======================================================================== 
//part 3: construct the Nodes needed by me for the current minibatch -// double tmw = get_time(); - conduit::Node nd; m_minibatch_data.clear(); for (size_t j=0; j < m_recv_buffer.size(); j++) { @@ -415,20 +366,8 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) n_msg["data"].set_external(rcv_schema,n_buff_ptr); int data_id = m_recv_data_ids[j]; - // m_minibatch_data[data_id].set(n_msg["data"]); m_minibatch_data[data_id].set_external(n_msg["data"]); } -// for (auto &t : m_minibatch_data) { -// debug << t.first << " "; -// } -// debug << "\n"; - -// debug << "TOTAL Time to unpack incoming data: " << get_time() - tmw << "\n"; - -// if (m_master) std::cout << "data_store_jag::exchange_data Time: " << get_time() - tm1 << "\n"; - -// debug << "TOTAL exchange_data Time: " << get_time() - tm1 << "\n"; -// debug.close(); debug.open(b, std::ios::app); } int data_store_jag::build_indices_i_will_recv(int current_pos, int mb_size) { From e29f0a4c2d19ce797ff7f7daa2f2966d0f6494ec Mon Sep 17 00:00:00 2001 From: "David A. 
Hysom" Date: Wed, 13 Feb 2019 19:22:12 -0800 Subject: [PATCH 055/443] a bit of cleanup and tweaking prior to merging with develop --- include/lbann/data_store/data_store_jag.hpp | 9 ++++---- .../lbann/data_store/generic_data_store.hpp | 3 +-- src/data_store/data_store_jag.cpp | 22 +++++++++---------- src/data_store/generic_data_store.cpp | 10 --------- 4 files changed, 16 insertions(+), 28 deletions(-) diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index 0ea4a76bef3..be1f98ee97b 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -78,6 +78,7 @@ protected : } else { exchange_data_by_sample(current_pos, mb_size); } + ++m_n; } void exchange_data_by_super_node(size_t current_pos, size_t mb_size); void exchange_data_by_sample(size_t current_pos, size_t mb_size); @@ -111,11 +112,11 @@ protected : /// size of a compacted conduit::Node that contains a single sample int m_compacted_sample_size; - /// assignes values to m_super_node_overhead and m_compacted_sample_size + /// assigns values to m_super_node_overhead and m_compacted_sample_size void compute_super_node_overhead(); - /// ??? as our code currently stands (sun, 10 feb) this isn't necessary - /// -- but it's being used. 
@TODO: revisit + /// used in exchange_data_by_super_node(); contains the super_nodes, + /// after they have been converted from compacted format std::vector m_reconstituted; void setup_data_store_buffers(); @@ -123,7 +124,7 @@ protected : /// called by exchange_data void build_node_for_sending(const conduit::Node &node_in, conduit::Node &node_out); - /// fills in mowner, which maps index -> owning processor + /// fills in m_owner, which maps index -> owning processor void build_owner_map(); /// maps processor id -> set of indices (whose associated samples) diff --git a/include/lbann/data_store/generic_data_store.hpp b/include/lbann/data_store/generic_data_store.hpp index a26b5004861..920bcf81f95 100644 --- a/include/lbann/data_store/generic_data_store.hpp +++ b/include/lbann/data_store/generic_data_store.hpp @@ -141,8 +141,7 @@ class generic_data_store { virtual void setup_data_store_buffers() {}; protected : - // number of times set_shuffled_indices was called. This is - // a hack to get data_store_jag working correctly + // number of times exchange_data is called int m_n; virtual void exchange_data() = 0; diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 06101ea511d..62fc362f8b1 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -100,20 +100,19 @@ void data_store_jag::setup_data_store_buffers() { m_reconstituted.resize(m_np); } -// this gets called at the beginning of each epoch (except for epoch 0) -// // Note: conduit has a very nice interface for communicating nodes // in blocking scenarios. Unf, for non-blocking we need to // handle things ourselves. 
TODO: possibly modify conduit to // handle non-blocking comms void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_size) { - // double tm1 = get_time(); + + if (m_n == 0) { + setup_data_store_buffers(); + } //======================================================================== //part 1: construct the super_nodes - // double tma = get_time(); - build_indices_i_will_send(current_pos, mb_size); build_indices_i_will_recv(current_pos, mb_size); @@ -146,8 +145,6 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s //======================================================================== //part 2: exchange the actual data - // tma = get_time(); - // start sends for outgoing data for (int p=0; p(m_send_buffer_2[p].data_ptr()); @@ -167,8 +164,6 @@ void data_store_jag::exchange_data_by_super_node(size_t current_pos, size_t mb_s //======================================================================== //part 3: construct the Nodes needed by me for the current minibatch - // double tmw = get_time(); - m_minibatch_data.clear(); for (int p=0; p &names = m_reconstituted[p].child_names(); @@ -232,7 +232,7 @@ const conduit::Node & data_store_jag::get_conduit_node(int data_id) const { if (t != m_data.end()) { if(m_super_node) { return t->second; - }else { + } else { return t->second["data"]; } } @@ -281,7 +281,6 @@ void data_store_jag::build_node_for_sending(const conduit::Node &node_in, condui } } - void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) { int num_send_req = build_indices_i_will_send(current_pos, mb_size); int num_recv_req = build_indices_i_will_recv(current_pos, mb_size); @@ -322,7 +321,6 @@ void data_store_jag::exchange_data_by_sample(size_t current_pos, size_t mb_size) LBANN_ERROR("ss != m_send_requests.size; ss: " + std::to_string(ss) + " m_send_requests.size: " + std::to_string(m_send_requests.size())); } - // start recvs for incoming data ss = 0; for (int p=0; p *indices, 
bool exchange_indices) { if (m_master)std::cerr<<"starting set_shuffled_indices; epoch: "<get_cur_epoch()<<" role: " << m_reader->get_role()<<"; n: " << m_n << "\n"; m_shuffled_indices = indices; -// if (m_model->get_cur_epoch() > 0 && exchange_indices && m_in_memory) { - // if (m_n > 0) { - // exchange_data(); - // } - - if(m_n > 0) { - setup_data_store_buffers(); - } - - ++m_n; } void generic_data_store::exchange_mb_counts() { From ab0031e071a0a8071c88334dd80e6c666c3cc034 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Thu, 14 Feb 2019 01:19:14 -0800 Subject: [PATCH 056/443] fix mismatched tag in Superbuild --- superbuild/lbann/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/superbuild/lbann/CMakeLists.txt b/superbuild/lbann/CMakeLists.txt index 437691bec8f..fd1310769bd 100644 --- a/superbuild/lbann/CMakeLists.txt +++ b/superbuild/lbann/CMakeLists.txt @@ -62,7 +62,7 @@ if (TARGET ALUMINUM) list(APPEND _LBANN_DEPENDS ALUMINUM) endif (LBANN_WITH_ALUMINUM) -endif (TARGET Aluminum) +endif (TARGET ALUMINUM) if (TARGET CEREAL) list(APPEND _LBANN_DEPENDS CEREAL) set(LBANN_SB_FWD_LBANN_CEREAL_DIR "${CEREAL_DIR}") From 1ce81ac625ca1f079c6e80d98ca1bb0ad4087b19 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Thu, 14 Feb 2019 01:33:00 -0800 Subject: [PATCH 057/443] Add missing STATUS to message for information on protobuf so it gets directed to stdout instead of stderr --- cmake/modules/SetupProtobuf.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/modules/SetupProtobuf.cmake b/cmake/modules/SetupProtobuf.cmake index 51fac7f9f33..cfe37b87b30 100644 --- a/cmake/modules/SetupProtobuf.cmake +++ b/cmake/modules/SetupProtobuf.cmake @@ -41,7 +41,7 @@ else () if(NOT Protobuf_FOUND) find_package(Protobuf "${PROTOBUF_MIN_VERSION}" CONFIG QUIET REQUIRED) endif () - message("Found Protobuf: ${Protobuf_DIR}") + message(STATUS "Found Protobuf: ${Protobuf_DIR}") endif () if (NOT Protobuf_FOUND) From 951ac319b6283ae62d080bfbb1943a02244ccafa Mon Sep 17 00:00:00 2001 From: Naoya Maruyama Date: Thu, 14 Feb 2019 21:04:47 -0800 Subject: [PATCH 058/443] Make Cub always log certain performance-critical events. Once stabilized, it is generally expected that: - Memory request does not actually acquire a new memory chunk. - Memory release just returns the pointer to the memory pool and never calls cudaFree. These events can be a significant performance penalty. Cub will log messages when they happen only if its debug option is enabled, but the options also enables other logging, yielding extremely verbose output. This patch only enables logging at the above events no matter the debug option is enabled. 
--- .../cub/cub_enable_alloc_free_logging.patch | 22 +++++++++++++++++++ superbuild/cub/CMakeLists.txt | 3 ++- 2 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 external/cub/cub_enable_alloc_free_logging.patch diff --git a/external/cub/cub_enable_alloc_free_logging.patch b/external/cub/cub_enable_alloc_free_logging.patch new file mode 100644 index 00000000000..505fbff19e5 --- /dev/null +++ b/external/cub/cub_enable_alloc_free_logging.patch @@ -0,0 +1,22 @@ +diff --git a/cub/util_allocator.cuh b/cub/util_allocator.cuh +index 0e6dd048..f41f2e64 100644 +--- a/cub/util_allocator.cuh ++++ b/cub/util_allocator.cuh +@@ -446,7 +446,7 @@ struct CachingDeviceAllocator + if (CubDebug(error = cudaMalloc(&search_key.d_ptr, search_key.bytes)) == cudaErrorMemoryAllocation) + { + // The allocation attempt failed: free all cached blocks on device and retry +- if (debug) _CubLog("\tDevice %d failed to allocate %lld bytes for stream %lld, retrying after freeing cached allocations", ++ _CubLog("\tDevice %d failed to allocate %lld bytes for stream %lld, retrying after freeing cached allocations", + device, (long long) search_key.bytes, (long long) search_key.associated_stream); + + error = cudaSuccess; // Reset the error we will return +@@ -606,7 +606,7 @@ struct CachingDeviceAllocator + if (CubDebug(error = cudaFree(d_ptr))) return error; + if (CubDebug(error = cudaEventDestroy(search_key.ready_event))) return error; + +- if (debug) _CubLog("\tDevice %d freed %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n", ++ _CubLog("\tDevice %d freed %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n", + device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) 
cached_bytes[device].live); + } + diff --git a/superbuild/cub/CMakeLists.txt b/superbuild/cub/CMakeLists.txt index 9391aa726c1..84af8a4ec71 100644 --- a/superbuild/cub/CMakeLists.txt +++ b/superbuild/cub/CMakeLists.txt @@ -29,7 +29,8 @@ ExternalProject_Add(CUB PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/CUBCMakeLists.txt - ${CMAKE_CURRENT_BINARY_DIR}/src/CMakeLists.txt + ${CMAKE_CURRENT_BINARY_DIR}/src/CMakeLists.txt && + patch -p1 < ${LBANN_SRC_DIR}/external/cub/cub_enable_alloc_free_logging.patch INSTALL_DIR ${CUB_CMAKE_INSTALL_PREFIX} USES_TERMINAL_BUILD 1 LOG_DOWNLOAD 1 From 31f23c649b4990cdaf19f7db6253da1b693958d5 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Fri, 15 Feb 2019 13:31:44 -0800 Subject: [PATCH 059/443] update comm to be managed by smart pointer --- include/lbann/base.hpp | 5 ++++- src/base.cpp | 5 +++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/include/lbann/base.hpp b/include/lbann/base.hpp index 895a963334d..18d66e2c1ab 100644 --- a/include/lbann/base.hpp +++ b/include/lbann/base.hpp @@ -42,6 +42,8 @@ namespace lbann { // Forward-declaration. class lbann_comm; +using lbann_comm_ptr = std::unique_ptr; + /** Create LBANN communicator. * * Initializes Elemental, which in turn initializes MPI, Aluminum, @@ -54,7 +56,8 @@ class lbann_comm; * @param seed RNG seed. * @return LBANN communicator. */ -lbann_comm* initialize(int& argc, char**& argv, int seed = -1); +lbann_comm_ptr initialize(int& argc, char**& argv, int seed = -1); + /** Destroy LBANN communicator. * * Finalizes Elemental, which in turn finalizes MPI, Aluminum, and diff --git a/src/base.cpp b/src/base.cpp index 60560f06d64..d528036d02f 100644 --- a/src/base.cpp +++ b/src/base.cpp @@ -47,12 +47,13 @@ namespace lbann { -lbann_comm* initialize(int& argc, char**& argv, int seed) { +lbann_comm_ptr initialize(int& argc, char**& argv, int seed) { // Initialize Elemental. El::Initialize(argc, argv); // Create a new comm object. 
// Initial creation with every process in one model. - auto* comm = new lbann_comm(0); + auto comm = lbann_comm_ptr{new lbann_comm(0), &lbann::finalize }; + #if defined(LBANN_TOPO_AWARE) // Determine the number of NUMA nodes present. hwloc_topology_t topo; From d7505142dfd3731ed4a0c04976336f9eebdcb9fe Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Fri, 15 Feb 2019 13:32:14 -0800 Subject: [PATCH 060/443] update lbann_library functions to return smart pointers --- include/lbann/utils/lbann_library.hpp | 21 +- src/utils/lbann_library.cpp | 290 +++++++++++++------------- 2 files changed, 155 insertions(+), 156 deletions(-) diff --git a/include/lbann/utils/lbann_library.hpp b/include/lbann/utils/lbann_library.hpp index ffcd7c5040b..3f01b4ef82e 100644 --- a/include/lbann/utils/lbann_library.hpp +++ b/include/lbann/utils/lbann_library.hpp @@ -34,15 +34,18 @@ namespace lbann { const int lbann_default_random_seed = 42; -std::shared_ptr construct_io_thread_pool(lbann_comm *comm); - -model *build_model_from_prototext(int argc, char **argv, - lbann_data::LbannPB &pb, - lbann_comm *comm, - std::shared_ptr io_thread_pool, - bool first_model); - -void print_lbann_configuration(lbann_data::Model *pb_model, lbann_comm *comm, int io_threads_per_process, int io_threads_offset); +std::unique_ptr construct_io_thread_pool(lbann_comm *comm); + +std::unique_ptr build_model_from_prototext( + int argc, char **argv, + lbann_data::LbannPB &pb, + lbann_comm *comm, + std::shared_ptr io_thread_pool, + bool first_model); + +void print_lbann_configuration( + lbann_data::Model *pb_model, lbann_comm *comm, + int io_threads_per_process, int io_threads_offset); } // namespace lbann diff --git a/src/utils/lbann_library.cpp b/src/utils/lbann_library.cpp index fe7d2f5bfc1..9f74500706c 100644 --- a/src/utils/lbann_library.cpp +++ b/src/utils/lbann_library.cpp @@ -30,7 +30,7 @@ namespace lbann { /// Setup I/O thread pool that is shared across all models -std::shared_ptr 
construct_io_thread_pool(lbann_comm *comm) { +std::unique_ptr construct_io_thread_pool(lbann_comm *comm) { int num_io_threads = num_free_cores_per_process(comm); options *opts = options::get(); @@ -48,193 +48,189 @@ std::shared_ptr construct_io_thread_pool(lbann_comm *comm) { " (Limited to # Unused Compute Cores or 1)" << std::endl; } - std::shared_ptr io_thread_pool = std::make_shared(); + auto io_thread_pool = make_unique(); io_thread_pool->launch_pinned_threads(num_io_threads, io_threads_offset); return io_thread_pool; } -model *build_model_from_prototext(int argc, char **argv, - lbann_data::LbannPB &pb, - lbann_comm *comm, - std::shared_ptr io_thread_pool, - bool first_model) { +std::unique_ptr build_model_from_prototext( + int argc, char **argv, + lbann_data::LbannPB &pb, + lbann_comm *comm, + std::shared_ptr io_thread_pool, + bool first_model) { + int random_seed = lbann_default_random_seed; bool master = comm->am_world_master(); - if (master) std::cerr << "starting build_model_from_prototext\n"; - model *model = nullptr; //d hysom bad namimg! 
should fix - try { - std::stringstream err; - options *opts = options::get(); + if (master) { + std::cerr << "starting build_model_from_prototext" << std::endl; + } + + std::stringstream err; + options *opts = options::get(); - // Optionally over-ride some values in prototext - get_cmdline_overrides(comm, pb); + // Optionally over-ride some values in prototext + get_cmdline_overrides(comm, pb); - customize_data_readers_index_list(comm, pb); + customize_data_readers_index_list(comm, pb); - lbann_data::Model *pb_model = pb.mutable_model(); + lbann_data::Model *pb_model = pb.mutable_model(); - // Adjust the number of parallel readers; this may be adjusted - // after calling split_trainers() - set_num_parallel_readers(comm, pb); + // Adjust the number of parallel readers; this may be adjusted + // after calling split_trainers() + set_num_parallel_readers(comm, pb); - // Check to see if the model wants to reduce the I/O parallelism - if(pb_model->serialize_background_io() && io_thread_pool->get_num_threads() != 1) { - if(master) { - std::cout << "Model " << pb_model->name() << " serialized the background I/O threads" << std::endl; - } - io_thread_pool->relaunch_pinned_threads(1); + // Check to see if the model wants to reduce the I/O parallelism + if(pb_model->serialize_background_io() && io_thread_pool->get_num_threads() != 1) { + if(master) { + std::cout << "Model " << pb_model->name() << " serialized the background I/O threads" << std::endl; } + io_thread_pool->relaunch_pinned_threads(1); + } - // Setup I/O threads - auto io_threads_per_process = io_thread_pool->get_num_threads(); - auto io_threads_offset = io_thread_pool->get_threads_offset(); + // Setup I/O threads + auto io_threads_per_process = io_thread_pool->get_num_threads(); + auto io_threads_offset = io_thread_pool->get_threads_offset(); - // Set algorithmic blocksize - if (pb_model->block_size() == 0 and master) { - err << "model does not provide a valid block size (" << pb_model->block_size() << ")"; - 
LBANN_ERROR(err.str()); - } - El::SetBlocksize(pb_model->block_size()); - - // Change random seed if needed. - if (pb_model->random_seed() > 0) { - random_seed = pb_model->random_seed(); - // Reseed here so that setup is done with this new seed. - init_random(random_seed); - init_data_seq_random(random_seed); - } - // Initialize models differently if needed. + // Set algorithmic blocksize + if (pb_model->block_size() == 0 and master) { + err << "model does not provide a valid block size (" << pb_model->block_size() << ")"; + LBANN_ERROR(err.str()); + } + El::SetBlocksize(pb_model->block_size()); + + // Change random seed if needed. + if (pb_model->random_seed() > 0) { + random_seed = pb_model->random_seed(); + // Reseed here so that setup is done with this new seed. + init_random(random_seed); + init_data_seq_random(random_seed); + } + // Initialize models differently if needed. #ifndef LBANN_DETERMINISTIC - if (pb_model->random_init_models_differently()) { - random_seed = random_seed + comm->get_trainer_rank(); - // Reseed here so that setup is done with this new seed. - init_random(random_seed); - init_data_seq_random(random_seed); - } + if (pb_model->random_init_models_differently()) { + random_seed = random_seed + comm->get_trainer_rank(); + // Reseed here so that setup is done with this new seed. + init_random(random_seed); + init_data_seq_random(random_seed); + } #else - if (pb_model->random_init_models_differently()) { - if (master) { - std::cout << "WARNING: Ignoring random_init_models_differently " << - "due to sequential consistency" << std::endl; - } + if (pb_model->random_init_models_differently()) { + if (master) { + std::cout << "WARNING: Ignoring random_init_models_differently " << + "due to sequential consistency" << std::endl; } + } #endif - // Set up the communicator and get the grid based on the first model's spec. - // We do not currently support splitting different models in different ways, - // as this implies different grids. 
- int procs_per_trainer = pb_model->procs_per_trainer(); - if (procs_per_trainer == 0) { - procs_per_trainer = comm->get_procs_in_world(); - } - if (first_model) { - comm->split_trainers(procs_per_trainer); - if (pb_model->num_parallel_readers() > procs_per_trainer) { - pb_model->set_num_parallel_readers(procs_per_trainer); - } - } else if (procs_per_trainer != comm->get_procs_per_trainer()) { - LBANN_ERROR("Model prototexts requesting different procs per model is not supported"); + // Set up the communicator and get the grid based on the first model's spec. + // We do not currently support splitting different models in different ways, + // as this implies different grids. + int procs_per_trainer = pb_model->procs_per_trainer(); + if (procs_per_trainer == 0) { + procs_per_trainer = comm->get_procs_in_world(); + } + if (first_model) { + comm->split_trainers(procs_per_trainer); + if (pb_model->num_parallel_readers() > procs_per_trainer) { + pb_model->set_num_parallel_readers(procs_per_trainer); } + } else if (procs_per_trainer != comm->get_procs_per_trainer()) { + LBANN_ERROR("Model prototexts requesting different procs per model is not supported"); + } - // Save info to file; this includes the complete prototext (with any over-rides - // from the cmd line) and various other info - save_session(comm, argc, argv, pb); + // Save info to file; this includes the complete prototext (with any over-rides + // from the cmd line) and various other info + save_session(comm, argc, argv, pb); - // Report useful information - if (master) { - print_lbann_configuration(pb_model, comm, io_threads_per_process, io_threads_offset); - } + // Report useful information + if (master) { + print_lbann_configuration(pb_model, comm, io_threads_per_process, io_threads_offset); + } - // Display how the OpenMP threads are provisioned - if (opts->has_string("print_affinity")) { - display_omp_setup(); - } + // Display how the OpenMP threads are provisioned + if (opts->has_string("print_affinity")) 
{ + display_omp_setup(); + } - // Initialize data readers - //@todo: code not in place for correctly handling image preprocessing - std::map data_readers; - bool is_shared_training_data_reader = pb_model->shareable_training_data_reader(); - bool is_shared_testing_data_reader = pb_model->shareable_testing_data_reader(); - if (opts->has_string("share_testing_data_readers")) { - is_shared_testing_data_reader = opts->get_bool("share_testing_data_readers"); - } - init_data_readers(comm, pb, data_readers, is_shared_training_data_reader, is_shared_testing_data_reader); - - // hack to prevent all data readers from loading identical data; instead, - // share a single copy. See data_reader_jag_conduit_hdf5 for example - if (first_model) { - if (opts->has_string("share_data_reader_data")) { - for (auto t : data_readers) { - opts->set_ptr((void*)t.second); - } + // Initialize data readers + //@todo: code not in place for correctly handling image preprocessing + std::map data_readers; + bool is_shared_training_data_reader = pb_model->shareable_training_data_reader(); + bool is_shared_testing_data_reader = pb_model->shareable_testing_data_reader(); + if (opts->has_string("share_testing_data_readers")) { + is_shared_testing_data_reader = opts->get_bool("share_testing_data_readers"); + } + init_data_readers(comm, pb, data_readers, is_shared_training_data_reader, is_shared_testing_data_reader); + + // hack to prevent all data readers from loading identical data; instead, + // share a single copy. 
See data_reader_jag_conduit_hdf5 for example + if (first_model) { + if (opts->has_string("share_data_reader_data")) { + for (auto t : data_readers) { + opts->set_ptr((void*)t.second); } } + } - // User feedback - print_parameters(comm, pb); + // User feedback + print_parameters(comm, pb); - // Initalize model - model = proto::construct_model(comm, - data_readers, - pb.optimizer(), - pb.model()); - model->setup(io_thread_pool); + // Initalize model + std::unique_ptr ret_model{ + proto::construct_model(comm, + data_readers, + pb.optimizer(), + pb.model()) + }; + ret_model->setup(std::move(io_thread_pool)); - if(opts->get_bool("disable_background_io_activity")) { - model->allow_background_io_activity(false); - } + if(opts->get_bool("disable_background_io_activity")) { + ret_model->allow_background_io_activity(false); + } - //under development; experimental - if (opts->has_bool("use_data_store") && opts->get_bool("use_data_store")) { - if (master) { - std::cout << "\nUSING DATA STORE!\n\n"; - } - for (auto r : data_readers) { - if (!r.second) continue; - r.second->setup_data_store(model); - } + //under development; experimental + if (opts->has_bool("use_data_store") && opts->get_bool("use_data_store")) { + if (master) { + std::cout << "\nUSING DATA STORE!\n\n"; } - - if (opts->has_string("create_tarball")) { - finalize(comm); - return 0; + for (auto r : data_readers) { + if (!r.second) continue; + r.second->setup_data_store(ret_model.get()); } + } - // restart model from checkpoint if we have one - //@todo - //model->restartShared(); - if (comm->am_world_master()) { - std::cout << std::endl; - std::cout << model->get_description(); - std::cout << "Callbacks:" << std::endl; - for (lbann_callback *cb : model->get_callbacks()) { - std::cout << cb->name() << std::endl; - } + // restart model from checkpoint if we have one + //@todo + //model->restartShared(); + + if (comm->am_world_master()) { + std::cout << "\n" + << ret_model->get_description() + << "Callbacks:" << 
std::endl; + for (lbann_callback *cb : ret_model->get_callbacks()) { + std::cout << cb->name() << std::endl; } + } + if (first_model) { #ifndef LBANN_DETERMINISTIC - // Under normal conditions, reinitialize the random number generator so - // that regularization techniques (e.g. dropout) generate unique patterns - // on different ranks. - init_random(random_seed + comm->get_rank_in_world()); + // Under normal conditions, reinitialize the random number generator so + // that regularization techniques (e.g. dropout) generate unique patterns + // on different ranks. + init_random(random_seed + comm->get_rank_in_world()); #else - if(comm->am_world_master()) { - std::cout << - "--------------------------------------------------------------------------------\n" - "ALERT: executing in sequentially consistent mode -- performance will suffer\n" - "--------------------------------------------------------------------------------\n"; - } + if(comm->am_world_master()) { + std::cout << + "--------------------------------------------------------------------------------\n" + "ALERT: executing in sequentially consistent mode -- performance will suffer\n" + "--------------------------------------------------------------------------------\n"; + } #endif - - } catch (lbann_exception& e) { - El::mpi::Abort(El::mpi::COMM_WORLD, 1); - } catch (std::exception& e) { - El::ReportException(e); // Elemental exceptions } - - return model; + return ret_model; } void print_lbann_configuration(lbann_data::Model *pb_model, lbann_comm *comm, int io_threads_per_process, int io_threads_offset) { From 39d206c521b394025fe7581534de605c25eabfdb Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Fri, 15 Feb 2019 13:33:10 -0800 Subject: [PATCH 061/443] optimize smart pointer move in model::setup --- src/models/model.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/models/model.cpp b/src/models/model.cpp index 09774714e7b..3f7bb9e7c71 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -566,7 +566,7 @@ void model::remap_pointers(const std::unordered_map& layer_map, void model::setup(std::shared_ptr io_thread_pool) { // Setup I/O threads - set up before setting up the layers (input // layer depends on having a properly initialized thread pool) - m_io_thread_pool = io_thread_pool; + m_io_thread_pool = std::move(io_thread_pool); // Setup layers setup_layer_topology(); From ac16e9c063476bf17c4e3b9a4f3e5d42772dcef5 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Fri, 15 Feb 2019 13:33:49 -0800 Subject: [PATCH 062/443] protobuf utils to use smart pointers; exception safety fixes Also moved the functions to be free functions in a namespace instead of static members of a stateless class. --- include/lbann/utils/protobuf_utils.hpp | 79 ++++++++++++-------------- src/utils/protobuf_utils.cpp | 73 ++++++++++++------------ 2 files changed, 74 insertions(+), 78 deletions(-) diff --git a/include/lbann/utils/protobuf_utils.hpp b/include/lbann/utils/protobuf_utils.hpp index fa94f15aa63..d6c150cb92a 100644 --- a/include/lbann/utils/protobuf_utils.hpp +++ b/include/lbann/utils/protobuf_utils.hpp @@ -1,5 +1,5 @@ -#ifndef __PROTOBUF_UTILS_HPP__ -#define __PROTOBUF_UTILS_HPP__ +#ifndef LBANN_UTILS_PROTOBUF_UTILS_HPP_INCLUDED +#define LBANN_UTILS_PROTOBUF_UTILS_HPP_INCLUDED #include #include "lbann/lbann.hpp" @@ -20,50 +20,45 @@ struct prototext_fn_triple { }; -class protobuf_utils +namespace protobuf_utils { -public : - - /** convience wrapper: calls parse_prototext_filenames_from_command_line(), - * then load_prototext(), then verify_prototext(). 
This is the only function - * that needs to be called from, e.g, model_zoo/lbann.cpp; the three called - * functions are made public for testing. - */ - static void load_prototext( - const bool master, - const int argc, - char **argv, - std::vector &models_out); - +/** convience wrapper: calls parse_prototext_filenames_from_command_line(), + * then load_prototext(), then verify_prototext(). This is the only function + * that needs to be called from, e.g, model_zoo/lbann.cpp; the three called + * functions are made public for testing. + */ +std::vector> +load_prototext( + const bool master, + const int argc, + char* const argv[]); - /** parses the command line for --model= --reader= - * optimizer= and their multi counterparts: - * --model={,,...} - * --reader={,,...} - * --optimizer={,,...} - * If the multi-model option is given, the reader and optimzier - * can either be single, or contain the same number of filenames - * as does the --model={...} specification - */ - static void parse_prototext_filenames_from_command_line( - bool master, - int argc, - char **argv, - std::vector &names); +/** parses the command line for --model= --reader= + * optimizer= and their multi counterparts: + * --model={,,...} + * --reader={,,...} + * --optimizer={,,...} + * If the multi-model option is given, the reader and optimzier + * can either be single, or contain the same number of filenames + * as does the --model={...} specification + */ +std::vector +parse_prototext_filenames_from_command_line( + const bool master, const int argc, char* const argv[]); - static void read_in_prototext_files( - bool master, - std::vector &names, - std::vector &models_out); +std::vector> +read_in_prototext_files( + const bool master, + const std::vector &names); - /** attempts to verify the all models are valid, and contain an - * optimizer and reader - */ - static void verify_prototext( - bool master, - const std::vector &models); +/** attempts to verify the all models are valid, and contain an + * 
optimizer and reader + */ +void verify_prototext( + const bool master, + const std::vector> &models); -}; +} // namespace protobuf_utils } //namespace lbann -#endif +#endif // LBANN_UTILS_PROTOBUF_UTILS_HPP_INCLUDED diff --git a/src/utils/protobuf_utils.cpp b/src/utils/protobuf_utils.cpp index f6d225c39cf..e55dcd7d9c0 100644 --- a/src/utils/protobuf_utils.cpp +++ b/src/utils/protobuf_utils.cpp @@ -34,12 +34,13 @@ */ namespace lbann { +namespace protobuf_utils { -void protobuf_utils::parse_prototext_filenames_from_command_line( - bool master, - int argc, - char **argv, - std::vector &names) { +std::vector +parse_prototext_filenames_from_command_line( + const bool master, + const int argc, + char * const argv[]) { std::vector models; std::vector optimizers; std::vector readers; @@ -111,7 +112,7 @@ void protobuf_utils::parse_prototext_filenames_from_command_line( } } - names.clear(); + std::vector names; for (size_t i=0; i &names, - std::vector &models_out) { - models_out.clear(); - for (auto t : names) { - lbann_data::LbannPB *pb = new lbann_data::LbannPB; +std::vector> +read_in_prototext_files( + const bool master, + const std::vector &names) +{ + std::vector> models_out; + for (auto const& t : names) { + auto pb = make_unique(); if (t.model != "none") read_prototext_file(t.model.c_str(), *pb, master); if (t.reader != "none") { @@ -165,36 +167,35 @@ void protobuf_utils::read_in_prototext_files( read_prototext_file(t.optimizer.c_str(), p, master); pb->MergeFrom(p); } - models_out.push_back(pb); + models_out.emplace_back(std::move(pb)); } + return models_out; } -void protobuf_utils::load_prototext( - const bool master, - const int argc, - char **argv, - std::vector &models_out) { - std::vector names; - parse_prototext_filenames_from_command_line(master, argc, argv, names); - read_in_prototext_files(master, names, models_out); - if (models_out.size() == 0) { - if (master) { - std::stringstream err; - err << __FILE__ << __LINE__ << " :: " - << " failed to load any 
prototext files"; - throw lbann_exception(err.str()); - } - } - verify_prototext(master, models_out); +std::vector> +load_prototext( + const bool master, + const int argc, + char* const argv[]) +{ + auto names = parse_prototext_filenames_from_command_line(master, argc, argv); + auto models_out = read_in_prototext_files(master, names); + if (models_out.size() == 0 && master) { + LBANN_ERROR("Failed to load any prototext files"); + } + verify_prototext(master, models_out); + return models_out; } -void protobuf_utils::verify_prototext(bool master, const std::vector &models) { +void verify_prototext( + const bool master, + const std::vector> &models) { if (master) { std::cout << "protobuf_utils::verify_prototext; starting verify for " << models.size() << " models\n"; } for (size_t j=0; jhas_data_reader()) { is_good = false; if (master) { @@ -242,5 +243,5 @@ void protobuf_utils::verify_prototext(bool master, const std::vector Date: Fri, 15 Feb 2019 13:34:42 -0800 Subject: [PATCH 063/443] update the executables to match the previous changes --- model_zoo/lbann.cpp | 33 +++++++------------ model_zoo/lbann2.cpp | 53 +++++++++++++----------------- model_zoo/lbann_aecycgan.cpp | 43 +++++++----------------- model_zoo/lbann_cycgan.cpp | 58 +++++++++------------------------ model_zoo/lbann_gan.cpp | 35 +++++--------------- model_zoo/lbann_inf.cpp | 40 ++++++++--------------- tests/test_shuffled_indices.cpp | 53 +++++++++++++----------------- 7 files changed, 106 insertions(+), 209 deletions(-) diff --git a/model_zoo/lbann.cpp b/model_zoo/lbann.cpp index 5a4d0741bc4..98c952d01c9 100644 --- a/model_zoo/lbann.cpp +++ b/model_zoo/lbann.cpp @@ -37,7 +37,7 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + lbann_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); if (master) { @@ -49,18 +49,13 @@ int main(int argc, char *argv[]) { 
std::cout << std::endl << std::endl; } -#ifdef EL_USE_CUBLAS - El::GemmUseGPU(32,32,32); -#endif - try { // Initialize options db (this parses the command line) options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm); - finalize(comm); - return 0; + print_help(comm.get()); + return EXIT_SUCCESS; } //this must be called after call to opts->init(); @@ -74,16 +69,19 @@ int main(int argc, char *argv[]) { stack_profiler::get()->activate(comm->get_rank_in_world()); // Initalize a global I/O thread pool - std::shared_ptr io_thread_pool = construct_io_thread_pool(comm); + std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); - std::vector pbs; - protobuf_utils::load_prototext(master, argc, argv, pbs); + auto pbs = protobuf_utils::load_prototext(master, argc, argv); lbann_data::LbannPB pb = *(pbs[0]); lbann_data::Model *pb_model = pb.mutable_model(); - model *model = build_model_from_prototext(argc, argv, pb, - comm, io_thread_pool, true); + auto model = build_model_from_prototext(argc, argv, pb, + comm.get(), io_thread_pool, true); + + if (opts->has_string("create_tarball")) { + return EXIT_SUCCESS; + } if (! 
(opts->has_bool("exit_after_setup") && opts->get_bool("exit_after_setup"))) { @@ -109,10 +107,6 @@ int main(int argc, char *argv[]) { stack_profiler::get()->print(); } - // @todo: figure out and implement coherent strategy - // for freeing dynamically allocated memory - delete model; - } catch (exception& e) { if (options::get()->has_bool("stack_trace_to_file")) { std::stringstream ss("stack_trace"); @@ -123,16 +117,11 @@ int main(int argc, char *argv[]) { e.print_report(fs); } El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (std::exception& e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } - // Clean up - finalize(comm); return EXIT_SUCCESS; - } diff --git a/model_zoo/lbann2.cpp b/model_zoo/lbann2.cpp index eaa61fbf3ca..9663e1c94c5 100644 --- a/model_zoo/lbann2.cpp +++ b/model_zoo/lbann2.cpp @@ -35,44 +35,40 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + lbann_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); -#ifdef EL_USE_CUBLAS - El::GemmUseGPU(32,32,32); -#endif - try { // Initialize options db (this parses the command line) options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm); - finalize(comm); - return 0; + print_help(comm.get()); + return EXIT_SUCCESS; } std::stringstream err; // Initalize a global I/O thread pool - std::shared_ptr io_thread_pool = construct_io_thread_pool(comm); + std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); - std::vector pbs; - protobuf_utils::load_prototext(master, argc, argv, pbs); + auto pbs = protobuf_utils::load_prototext(master, argc, argv); - model *model_1 = build_model_from_prototext(argc, argv, *(pbs[0]), - comm, io_thread_pool, true); - model *model_2 = nullptr; + auto model_1 = build_model_from_prototext(argc, 
argv, *(pbs[0]), + comm.get(), io_thread_pool, true); + std::unique_ptr model_2; if (pbs.size() > 1) { model_2 = build_model_from_prototext(argc, argv, *(pbs[1]), - comm, io_thread_pool, false); + comm.get(), io_thread_pool, false); } // Load layer weights from checkpoint if checkpoint directory given if(opts->has_string("ckpt_dir")){ - lbann_callback_save_model::load_model_weights(opts->get_string("ckpt_dir"), model_1); + lbann_callback_save_model::load_model_weights(opts->get_string("ckpt_dir"), model_1.get()); } // Train model - if (master) std::cerr << "\nSTARTING train - model 1\n\n"; + if (master) { + std::cerr << "\nSTARTING train - model 1\n\n"; + } const lbann_data::Model pb_model = pbs[0]->model(); // When using checkpoint states, skip training as those could be the result @@ -91,34 +87,29 @@ int main(int argc, char *argv[]) { for(size_t l2=0; l2 < layers2.size(); l2++) { for(size_t l1=0; l1 < layers1.size(); l1++) { if(layers2[l2]->get_name() == layers1[l1]->get_name()){ - if(master) std::cout << "Model 1 Layer " << layers1[l1]->get_name(); + if(master) { + std::cout << "Model 1 Layer " << layers1[l1]->get_name(); + } layers2[l2]->replace_weights(layers1[l1]); - if(master) std::cout << " copied to Model2 Layer " << std::endl; + if(master) { + std::cout << " copied to Model2 Layer " << std::endl; + } } } } - if (master) std::cerr << "\n STARTING train - model 2\n\n"; + if (master) { + std::cerr << "\n STARTING train - model 2\n\n"; + } const lbann_data::Model pb_model_2 = pbs[1]->model(); model_2->train( pb_model_2.num_epochs() ); model_2->evaluate(execution_mode::testing); } - delete model_1; - if (model_2 != nullptr) { - delete model_2; - } - for (auto t : pbs) { - delete t; - } - } catch (std::exception& e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } - // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/lbann_aecycgan.cpp b/model_zoo/lbann_aecycgan.cpp index 43b6e93176d..851c3e6a22c 100644 --- 
a/model_zoo/lbann_aecycgan.cpp +++ b/model_zoo/lbann_aecycgan.cpp @@ -35,45 +35,40 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + lbann_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); -#ifdef EL_USE_CUBLAS - El::GemmUseGPU(32,32,32); -#endif - try { // Initialize options db (this parses the command line) options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm); - finalize(comm); - return 0; + print_help(comm.get()); + return EXIT_SUCCESS; } std::stringstream err; // Initalize a global I/O thread pool - std::shared_ptr io_thread_pool = construct_io_thread_pool(comm); + std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); - std::vector pbs; - protobuf_utils::load_prototext(master, argc, argv, pbs); + auto pbs = protobuf_utils::load_prototext(master, argc, argv); - model *model_1 = build_model_from_prototext(argc, argv, *(pbs[0]), - comm, io_thread_pool, true); //ae - model *model_2 = nullptr; //cycgan - model *model_3 = nullptr; //ae+cycgan + auto model_1 = build_model_from_prototext(argc, argv, *(pbs[0]), + comm.get(), io_thread_pool, true); //ae + std::unique_ptr + model_2, //cycgan + model_3; //ae+cycgan if (pbs.size() > 1) { model_2 = build_model_from_prototext(argc, argv, *(pbs[1]), - comm, io_thread_pool, false); + comm.get(), io_thread_pool, false); } if (pbs.size() > 2) { model_3 = build_model_from_prototext(argc, argv, *(pbs[2]), - comm, io_thread_pool, false); + comm.get(), io_thread_pool, false); } @@ -100,24 +95,10 @@ int main(int argc, char *argv[]) { if(master) std::cout << " Evaluate pretrained autoencoder" << std::endl; model_3->evaluate(execution_mode::testing); - delete model_1; - if (model_2 != nullptr) { - delete model_2; - } - if (model_3 != nullptr) { - delete model_3; - } - for 
(auto t : pbs) { - delete t; - } - } catch (std::exception& e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } - // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/lbann_cycgan.cpp b/model_zoo/lbann_cycgan.cpp index 54d7fe60c23..c40321c3d0e 100644 --- a/model_zoo/lbann_cycgan.cpp +++ b/model_zoo/lbann_cycgan.cpp @@ -35,7 +35,7 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + lbann_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); if (master) { @@ -47,18 +47,13 @@ int main(int argc, char *argv[]) { std::cout << std::endl << std::endl; } -#ifdef EL_USE_CUBLAS - El::GemmUseGPU(32,32,32); -#endif - try { // Initialize options db (this parses the command line) options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm); - finalize(comm); - return 0; + print_help(comm.get()); + return EXIT_SUCCESS; } //this must be called after call to opts->init(); @@ -74,39 +69,38 @@ int main(int argc, char *argv[]) { std::stringstream err; // Initalize a global I/O thread pool - std::shared_ptr io_thread_pool = construct_io_thread_pool(comm); + std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); - std::vector pbs; - protobuf_utils::load_prototext(master, argc, argv, pbs); + auto pbs = protobuf_utils::load_prototext(master, argc, argv); - model *model_1 = build_model_from_prototext(argc, argv, *(pbs[0]), - comm, io_thread_pool, true); //D1 solver + auto model_1 = build_model_from_prototext(argc, argv, *(pbs[0]), + comm.get(), io_thread_pool, true); //D1 solver //hack, overide model name to make reporting easy, what can break?" 
- model *model_2 = nullptr; //G1 solver - model *model_3 = nullptr; //G2 solver + std::unique_ptr model_2, //G1 solver + model_3, //G2 solver - //Support for autoencoder models - model *ae_model = nullptr; - model *ae_cycgan_model = nullptr; //contain layer(s) from (cyc)GAN + //Support for autoencoder models + ae_model, + ae_cycgan_model; //contain layer(s) from (cyc)GAN if (pbs.size() > 1) { model_2 = build_model_from_prototext(argc, argv, *(pbs[1]), - comm, io_thread_pool, false); + comm.get(), io_thread_pool, false); } if (pbs.size() > 2) { model_3 = build_model_from_prototext(argc, argv, *(pbs[2]), - comm, io_thread_pool, false); + comm.get(), io_thread_pool, false); } if (pbs.size() > 3) { ae_model = build_model_from_prototext(argc, argv, *(pbs[3]), - comm, io_thread_pool, false); + comm.get(), io_thread_pool, false); } if (pbs.size() > 4) { ae_cycgan_model = build_model_from_prototext(argc, argv, *(pbs[4]), - comm, io_thread_pool, false); + comm.get(), io_thread_pool, false); } const lbann_data::Model pb_model = pbs[0]->model(); @@ -178,30 +172,10 @@ int main(int argc, char *argv[]) { //has no affect unless option: --st_on was given stack_profiler::get()->print(); - delete model_1; - if (model_2 != nullptr) { - delete model_2; - } - if (model_3 != nullptr) { - delete model_3; - } - if (ae_model != nullptr) { - delete ae_model; - } - if (ae_cycgan_model != nullptr) { - delete ae_cycgan_model; - } - for (auto t : pbs) { - delete t; - } - } catch (std::exception& e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } - // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/lbann_gan.cpp b/model_zoo/lbann_gan.cpp index a3632c76231..f640343199c 100644 --- a/model_zoo/lbann_gan.cpp +++ b/model_zoo/lbann_gan.cpp @@ -35,36 +35,30 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + lbann_comm_ptr comm = initialize(argc, 
argv, random_seed); bool master = comm->am_world_master(); -#ifdef EL_USE_CUBLAS - El::GemmUseGPU(32,32,32); -#endif - try { // Initialize options db (this parses the command line) options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm); - finalize(comm); - return 0; + print_help(comm.get()); + return EXIT_SUCCESS; } std::stringstream err; // Initalize a global I/O thread pool - std::shared_ptr io_thread_pool = construct_io_thread_pool(comm); + std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); - std::vector pbs; - protobuf_utils::load_prototext(master, argc, argv, pbs); + auto pbs = protobuf_utils::load_prototext(master, argc, argv); - model *model_1 = build_model_from_prototext(argc, argv, *(pbs[0]), comm, io_thread_pool, true); //discriminator + auto model_1 = build_model_from_prototext(argc, argv, *(pbs[0]), comm.get(), io_thread_pool, true); //discriminator //model - model *model_2 = nullptr; //adversarial model + std::unique_ptr model_2 = nullptr; //adversarial model if (pbs.size() > 1) { - model_2 = build_model_from_prototext(argc, argv, *(pbs[1]), comm, io_thread_pool, false); + model_2 = build_model_from_prototext(argc, argv, *(pbs[1]), comm.get(), io_thread_pool, false); } const lbann_data::Model pb_model = pbs[0]->model(); @@ -103,23 +97,10 @@ int main(int argc, char *argv[]) { super_step++; } - - - delete model_1; - if (model_2 != nullptr) { - delete model_2; - } - for (auto t : pbs) { - delete t; - } - } catch (std::exception& e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } - // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/lbann_inf.cpp b/model_zoo/lbann_inf.cpp index b1f439a54a9..1444a232ae7 100644 --- a/model_zoo/lbann_inf.cpp +++ b/model_zoo/lbann_inf.cpp @@ -35,7 +35,7 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm 
= initialize(argc, argv, random_seed); + auto comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); try { @@ -43,28 +43,28 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm); - finalize(comm); - return 0; + print_help(comm.get()); + return EXIT_SUCCESS; } std::stringstream err; // Initalize a global I/O thread pool - std::shared_ptr io_thread_pool = construct_io_thread_pool(comm); + std::shared_ptr io_thread_pool + = construct_io_thread_pool(comm.get()); - std::vector pbs; - protobuf_utils::load_prototext(master, argc, argv, pbs); - std::vector models; - for(auto pb_model : pbs) { - models.emplace_back(build_model_from_prototext(argc, argv, *pb_model, - comm, io_thread_pool, models.size() == 0)); + auto pbs = protobuf_utils::load_prototext(master, argc, argv); + std::vector> models; + for(auto&& pb_model : pbs) { + models.emplace_back( + build_model_from_prototext(argc, argv, *pb_model, + comm.get(), io_thread_pool, models.size() == 0)); } // Load layer weights from checkpoint if checkpoint directory given if(opts->has_string("ckpt_dir")){ - for(auto m : models) { - lbann_callback_save_model::load_model_weights(opts->get_string("ckpt_dir"), m); + for(auto&& m : models) { + lbann_callback_save_model::load_model_weights(opts->get_string("ckpt_dir"), m.get()); } }else { LBANN_ERROR("Unable to reload model"); @@ -74,27 +74,15 @@ int main(int argc, char *argv[]) { /// Enable shared testing data readers on the command line via --share_testing_data_readers=1 El::Int num_samples = models[0]->get_num_iterations_per_epoch(execution_mode::testing); for(El::Int s = 0; s < num_samples; s++) { - for(auto m : models) { + for(auto&& m : models) { m->evaluate(execution_mode::testing, 1); } } - for(auto m : models) { - delete m; - } - - for (auto t : pbs) { - delete t; - } - } catch (std::exception& e) { 
El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } - // Clean up - finalize(comm); return EXIT_SUCCESS; - } diff --git a/tests/test_shuffled_indices.cpp b/tests/test_shuffled_indices.cpp index 8bc9239512c..3078d644e5e 100644 --- a/tests/test_shuffled_indices.cpp +++ b/tests/test_shuffled_indices.cpp @@ -33,11 +33,11 @@ using namespace lbann; int mini_batch_size = 128; -void test_is_shuffled(generic_data_reader *reader, bool is_shuffled, const char *msg = nullptr); +void test_is_shuffled(const generic_data_reader &reader, bool is_shuffled, const char *msg = nullptr); int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + lbann_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); try { @@ -45,19 +45,18 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm); - finalize(comm); - return 0; + print_help(comm.get()); + return EXIT_SUCCESS; } //read data_reader prototext file if (not opts->has_string("fn")) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "you must run with: --fn= where is\n" - << "a data_reader prototext filePathName\n"; - throw lbann_exception(err.str()); + std::cerr << __FILE__ << " " << __LINE__ << " :: " + << "you must run with: --fn= where is\n" + << "a data_reader prototext filePathName\n"; + return EXIT_FAILURE; } + lbann_data::LbannPB pb; std::string reader_fn(opts->get_string("fn").c_str()); read_prototext_file(reader_fn.c_str(), pb, master); @@ -67,53 +66,48 @@ int main(int argc, char *argv[]) { for (int j=0; j(shuffle); + if (readme.data_filename() != "") { reader->set_data_filename( readme.data_filename() ); } if (readme.label_filename() != "") { reader->set_label_filename( readme.label_filename() ); } if (readme.data_filedir() != "") { 
reader->set_file_dir( readme.data_filedir() ); } reader->load(); - test_is_shuffled(reader, true, "TEST #1"); - delete reader; + test_is_shuffled(*reader, true, "TEST #1"); //test: indices should not be shuffled; same as previous, except we call // shuffle(true); shuffle = false; - reader = new mnist_reader(shuffle); + reader = make_unique(shuffle); if (readme.data_filename() != "") { reader->set_data_filename( readme.data_filename() ); } if (readme.label_filename() != "") { reader->set_label_filename( readme.label_filename() ); } if (readme.data_filedir() != "") { reader->set_file_dir( readme.data_filedir() ); } reader->set_shuffle(shuffle); reader->load(); - test_is_shuffled(reader, false, "TEST #2"); - delete reader; + test_is_shuffled(*reader, false, "TEST #2"); //test: indices should not be shuffled, due to ctor argument shuffle = false; - reader = new mnist_reader(shuffle); + reader = make_unique(shuffle); if (readme.data_filename() != "") { reader->set_data_filename( readme.data_filename() ); } if (readme.label_filename() != "") { reader->set_label_filename( readme.label_filename() ); } if (readme.data_filedir() != "") { reader->set_file_dir( readme.data_filedir() ); } reader->load(); - test_is_shuffled(reader, false, "TEST #3"); - delete reader; + test_is_shuffled(*reader, false, "TEST #3"); //test: set_shuffled_indices; indices should not be shuffled shuffle = true; - reader = new mnist_reader(shuffle); + reader = make_unique(shuffle); if (readme.data_filename() != "") { reader->set_data_filename( readme.data_filename() ); } if (readme.label_filename() != "") { reader->set_label_filename( readme.label_filename() ); } if (readme.data_filedir() != "") { reader->set_file_dir( readme.data_filedir() ); } reader->load(); //at this point the indices should be shuffled (same as first test) - test_is_shuffled(reader, true, "TEST #4"); + test_is_shuffled(*reader, true, "TEST #4"); std::vector indices(mini_batch_size); std::iota(indices.begin(), indices.end(), 0); 
reader->set_shuffled_indices(indices); - test_is_shuffled(reader, false, "TEST #5"); - delete reader; + test_is_shuffled(*reader, false, "TEST #5"); break; } @@ -121,15 +115,14 @@ int main(int argc, char *argv[]) { } catch (lbann_exception& e) { e.print_report(); - El::mpi::Abort(El::mpi::COMM_WORLD, 1); + return EXIT_FAILURE; } - finalize(comm); - return 0; + return EXIT_SUCCESS; } -void test_is_shuffled(generic_data_reader *reader, bool is_shuffled, const char *msg) { - const std::vector &indices = reader->get_shuffled_indices(); +void test_is_shuffled(const generic_data_reader &reader, bool is_shuffled, const char *msg) { + const std::vector &indices = reader.get_shuffled_indices(); std::cerr << "\nstarting test_is_suffled; mini_batch_size: " << mini_batch_size << " indices.size(): " << indices.size(); if (msg) { From f7c1249dfd1a00bfbb34c3531ea25830d2f9a1b7 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Mon, 18 Feb 2019 10:45:10 -0800 Subject: [PATCH 064/443] fstreams are constructible from strings --- model_zoo/lbann.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/model_zoo/lbann.cpp b/model_zoo/lbann.cpp index 98c952d01c9..4295637d999 100644 --- a/model_zoo/lbann.cpp +++ b/model_zoo/lbann.cpp @@ -111,9 +111,11 @@ int main(int argc, char *argv[]) { if (options::get()->has_bool("stack_trace_to_file")) { std::stringstream ss("stack_trace"); const auto& rank = get_rank_in_world(); - if (rank >= 0) { ss << "_rank" << rank; } + if (rank >= 0) { + ss << "_rank" << rank; + } ss << ".txt"; - std::ofstream fs(ss.str().c_str()); + std::ofstream fs(ss.str()); e.print_report(fs); } El::ReportException(e); From 2dac0d46b88910d3057ddf97677a2d365c1bb14f Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Mon, 18 Feb 2019 11:17:08 -0800 Subject: [PATCH 065/443] more exception safety in protobuf parsing Also, avoid some extraneous string copies. 
Also also, make the decls in the hpp match the signatures in the cpp --- include/lbann/proto/proto_common.hpp | 6 +++--- src/proto/proto_common.cpp | 19 ++++++++++++------- src/utils/protobuf_utils.cpp | 8 ++++---- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/include/lbann/proto/proto_common.hpp b/include/lbann/proto/proto_common.hpp index 387177ae68b..be3ea93b98b 100644 --- a/include/lbann/proto/proto_common.hpp +++ b/include/lbann/proto/proto_common.hpp @@ -46,13 +46,13 @@ void save_session(lbann_comm *comm, int argc, char **argv, lbann_data::LbannPB& /// void read_prototext_file( - std::string fn, + const std::string& fn, lbann_data::LbannPB& pb, bool master); /// -void write_prototext_file( - std::string fn, +bool write_prototext_file( + const std::string& fn, lbann_data::LbannPB& pb); } // namespace lbann diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 30e6a509560..ffc622a50a9 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -495,7 +496,7 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< } } -void read_prototext_file(std::string fn, lbann_data::LbannPB& pb, bool master) +void read_prototext_file(const std::string& fn, lbann_data::LbannPB& pb, bool master) { std::stringstream err; int fd = open(fn.c_str(), O_RDONLY); @@ -505,21 +506,25 @@ void read_prototext_file(std::string fn, lbann_data::LbannPB& pb, bool master) throw lbann_exception(err.str()); } } - auto *input = new google::protobuf::io::FileInputStream(fd); - bool success = google::protobuf::TextFormat::Parse(input, &pb); + using FIS=google::protobuf::io::FileInputStream; + auto input = std::unique_ptr>( + new google::protobuf::io::FileInputStream(fd), + [](FIS* x) { + x->Close(); + delete x; + }); + bool success = google::protobuf::TextFormat::Parse(input.get(), &pb); if (!success) { if (master) { err << __FILE__ << " " << 
__LINE__ << " :: failed to read or parse prototext file: " << fn << std::endl; throw lbann_exception(err.str()); } } - input->Close(); - delete input; } -bool write_prototext_file(const char *fn, lbann_data::LbannPB& pb) +bool write_prototext_file(const std::string& fn, lbann_data::LbannPB& pb) { - int fd = open(fn, O_APPEND | O_CREAT | O_TRUNC, 0644); + int fd = open(fn.c_str(), O_APPEND | O_CREAT | O_TRUNC, 0644); if (fd == -1) { return false; } diff --git a/src/utils/protobuf_utils.cpp b/src/utils/protobuf_utils.cpp index e55dcd7d9c0..84feb007eff 100644 --- a/src/utils/protobuf_utils.cpp +++ b/src/utils/protobuf_utils.cpp @@ -151,20 +151,20 @@ read_in_prototext_files( for (auto const& t : names) { auto pb = make_unique(); if (t.model != "none") - read_prototext_file(t.model.c_str(), *pb, master); + read_prototext_file(t.model, *pb, master); if (t.reader != "none") { lbann_data::LbannPB p; - read_prototext_file(t.reader.c_str(), p, master); + read_prototext_file(t.reader, p, master); pb->MergeFrom(p); } if (t.data_set_metadata != "none") { lbann_data::LbannPB p; - read_prototext_file(t.data_set_metadata.c_str(), p, master); + read_prototext_file(t.data_set_metadata, p, master); pb->MergeFrom(p); } if (t.optimizer != "none") { lbann_data::LbannPB p; - read_prototext_file(t.optimizer.c_str(), p, master); + read_prototext_file(t.optimizer, p, master); pb->MergeFrom(p); } models_out.emplace_back(std::move(pb)); From f3941ca23b27e237a362bee7a48dc30ce7c2d932 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Mon, 18 Feb 2019 11:21:32 -0800 Subject: [PATCH 066/443] add missing header --- src/proto/proto_common.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index ffc622a50a9..7a5b6dd69d1 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include #include From 63c6b93887f0301e3968ef4c9a440fbe4923d245 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Mon, 18 Feb 2019 12:48:28 -0800 Subject: [PATCH 067/443] const comms in proto_common we should probably consider why communicator objects are passed around when generally they are used only to test "if world master". --- include/lbann/proto/proto_common.hpp | 18 ++--- model_zoo/lbann.cpp | 4 +- model_zoo/lbann2.cpp | 4 +- model_zoo/lbann_aecycgan.cpp | 4 +- model_zoo/lbann_cycgan.cpp | 4 +- model_zoo/lbann_gan.cpp | 4 +- model_zoo/lbann_inf.cpp | 4 +- src/proto/proto_common.cpp | 99 +++++++++++++++------------- src/utils/lbann_library.cpp | 12 ++-- tests/test_shuffled_indices.cpp | 2 +- 10 files changed, 83 insertions(+), 72 deletions(-) diff --git a/include/lbann/proto/proto_common.hpp b/include/lbann/proto/proto_common.hpp index be3ea93b98b..e0cffaa61bf 100644 --- a/include/lbann/proto/proto_common.hpp +++ b/include/lbann/proto/proto_common.hpp @@ -8,9 +8,9 @@ namespace lbann { /// Returns true if the Model contains at least one MotifLayer -bool has_motifs(lbann_comm *comm, const lbann_data::LbannPB& p); +bool has_motifs(const lbann_comm& comm, const lbann_data::LbannPB& p); -void expand_motifs(lbann_comm *comm, lbann_data::LbannPB& pb); +void expand_motifs(const lbann_comm& comm, lbann_data::LbannPB& pb); /** Customize the name of the index list with the following options: * - trainer ID @@ -19,7 +19,7 @@ void expand_motifs(lbann_comm *comm, lbann_data::LbannPB& pb); * == . * _t_. 
*/ -void customize_data_readers_index_list(lbann::lbann_comm *comm, lbann_data::LbannPB& p); +void customize_data_readers_index_list(const lbann_comm& comm, lbann_data::LbannPB& p); /// instantiates one or more generic_data_readers and inserts them in &data_readers void init_data_readers( @@ -30,25 +30,25 @@ void init_data_readers( bool is_shareable_validation_data_reader = false); /// adjusts the number of parallel data readers -void set_num_parallel_readers(const lbann_comm *comm, lbann_data::LbannPB& p); +void set_num_parallel_readers(const lbann_comm& comm, lbann_data::LbannPB& p); /// adjusts the values in p by querying the options db -void get_cmdline_overrides(lbann_comm *comm, lbann_data::LbannPB& p); +void get_cmdline_overrides(const lbann_comm& comm, lbann_data::LbannPB& p); /// print various params (learn_rate, etc) to cout -void print_parameters(lbann_comm *comm, lbann_data::LbannPB& p); +void print_parameters(const lbann_comm& comm, lbann_data::LbannPB& p); /// prints usage information -void print_help(lbann_comm *comm); +void print_help(const lbann_comm& comm); /// prints prototext file, cmd line, etc to file -void save_session(lbann_comm *comm, int argc, char **argv, lbann_data::LbannPB& p); +void save_session(const lbann_comm& comm, const int argc, char * const* argv, lbann_data::LbannPB& p); /// void read_prototext_file( const std::string& fn, lbann_data::LbannPB& pb, - bool master); + const bool master); /// bool write_prototext_file( diff --git a/model_zoo/lbann.cpp b/model_zoo/lbann.cpp index 4295637d999..ba54c2a8bfa 100644 --- a/model_zoo/lbann.cpp +++ b/model_zoo/lbann.cpp @@ -54,7 +54,7 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm.get()); + print_help(*comm); return EXIT_SUCCESS; } @@ -109,7 +109,7 @@ int main(int argc, char *argv[]) { } catch (exception& e) { if 
(options::get()->has_bool("stack_trace_to_file")) { - std::stringstream ss("stack_trace"); + std::ostringstream ss("stack_trace"); const auto& rank = get_rank_in_world(); if (rank >= 0) { ss << "_rank" << rank; diff --git a/model_zoo/lbann2.cpp b/model_zoo/lbann2.cpp index 9663e1c94c5..1d9c281113c 100644 --- a/model_zoo/lbann2.cpp +++ b/model_zoo/lbann2.cpp @@ -43,11 +43,11 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm.get()); + print_help(*comm); return EXIT_SUCCESS; } - std::stringstream err; + std::ostringstream err; // Initalize a global I/O thread pool std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); diff --git a/model_zoo/lbann_aecycgan.cpp b/model_zoo/lbann_aecycgan.cpp index 851c3e6a22c..80672bcc450 100644 --- a/model_zoo/lbann_aecycgan.cpp +++ b/model_zoo/lbann_aecycgan.cpp @@ -43,11 +43,11 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm.get()); + print_help(*comm); return EXIT_SUCCESS; } - std::stringstream err; + std::ostringstream err; // Initalize a global I/O thread pool std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); diff --git a/model_zoo/lbann_cycgan.cpp b/model_zoo/lbann_cycgan.cpp index c40321c3d0e..16f603648fe 100644 --- a/model_zoo/lbann_cycgan.cpp +++ b/model_zoo/lbann_cycgan.cpp @@ -52,7 +52,7 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm.get()); + print_help(*comm); return EXIT_SUCCESS; } @@ -66,7 +66,7 @@ int main(int argc, char *argv[]) { //to activate, must specify --st_on on cmd line stack_profiler::get()->activate(comm->get_rank_in_world()); - std::stringstream err; + std::ostringstream err; // 
Initalize a global I/O thread pool std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); diff --git a/model_zoo/lbann_gan.cpp b/model_zoo/lbann_gan.cpp index f640343199c..45dc22bb417 100644 --- a/model_zoo/lbann_gan.cpp +++ b/model_zoo/lbann_gan.cpp @@ -43,11 +43,11 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm.get()); + print_help(*comm); return EXIT_SUCCESS; } - std::stringstream err; + std::ostringstream err; // Initalize a global I/O thread pool std::shared_ptr io_thread_pool = construct_io_thread_pool(comm.get()); diff --git a/model_zoo/lbann_inf.cpp b/model_zoo/lbann_inf.cpp index 1444a232ae7..b26a3d22657 100644 --- a/model_zoo/lbann_inf.cpp +++ b/model_zoo/lbann_inf.cpp @@ -43,11 +43,11 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm.get()); + print_help(*comm); return EXIT_SUCCESS; } - std::stringstream err; + std::ostringstream err; // Initalize a global I/O thread pool std::shared_ptr io_thread_pool diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 7a5b6dd69d1..f26bacd9315 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -18,9 +18,11 @@ namespace lbann { -bool has_motifs(lbann_comm *comm, const lbann_data::LbannPB& p) { - bool master = comm->am_world_master(); - if (master) std::cout << "starting has_motifs\n"; +bool has_motifs(const lbann_comm& comm, const lbann_data::LbannPB& p) { + const bool master = comm.am_world_master(); + if (master) { + std::cout << "starting has_motifs\n"; + } const lbann_data::Model& m = p.model(); const int num_layers = m.layer_size(); for (int j=0; jam_world_master(); - if (master) std::cout << "starting expand_motifs\n"; +void expand_motifs(const lbann_comm& comm, lbann_data::LbannPB& pb) { + 
const bool master = comm.am_world_master(); + if (master) { + std::cout << "starting expand_motifs\n"; + } const lbann_data::MotifDefinitions& m = pb.motif_definitions(); const int num_motifs = m.motif_size(); for (int j=0; j& data_readers, - bool is_shareable_training_data_reader, bool is_shareable_testing_data_reader, bool is_shareable_validation_data_reader) +void init_data_readers( + lbann_comm* comm, const lbann_data::LbannPB& p, + std::map& data_readers, + bool is_shareable_training_data_reader, + bool is_shareable_testing_data_reader, + bool is_shareable_validation_data_reader) { #ifdef LBANN_HAS_CONDUIT static std::unordered_map leading_reader_jag_conduit; #endif - bool master = comm->am_world_master(); - std::stringstream err; + const bool master = comm->am_world_master(); + std::ostringstream err; options *opts = options::get(); - bool create_tarball = opts->has_string("create_tarball") ? true : false; + const bool create_tarball + = opts->has_string("create_tarball") ? true : false; const lbann_data::DataReader & d_reader = p.data_reader(); - int size = d_reader.reader_size(); + const int size = d_reader.reader_size(); const lbann_data::DataSetMetaData& pb_metadata = p.data_set_metadata(); @@ -99,7 +109,7 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< using var_t = data_reader_jag::variable_t; // composite independent variable - std::vector< std::vector > independent_type(pb_schema.independent_size()); + std::vector> independent_type(pb_schema.independent_size()); for (int i=0; i < pb_schema.independent_size(); ++i) { const lbann_data::DataSetMetaData::Schema::JAGDataSlice& slice = pb_schema.independent(i); @@ -113,7 +123,7 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< reader_jag->set_independent_variable_type(independent_type); // composite dependent variable - std::vector< std::vector > dependent_type(pb_schema.dependent_size()); + std::vector> 
dependent_type(pb_schema.dependent_size()); for (int i=0; i < pb_schema.dependent_size(); ++i) { const lbann_data::DataSetMetaData::Schema::JAGDataSlice& slice = pb_schema.dependent(i); @@ -161,7 +171,7 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< const auto& params = proto_layer.input(); const auto& io_buffer = params.io_buffer(); reader_jag_conduit->set_io_buffer_type(io_buffer); - const auto num_readers = get_requested_num_parallel_readers(comm, p); + const auto num_readers = get_requested_num_parallel_readers(*comm, p); reader_jag_conduit->set_num_parallel_readers(num_readers); reader_jag_conduit->set_local_id(readme.role()); break; @@ -497,9 +507,9 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< } } -void read_prototext_file(const std::string& fn, lbann_data::LbannPB& pb, bool master) +void read_prototext_file(const std::string& fn, lbann_data::LbannPB& pb, const bool master) { - std::stringstream err; + std::ostringstream err; int fd = open(fn.c_str(), O_RDONLY); if (fd == -1) { if (master) { @@ -540,14 +550,14 @@ bool write_prototext_file(const std::string& fn, lbann_data::LbannPB& pb) return true; } -bool check_if_num_parallel_readers_set(const lbann_comm *comm, const lbann_data::Model& model) +bool check_if_num_parallel_readers_set(const lbann_comm& comm, const lbann_data::Model& model) { - const bool master = comm->am_world_master(); + const bool master = comm.am_world_master(); const int parallel_io = model.num_parallel_readers(); if (parallel_io == 0) { if (master) { - std::cout << "\tMax Parallel I/O Fetch: " << comm->get_procs_per_trainer() << + std::cout << "\tMax Parallel I/O Fetch: " << comm.get_procs_per_trainer() << " (Limited to # Processes)" << std::endl; } return false; @@ -558,29 +568,30 @@ bool check_if_num_parallel_readers_set(const lbann_comm *comm, const lbann_data: return true; } -void set_num_parallel_readers(const lbann_comm *comm, lbann_data::LbannPB& p) +void 
set_num_parallel_readers(const lbann_comm& comm, lbann_data::LbannPB& p) { lbann_data::Model *model = p.mutable_model(); const bool is_set = check_if_num_parallel_readers_set(comm, *model); if (!is_set) { - const int parallel_io = comm->get_procs_per_trainer(); + const int parallel_io = comm.get_procs_per_trainer(); model->set_num_parallel_readers(parallel_io); //adjust the prototext } } -int get_requested_num_parallel_readers(const lbann_comm *comm, const lbann_data::LbannPB& p) +int get_requested_num_parallel_readers(const lbann_comm& comm, const lbann_data::LbannPB& p) { const lbann_data::Model& model = p.model(); const bool is_set = check_if_num_parallel_readers_set(comm, model); if (!is_set) { - return comm->get_procs_per_trainer(); + return comm.get_procs_per_trainer(); } return model.num_parallel_readers(); } -void set_data_readers_filenames(std::string which, lbann_data::LbannPB& p) +void set_data_readers_filenames( + const std::string& which, lbann_data::LbannPB& p) { options *opts = options::get(); lbann_data::DataReader *readers = p.mutable_data_reader(); @@ -588,7 +599,7 @@ void set_data_readers_filenames(std::string which, lbann_data::LbannPB& p) for (int j=0; jmutable_reader(j); if (r->role() == which) { - std::stringstream s; + std::ostringstream s; s << "data_filedir_" << which; if (opts->has_string(s.str())) { r->set_data_filedir(opts->get_string(s.str())); @@ -621,7 +632,7 @@ void set_data_readers_percent(lbann_data::LbannPB& p) options *opts = options::get(); double percent = opts->get_float("data_reader_percent"); if (percent <= 0 || percent > 1.0) { - std::stringstream err; + std::ostringstream err; err << __FILE__ << " " << __LINE__ << " :: " << " --data_reader_percent= must be > 0 and <= 1.0"; throw lbann_exception(err.str()); @@ -634,21 +645,21 @@ void set_data_readers_percent(lbann_data::LbannPB& p) } } -void customize_data_readers_index_list(lbann_comm *comm, lbann_data::LbannPB& p) +void customize_data_readers_index_list(const lbann_comm& 
comm, lbann_data::LbannPB& p) { lbann_data::DataReader *readers = p.mutable_data_reader(); const lbann_data::Model& pb_model = p.model(); int size = readers->reader_size(); for (int j=0; jmutable_reader(j); - std::stringstream s; + std::ostringstream s; std::string basename = get_basename_without_ext(r->index_list()); std::string ext = get_ext_name(r->index_list()); if(r->index_list_per_model()) { s << pb_model.name() << "_"; } if(r->index_list_per_trainer()) { - s << "t" << comm->get_trainer_rank() << "_"; + s << "t" << comm.get_trainer_rank() << "_"; } s << basename; s << "." << ext; @@ -656,10 +667,10 @@ void customize_data_readers_index_list(lbann_comm *comm, lbann_data::LbannPB& p) } } -void get_cmdline_overrides(lbann_comm *comm, lbann_data::LbannPB& p) +void get_cmdline_overrides(const lbann_comm& comm, lbann_data::LbannPB& p) { - bool master = comm->am_world_master(); - std::stringstream err; + bool master = comm.am_world_master(); + std::ostringstream err; options *opts = options::get(); lbann_data::Model *model = p.mutable_model(); @@ -796,9 +807,9 @@ void get_cmdline_overrides(lbann_comm *comm, lbann_data::LbannPB& p) } } -void print_parameters(lbann_comm *comm, lbann_data::LbannPB& p) +void print_parameters(const lbann_comm& comm, lbann_data::LbannPB& p) { - if (!comm->am_world_master()) { + if (!comm.am_world_master()) { return; } @@ -820,9 +831,9 @@ void print_parameters(lbann_comm *comm, lbann_data::LbannPB& p) << " (only used for metrics)\n"; } -void print_help(lbann_comm *comm) +void print_help(const lbann_comm& comm) { - if (!comm->am_world_master()) { + if (!comm.am_world_master()) { return; } @@ -912,19 +923,19 @@ void copy_file(std::string fn, std::ofstream &out) { std::ifstream in(fn.c_str()); if (!in.is_open()) { - std::stringstream err; + std::ostringstream err; err << __FILE__ << " " << __LINE__ << " :: failed to open file for reading: " << fn; throw std::runtime_error(err.str()); } - std::stringstream s; + std::ostringstream s; s << 
in.rdbuf(); out << s.str(); } -void save_session(lbann_comm *comm, int argc, char **argv, lbann_data::LbannPB& p) +void save_session(const lbann_comm& comm, const int argc, char * const* argv, lbann_data::LbannPB& p) { - if (!comm->am_world_master()) { + if (!comm.am_world_master()) { return; } @@ -952,7 +963,7 @@ void save_session(lbann_comm *comm, int argc, char **argv, lbann_data::LbannPB& //open output file std::ofstream out(file_name.c_str()); if (!out.is_open()) { - std::stringstream err; + std::ostringstream err; err << "failed to open file (" << file_name << ") for writing"; LBANN_ERROR(err.str()); } @@ -977,7 +988,7 @@ void save_session(lbann_comm *comm, int argc, char **argv, lbann_data::LbannPB& << tm << "\n#\n#\n# Experiment was run with lbann version: " << lbann_version << "\n#\n#\n# To rerun the experiment: \n" - << "# $ srun -n" << comm->get_procs_in_world() << " " << argv[0] + << "# $ srun -n" << comm.get_procs_in_world() << " " << argv[0] << " --prototext=" << file_name << "\n#\n#\n"; out << "# Selected SLURM Environment Variables:\n"; diff --git a/src/utils/lbann_library.cpp b/src/utils/lbann_library.cpp index 9f74500706c..3ef86e1119f 100644 --- a/src/utils/lbann_library.cpp +++ b/src/utils/lbann_library.cpp @@ -67,19 +67,19 @@ std::unique_ptr build_model_from_prototext( std::cerr << "starting build_model_from_prototext" << std::endl; } - std::stringstream err; + std::ostringstream err; options *opts = options::get(); // Optionally over-ride some values in prototext - get_cmdline_overrides(comm, pb); + get_cmdline_overrides(*comm, pb); - customize_data_readers_index_list(comm, pb); + customize_data_readers_index_list(*comm, pb); lbann_data::Model *pb_model = pb.mutable_model(); // Adjust the number of parallel readers; this may be adjusted // after calling split_trainers() - set_num_parallel_readers(comm, pb); + set_num_parallel_readers(*comm, pb); // Check to see if the model wants to reduce the I/O parallelism 
if(pb_model->serialize_background_io() && io_thread_pool->get_num_threads() != 1) { @@ -142,7 +142,7 @@ std::unique_ptr build_model_from_prototext( // Save info to file; this includes the complete prototext (with any over-rides // from the cmd line) and various other info - save_session(comm, argc, argv, pb); + save_session(*comm, argc, argv, pb); // Report useful information if (master) { @@ -175,7 +175,7 @@ std::unique_ptr build_model_from_prototext( } // User feedback - print_parameters(comm, pb); + print_parameters(*comm, pb); // Initalize model std::unique_ptr ret_model{ diff --git a/tests/test_shuffled_indices.cpp b/tests/test_shuffled_indices.cpp index 3078d644e5e..94a2b383b9b 100644 --- a/tests/test_shuffled_indices.cpp +++ b/tests/test_shuffled_indices.cpp @@ -45,7 +45,7 @@ int main(int argc, char *argv[]) { options *opts = options::get(); opts->init(argc, argv); if (opts->has_string("h") or opts->has_string("help") or argc == 1) { - print_help(comm.get()); + print_help(*comm); return EXIT_SUCCESS; } From ffe52d1fc57472f7e97e134f2ed58d9c52930af7 Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Tue, 19 Feb 2019 10:20:55 -0800 Subject: [PATCH 068/443] testing --- src/data_store/jag_io.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/data_store/jag_io.cpp b/src/data_store/jag_io.cpp index 54b2cd24868..77a2714ba16 100644 --- a/src/data_store/jag_io.cpp +++ b/src/data_store/jag_io.cpp @@ -16,6 +16,7 @@ namespace lbann { + jag_io::~jag_io() { if (m_data_stream != nullptr && m_data_stream->is_open()) { m_data_stream->close(); From 0c9560227f65d36a9f0952de5c2a0e5a2db400ba Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Tue, 19 Feb 2019 10:43:36 -0800 Subject: [PATCH 069/443] Removing legacy data store implementations from the code base. Cleaning up the data readers to avoid calls to explicit data stores. This will clear the path for the new conduit data store based implementations. 
--- include/lbann/CMakeLists.txt | 1 + include/lbann/data_readers/data_reader.hpp | 36 - .../lbann/data_readers/data_reader_csv.hpp | 3 - .../data_readers/data_reader_imagenet.hpp | 3 - .../data_reader_imagenet_patches.hpp | 2 - .../data_reader_merge_features.hpp | 3 - .../data_reader_merge_samples.hpp | 7 - .../data_reader_mnist_siamese.hpp | 3 - .../data_readers/data_reader_multi_images.hpp | 3 - .../data_reader_pilot2_molecular.hpp | 3 - .../data_readers/data_reader_triplet.hpp | 3 - include/lbann/data_store/data_store_csv.hpp | 99 -- include/lbann/data_store/data_store_image.hpp | 157 --- .../lbann/data_store/data_store_imagenet.hpp | 77 -- .../data_store_imagenet_patches.hpp | 64 -- .../data_store/data_store_merge_features.hpp | 68 -- .../data_store/data_store_merge_samples.hpp | 95 -- .../data_store/data_store_multi_images.hpp | 79 -- .../data_store_pilot2_molecular.hpp | 132 --- .../lbann/data_store/data_store_triplet.hpp | 68 -- .../layers/io/input/generic_input_layer.hpp | 4 - include/lbann/lbann.hpp | 1 - src/data_readers/data_reader.cpp | 104 +- src/data_readers/data_reader_csv.cpp | 27 +- src/data_readers/data_reader_imagenet.cpp | 20 +- .../data_reader_imagenet_patches.cpp | 19 +- .../data_reader_merge_features.cpp | 11 - .../data_reader_merge_samples.cpp | 51 - .../data_reader_mnist_siamese.cpp | 34 +- src/data_readers/data_reader_multi_images.cpp | 19 +- .../data_reader_pilot2_molecular.cpp | 60 -- src/data_readers/data_reader_triplet.cpp | 20 +- src/data_store/CMakeLists.txt | 10 - src/data_store/data_store_csv.cpp | 225 ----- src/data_store/data_store_image.cpp | 922 ------------------ src/data_store/data_store_imagenet.cpp | 235 ----- .../data_store_imagenet_patches.cpp | 61 -- src/data_store/data_store_merge_features.cpp | 111 --- src/data_store/data_store_merge_samples.cpp | 117 --- src/data_store/data_store_multi_images.cpp | 218 ----- .../data_store_pilot2_molecular.cpp | 305 ------ src/data_store/data_store_triplet.cpp | 66 -- 
src/data_store/generic_data_store.cpp | 9 +- 43 files changed, 56 insertions(+), 3499 deletions(-) delete mode 100644 include/lbann/data_store/data_store_csv.hpp delete mode 100644 include/lbann/data_store/data_store_image.hpp delete mode 100644 include/lbann/data_store/data_store_imagenet.hpp delete mode 100644 include/lbann/data_store/data_store_imagenet_patches.hpp delete mode 100644 include/lbann/data_store/data_store_merge_features.hpp delete mode 100644 include/lbann/data_store/data_store_merge_samples.hpp delete mode 100644 include/lbann/data_store/data_store_multi_images.hpp delete mode 100644 include/lbann/data_store/data_store_pilot2_molecular.hpp delete mode 100644 include/lbann/data_store/data_store_triplet.hpp delete mode 100644 src/data_store/data_store_csv.cpp delete mode 100644 src/data_store/data_store_image.cpp delete mode 100644 src/data_store/data_store_imagenet.cpp delete mode 100644 src/data_store/data_store_imagenet_patches.cpp delete mode 100644 src/data_store/data_store_merge_features.cpp delete mode 100644 src/data_store/data_store_merge_samples.cpp delete mode 100644 src/data_store/data_store_multi_images.cpp delete mode 100644 src/data_store/data_store_pilot2_molecular.cpp delete mode 100644 src/data_store/data_store_triplet.cpp diff --git a/include/lbann/CMakeLists.txt b/include/lbann/CMakeLists.txt index a717a52fca6..399cdbc3d5c 100644 --- a/include/lbann/CMakeLists.txt +++ b/include/lbann/CMakeLists.txt @@ -10,6 +10,7 @@ set_full_path(THIS_DIR_HEADERS add_subdirectory(callbacks) add_subdirectory(data_distributions) add_subdirectory(data_readers) +add_subdirectory(data_store) add_subdirectory(io) add_subdirectory(layers) add_subdirectory(metrics) diff --git a/include/lbann/data_readers/data_reader.hpp b/include/lbann/data_readers/data_reader.hpp index f05fee6e69b..44e6f662cc7 100644 --- a/include/lbann/data_readers/data_reader.hpp +++ b/include/lbann/data_readers/data_reader.hpp @@ -90,8 +90,6 @@ class generic_data_reader : public 
lbann_image_preprocessor { m_shuffle(shuffle), m_absolute_sample_count(0), m_validation_percent(0.0), m_use_percent(1.0), m_master(false), - m_save_minibatch_indices(false), - m_compound_rank(0), m_gan_labelling(false), //default, not GAN m_gan_label_value(0), //If GAN, default for fake label, discriminator model m_is_partitioned(false), @@ -695,30 +693,6 @@ class generic_data_reader : public lbann_image_preprocessor { /// sets up a data_store. virtual void setup_data_store(model *m); - /** This call changes the functionality of fetch_data(); when set, - * indices are added to m_my_minibatch_indices, but fetch_datum() - * is not called. This method is added to support data store functionality. - */ - void set_save_minibatch_entries(bool b); - - /// support of data store functionality - void init_minibatch(); - - /// support of data store functionality - const std::vector > & get_minibatch_indices() const { - return m_my_minibatch_indices; - } - - /// support of data store functionality - int get_compound_rank() { - return m_compound_rank; - } - - /// support of data store functionality - void set_compound_rank(int r) { - m_compound_rank = r; - } - void set_gan_labelling(bool has_gan_labelling) { m_gan_labelling = has_gan_labelling; } @@ -869,16 +843,6 @@ class generic_data_reader : public lbann_image_preprocessor { friend class data_reader_merge_samples; protected : - /// added to support data store functionality - bool m_save_minibatch_indices; - - /// added to support data store functionality - std::vector > m_my_minibatch_indices; - - /// added to support data store functionality - int m_compound_rank; - - //var to support GAN bool m_gan_labelling; //boolean flag of whether its GAN binary label, default is false int m_gan_label_value; //zero(0) or 1 label value for discriminator, default is 0 diff --git a/include/lbann/data_readers/data_reader_csv.hpp b/include/lbann/data_readers/data_reader_csv.hpp index ab03f66e08a..34d5b46e8a8 100644 --- 
a/include/lbann/data_readers/data_reader_csv.hpp +++ b/include/lbann/data_readers/data_reader_csv.hpp @@ -129,9 +129,6 @@ class csv_reader : public generic_data_reader { */ std::vector fetch_line_label_response(int data_id); - /// sets up a data_store. - void setup_data_store(model *m) override; - protected: /** * Fetch the data associated with data_id. diff --git a/include/lbann/data_readers/data_reader_imagenet.hpp b/include/lbann/data_readers/data_reader_imagenet.hpp index 750df07948c..7335c918137 100644 --- a/include/lbann/data_readers/data_reader_imagenet.hpp +++ b/include/lbann/data_readers/data_reader_imagenet.hpp @@ -55,9 +55,6 @@ class imagenet_reader : public image_data_reader { virtual CPUMat create_datum_view(CPUMat& X, const int mb_idx) const; bool fetch_datum(CPUMat& X, int data_id, int mb_idx) override; - /// sets up a data_store. - void setup_data_store(model *m) override; - protected: /// preprocessor duplicated for each omp thread std::vector > m_pps; diff --git a/include/lbann/data_readers/data_reader_imagenet_patches.hpp b/include/lbann/data_readers/data_reader_imagenet_patches.hpp index 8ec20390720..9b37dd64019 100644 --- a/include/lbann/data_readers/data_reader_imagenet_patches.hpp +++ b/include/lbann/data_readers/data_reader_imagenet_patches.hpp @@ -56,8 +56,6 @@ class imagenet_reader_patches : public image_data_reader { return {m_num_patches*m_image_num_channels, m_image_height, m_image_width}; } - void setup_data_store(model *m) override; - protected: void set_defaults() override; virtual bool replicate_processor(const cv_process_patches& pp, const int nthreads); diff --git a/include/lbann/data_readers/data_reader_merge_features.hpp b/include/lbann/data_readers/data_reader_merge_features.hpp index d161d35c94e..0bb4b5b2282 100644 --- a/include/lbann/data_readers/data_reader_merge_features.hpp +++ b/include/lbann/data_readers/data_reader_merge_features.hpp @@ -70,9 +70,6 @@ class data_reader_merge_features : public 
generic_compound_data_reader { return {get_linearized_data_size()}; } - /// sets up a data_store. - void setup_data_store(model *m) override; - protected: bool fetch_datum(CPUMat& X, int data_id, int mb_idx) override; bool fetch_label(CPUMat& Y, int data_id, int mb_idx) override; diff --git a/include/lbann/data_readers/data_reader_merge_samples.hpp b/include/lbann/data_readers/data_reader_merge_samples.hpp index f09775b45d0..d09d208361f 100644 --- a/include/lbann/data_readers/data_reader_merge_samples.hpp +++ b/include/lbann/data_readers/data_reader_merge_samples.hpp @@ -71,9 +71,6 @@ class data_reader_merge_samples : public generic_compound_data_reader { return m_num_samples_psum; } - /// sets up a data_store. - void setup_data_store(model *m) override; - protected: bool fetch_datum(CPUMat& X, int data_id, int mb_idx) override; bool fetch_label(CPUMat& Y, int data_id, int mb_idx) override; @@ -82,10 +79,6 @@ class data_reader_merge_samples : public generic_compound_data_reader { /// Partial sums of the number of samples in each reader. std::vector m_num_samples_psum; - /// support for data store functionality; load() will call - /// this method when using data store - void load_using_data_store(); - /// code common to both load() and load_using_data_store() void setup_indices(int num_samples); diff --git a/include/lbann/data_readers/data_reader_mnist_siamese.hpp b/include/lbann/data_readers/data_reader_mnist_siamese.hpp index 6dd8ffbb6b5..3a53594eb23 100644 --- a/include/lbann/data_readers/data_reader_mnist_siamese.hpp +++ b/include/lbann/data_readers/data_reader_mnist_siamese.hpp @@ -81,9 +81,6 @@ class data_reader_mnist_siamese : public data_reader_multi_images { /// Fetch this mini-batch's labels into Y by calling the new overloaded fetch_label() int fetch_labels(CPUMat& Y) override; - /// sets up a data_store. 
- void setup_data_store(model *m) override; - protected: /** * Set the default configuration such as the width, height, and number of diff --git a/include/lbann/data_readers/data_reader_multi_images.hpp b/include/lbann/data_readers/data_reader_multi_images.hpp index 18ec3e3fdaa..d4eb3a1c2f7 100644 --- a/include/lbann/data_readers/data_reader_multi_images.hpp +++ b/include/lbann/data_readers/data_reader_multi_images.hpp @@ -96,9 +96,6 @@ class data_reader_multi_images : public imagenet_reader { return m_num_img_srcs; } - /// sets up a data_store. - void setup_data_store(model *m) override; - protected: void set_defaults() override; virtual std::vector create_datum_views(CPUMat& X, const int mb_idx) const; diff --git a/include/lbann/data_readers/data_reader_pilot2_molecular.hpp b/include/lbann/data_readers/data_reader_pilot2_molecular.hpp index 5fa84185445..d28b29d3923 100644 --- a/include/lbann/data_readers/data_reader_pilot2_molecular.hpp +++ b/include/lbann/data_readers/data_reader_pilot2_molecular.hpp @@ -132,9 +132,6 @@ class pilot2_molecular_reader : public generic_data_reader { return m_neighbors_data_size; } - /// sets up a data_store. - void setup_data_store(model *m) override; - protected: /// Fetch a molecule and its neighbors. bool fetch_datum(CPUMat& X, int data_id, int mb_idx) override; diff --git a/include/lbann/data_readers/data_reader_triplet.hpp b/include/lbann/data_readers/data_reader_triplet.hpp index 6b582a2f785..4e8cc2188e6 100644 --- a/include/lbann/data_readers/data_reader_triplet.hpp +++ b/include/lbann/data_readers/data_reader_triplet.hpp @@ -78,9 +78,6 @@ class data_reader_triplet : public data_reader_multi_images { return m_samples.get_sample(idx); } - /// sets up a data_store. 
- void setup_data_store(model *m) override; - protected: void set_defaults() override; bool fetch_datum(CPUMat& X, int data_id, int mb_idx) override; diff --git a/include/lbann/data_store/data_store_csv.hpp b/include/lbann/data_store/data_store_csv.hpp deleted file mode 100644 index f180124de88..00000000000 --- a/include/lbann/data_store/data_store_csv.hpp +++ /dev/null @@ -1,99 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_CSV_HPP__ -#define __DATA_STORE_CSV_HPP__ - -#include "lbann/data_store/generic_data_store.hpp" -#include - -namespace lbann { - -class csv_reader; -class data_store_merge_features; - -/** - * todo - */ - -class data_store_csv : public generic_data_store { - public: - - //! ctor - data_store_csv(generic_data_reader *reader, model *m); - - //! 
copy ctor - data_store_csv(const data_store_csv&) = default; - - //! operator= - data_store_csv& operator=(const data_store_csv&) = default; - - data_store_csv * copy() const override { return new data_store_csv(*this); } - - //! dtor - ~data_store_csv() override; - - void get_data_buf_DataType(int data_id, std::vector *&buf) override; - - void setup() override; - -protected : - - friend data_store_merge_features; - - csv_reader *m_csv_reader; - - /// size of the vectors that are returned by - /// reader->fetch_line_label_response(data_id) - int m_vector_size; - - /// buffers for data that will be passed to the data reader's fetch_datum method - std::unordered_map> m_my_minibatch_data; - - /// retrive data needed for passing to the data reader for the next epoch - void exchange_data() override; - /// returns, in "indices," the set of indices that processor "p" - /// needs for the next epoch. Called by exchange_data - void get_indices(std::unordered_set &indices, int p); - - /// returns, in "indices," the subset of indices that processor "p" - /// needs for the next epoch and that this processor owns. - /// Called by exchange_data - void get_my_indices(std::unordered_set &indices, int p); - - /// will contain the data that this processor owns; - /// Maps a global index to its associated data - std::map> m_data; - //std::unordered_map> m_data; - - /// fills in m_data (the data store) - void populate_datastore(); -}; - -} // namespace lbann - -#endif // __DATA_STORE_CSV_HPP__ diff --git a/include/lbann/data_store/data_store_image.hpp b/include/lbann/data_store/data_store_image.hpp deleted file mode 100644 index 12316888926..00000000000 --- a/include/lbann/data_store/data_store_image.hpp +++ /dev/null @@ -1,157 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. 
-// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_IMAGE_HPP__ -#define __DATA_STORE_IMAGE_HPP__ - -#include "lbann/data_store/generic_data_store.hpp" -#include - -namespace lbann { - -/** - * todo - */ - -class data_store_image : public generic_data_store { - public: - - //! ctor - data_store_image(generic_data_reader *reader, model *m) : - generic_data_store(reader, m), - m_num_img_srcs(1) {} - - //! copy ctor - data_store_image(const data_store_image&) = default; - - //! operator= - data_store_image& operator=(const data_store_image&) = default; - - generic_data_store * copy() const override = 0; - - //! 
dtor - ~data_store_image() override; - - void setup() override; - - using generic_data_store::get_data_buf; - - /// data readers call this method - void get_data_buf(int data_id, std::vector *&buf, int multi_idx = 0) override; - - protected : - - void exchange_data() override; - - /// maps a global index (wrt image_list) to number of bytes in the file - std::unordered_map m_file_sizes; - /// fills in m_file_sizes - virtual void get_file_sizes() = 0; - - /// fills in m_file_sizes; this is called when we're using files - /// from a tarball - virtual void read_file_sizes(); - - /// called by get_file_sizes - void exchange_file_sizes( - std::vector &global_indices, - std::vector &num_bytes); - - /// buffers that will be passed to reader::fetch_datum - std::unordered_map > m_my_minibatch_data; - - /// loads file from disk into *p; checks that bytes read = sz - void load_file(const std::string &dir, const std::string &fn, unsigned char *p, size_t sz); - - /// reads all files assigned to this processor into memory (m_data) - /// version for in-memory mode - virtual void read_files() = 0; - /// version for out-of-memory mode - virtual void read_files(const std::unordered_set &indices) = 0; - - /// in multi-image scenarios, the number of images in each sample - unsigned int m_num_img_srcs; - - /// the actual data store! 
- std::unordered_map> m_data; - - /// returns memory required to hold p's files in memory - size_t get_my_num_file_bytes(); - - /// returns number of bytes in the data set - size_t get_global_num_file_bytes(); - - /// parses /proc/meminfo to determine available memory; returned - /// value is memory in kB - size_t get_available_memory(); - - /// attempts to determine if there is sufficient RAM for - /// in-memory data store; may call MPI_Abort - void report_memory_constraints(); - - /// for out-of-memory mode: read files from, e.g, lscratchX, and write - /// to local store, e.g, /l/ssd - void stage_files(); - - /// for out-of-memory mode: unpack files from a previously created tarball - void stage_tarball(); - - /// called by data_reader::fetch_data; supports out-of-memory mode - void fetch_data() override; - - /// creates a tarball of files written to local disk, then - /// copies the tarball to, e.g, lscratchX. Activated by the cmd line - /// options: --create_tarball where is the directory - /// to which to copy the tarball. 
- void create_tarball(); - - /// returns the string that will be passed to a system call to - /// create the tarball on local store (/l/ssd), and string for copying - /// to remote store (lscratchX) - std::pair get_tarball_exe(); - - /// called by create_tarball - void write_file_sizes(); - - /// called by create_tarball - void write_datastore_indices(); - - void read_datastore_indices(); - - /// called by create_tarball - void write_data_filepaths(); - - void read_data_filepaths(); - - /// returns true if option: --create_tarball is in use; - /// print info to screen, and performs error checking - bool are_we_creating_tarballs(); -}; - -} // namespace lbann - -#endif // __DATA_STORE_IMAGE_HPP__ diff --git a/include/lbann/data_store/data_store_imagenet.hpp b/include/lbann/data_store/data_store_imagenet.hpp deleted file mode 100644 index 38a0299c08a..00000000000 --- a/include/lbann/data_store/data_store_imagenet.hpp +++ /dev/null @@ -1,77 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_IMAGENET_HPP__ -#define __DATA_STORE_IMAGENET_HPP__ - -#include "lbann/data_store/data_store_image.hpp" - -namespace lbann { - -/** - * todo - */ - -class data_store_imagenet : public data_store_image { - public: - - //! ctor - data_store_imagenet(generic_data_reader *reader, model *m) : - data_store_image(reader, m) {} - - //! copy ctor - data_store_imagenet(const data_store_imagenet&) = default; - - //! operator= - data_store_imagenet& operator=(const data_store_imagenet&) = default; - - data_store_imagenet * copy() const override { return new data_store_imagenet(*this); } - - //! dtor - ~data_store_imagenet() override {}; - - void setup() override; - - protected : - - void get_file_sizes() override; - - /// for use during development and testing - virtual void test_data(); - - /// for use during development and testing - void test_file_sizes(); - - void read_files() override; - void read_files(const std::unordered_set &indices) override; - - void build_data_filepaths() override; -}; - -} // namespace lbann - -#endif // __DATA_STORE_IMAGENET_HPP__ diff --git a/include/lbann/data_store/data_store_imagenet_patches.hpp b/include/lbann/data_store/data_store_imagenet_patches.hpp deleted file mode 100644 index 238448d44ec..00000000000 --- a/include/lbann/data_store/data_store_imagenet_patches.hpp +++ /dev/null @@ -1,64 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. 
-// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_IMAGENET_PATCHES_HPP__ -#define __DATA_STORE_IMAGENET_PATCHES_HPP__ - -#include "lbann/data_store/data_store_imagenet.hpp" - -namespace lbann { - -/** - * todo - */ - -class data_store_imagenet_patches : public data_store_imagenet { - public: - - //! ctor - data_store_imagenet_patches(generic_data_reader *reader, model *m) : - data_store_imagenet(reader, m) {} - - //! copy ctor - data_store_imagenet_patches(const data_store_imagenet_patches&) = default; - - //! operator= - data_store_imagenet_patches& operator=(const data_store_imagenet_patches&) = default; - - data_store_imagenet_patches * copy() const override { return new data_store_imagenet_patches(*this); } - - //! 
dtor - ~data_store_imagenet_patches() override {}; - - void setup() override; - - protected : -}; - -} // namespace lbann - -#endif // __DATA_STORE_IMAGENET_PATCHES_HPP__ diff --git a/include/lbann/data_store/data_store_merge_features.hpp b/include/lbann/data_store/data_store_merge_features.hpp deleted file mode 100644 index c38cfe80871..00000000000 --- a/include/lbann/data_store/data_store_merge_features.hpp +++ /dev/null @@ -1,68 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_MERGE_FEATURES_HPP__ -#define __DATA_STORE_MERGE_FEATURES_HPP__ - -#include "lbann/data_store/generic_data_store.hpp" - - -namespace lbann { - -/** - * todo - */ - -class data_store_merge_features : public generic_data_store { - public: - - //! ctor - data_store_merge_features(generic_data_reader *reader, model *m); - - //! 
copy ctor - data_store_merge_features(const data_store_merge_features&) = default; - - //! operator= - data_store_merge_features& operator=(const data_store_merge_features&) = default; - - data_store_merge_features * copy() const override { return new data_store_merge_features(*this); } - - //! dtor - ~data_store_merge_features() override; - - void setup() override; - - protected : - - void exchange_data() override; - - std::vector m_subsidiary_stores; -}; - -} // namespace lbann - -#endif // __DATA_STORE_MERGE_FEATURES_HPP__ diff --git a/include/lbann/data_store/data_store_merge_samples.hpp b/include/lbann/data_store/data_store_merge_samples.hpp deleted file mode 100644 index cb21f206e9c..00000000000 --- a/include/lbann/data_store/data_store_merge_samples.hpp +++ /dev/null @@ -1,95 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_MERGE_SAMPLES_HPP__ -#define __DATA_STORE_MERGE_SAMPLES_HPP__ - -#include "lbann/data_store/generic_data_store.hpp" - - -namespace lbann { - -/** - * todo - */ - -class data_store_pilot2_molecular; - -class data_store_merge_samples : public generic_data_store { - public: - - //! ctor - data_store_merge_samples(lbann_comm *comm, generic_data_reader *reader, model *m); - - //! copy ctor - data_store_merge_samples(const data_store_merge_samples&) = default; - - //! operator= - data_store_merge_samples& operator=(const data_store_merge_samples&) = default; - - data_store_merge_samples * copy() const override { return new data_store_merge_samples(*this); } - - //! dtor - ~data_store_merge_samples() override; - - using generic_data_store::get_data_buf; - void get_data_buf(int data_id, std::vector *&buf, int multi_idx = 0) override {} - - void setup() override; - - protected : - - void exchange_data() override; - - /// this contains a concatenation of the indices in m_minibatch_indices - /// (see: generic_data_reader.hpp) - std::vector m_my_minibatch_indices; - - std::vector m_subsidiary_stores; - - - /// when running in in-memory mode, this buffer will contain - /// the concatenated data - //std::vector m_data; - - /// allocate mem for m_data - //void allocate_memory(); - - //void read_files(); - - /// will contain data to be passed to the data_reader - //std::vector > m_my_data; - - /// maps indices wrt shuffled indices to indices in m_my_data - //std::unordered_map m_my_data_hash; - - MPI_Win m_win; -}; - -} // namespace lbann - -#endif // __DATA_STORE_MERGE_SAMPLES_HPP__ diff --git a/include/lbann/data_store/data_store_multi_images.hpp b/include/lbann/data_store/data_store_multi_images.hpp deleted file mode 100644 index dc238db3e92..00000000000 --- a/include/lbann/data_store/data_store_multi_images.hpp +++ /dev/null @@ -1,79 +0,0 @@ 
-//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_MULTI_IMAGES_HPP__ -#define __DATA_STORE_MULTI_IMAGES_HPP__ - -#include "lbann/data_store/data_store_imagenet.hpp" - -namespace lbann { - -/** - * todo - */ - -class data_store_multi_images : public data_store_imagenet { - public: - - //! ctor - data_store_multi_images(generic_data_reader *reader, model *m) : - data_store_imagenet(reader, m) { - set_name("data_store_multi_images"); - } - - //! copy ctor - data_store_multi_images(const data_store_multi_images&) = default; - - //! operator= - data_store_multi_images& operator=(const data_store_multi_images&) = default; - - data_store_multi_images * copy() const override { return new data_store_multi_images(*this); } - - //! 
dtor - ~data_store_multi_images() override {}; - - void setup() override; - - protected : - - void get_file_sizes() override; - - void read_files() override; - void read_files(const std::unordered_set &indices) override; - - - virtual std::vector get_sample(size_t idx) const; - - /// for use during development and testing - void extended_testing() override; - - void build_data_filepaths() override; -}; - -} // namespace lbann - -#endif // __DATA_STORE_MULTI_IMAGES_HPP__ diff --git a/include/lbann/data_store/data_store_pilot2_molecular.hpp b/include/lbann/data_store/data_store_pilot2_molecular.hpp deleted file mode 100644 index cc7a5172f78..00000000000 --- a/include/lbann/data_store/data_store_pilot2_molecular.hpp +++ /dev/null @@ -1,132 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_PILOT2_MOLECULAR_HPP__ -#define __DATA_STORE_PILOT2_MOLECULAR_HPP__ - -#include "lbann/data_store/generic_data_store.hpp" -#include - -namespace lbann { - -class pilot2_molecular_reader; -class data_store_merge_samples; - -/** - * todo - */ - -class data_store_pilot2_molecular : public generic_data_store { - public: - - //! ctor - data_store_pilot2_molecular(generic_data_reader *reader, model *m); - - //! copy ctor - data_store_pilot2_molecular(const data_store_pilot2_molecular&) = default; - - //! operator= - data_store_pilot2_molecular& operator=(const data_store_pilot2_molecular&) = default; - - data_store_pilot2_molecular * copy() const override { return new data_store_pilot2_molecular(*this); } - - //! dtor - ~data_store_pilot2_molecular() override; - - using generic_data_store::get_data_buf; - void get_data_buf(int data_id, int tid, std::vector *&buf) override; - - void setup() override; - - /// needed to support data_reader_merge_samples (compound reader) - void clear_minibatch_indices() { - m_my_minibatch_indices_v.clear(); - } - - /// needed to support data_reader_merge_samples (compound reader) - void add_minibatch_index(int idx) { - m_my_minibatch_indices_v.push_back(idx); - } - - /// needed to support data_reader_merge_samples (compound reader) - void set_no_shuffle() { - m_shuffle = false; - } - - protected : - - friend data_store_merge_samples; - - pilot2_molecular_reader *m_pilot2_reader; - - /// fills in m_data - void construct_data_store(); - /// the data store. 
Note that this will break if word size = 4; - /// only meaningful on the owning processor - std::unordered_map> m_data; - /// called by construct_data_store() - void fill_in_data( - const int data_id, - const int num_samples_per_frame, - const int num_features, - double *features); - - /// maps: a shuffled index to the corresponding molecule's neighbors' indices - std::unordered_map > m_neighbors; - /// fills in m_neighbors - void build_nabor_map(); - - /// fills in m_my_molecules using non-blocking MPI send/recv - void exchange_data() override; - - /// contains the data of all molecules required by this processor - /// to execute one epoch. Maps: molecule data_id to set of neighbors (including - /// self: data_id); this is the set of molecules required in one call - /// to fetch_datum by the data reader - std::unordered_map> m_my_molecules; - - /// returns, in 's,' the set of molecules required for processor 'p' - /// for the next epoch - void get_required_molecules(std::unordered_set &s, int p); - - /// the buffers that will be passed to data_readers::fetch_datum - std::vector > m_data_buffer; - - /// the process that "owns" the data, i.e, this is the only process - /// whose m_reader will load data from disk - int m_owner_rank; - - /// true if this processor "owns" the data - bool m_owner; - - /// support for data_store_merge_samples - bool m_shuffle; -}; - -} // namespace lbann - -#endif // __DATA_STORE_PILOT2_MOLECULAR_HPP__ diff --git a/include/lbann/data_store/data_store_triplet.hpp b/include/lbann/data_store/data_store_triplet.hpp deleted file mode 100644 index 5c004f06722..00000000000 --- a/include/lbann/data_store/data_store_triplet.hpp +++ /dev/null @@ -1,68 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) 
listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __DATA_STORE_TRIPLET_HPP__ -#define __DATA_STORE_TRIPLET_HPP__ - -#include "lbann/data_store/data_store_multi_images.hpp" - -namespace lbann { - -/** - * todo - */ - -class data_store_triplet : public data_store_multi_images { - public: - - //! ctor - data_store_triplet(generic_data_reader *reader, model *m) : - data_store_multi_images(reader, m) { - set_name("data_store_triplet"); - } - - //! copy ctor - data_store_triplet(const data_store_triplet&) = default; - - //! operator= - data_store_triplet& operator=(const data_store_triplet&) = default; - - data_store_triplet * copy() const override { return new data_store_triplet(*this); } - - //! 
dtor - ~data_store_triplet() override {}; - - void setup() override; - - protected : - - std::vector get_sample(size_t idx) const override; -}; - -} // namespace lbann - -#endif // __DATA_STORE_TRIPLET_HPP__ diff --git a/include/lbann/layers/io/input/generic_input_layer.hpp b/include/lbann/layers/io/input/generic_input_layer.hpp index 4b6bc06a393..b81850c821e 100644 --- a/include/lbann/layers/io/input/generic_input_layer.hpp +++ b/include/lbann/layers/io/input/generic_input_layer.hpp @@ -250,10 +250,6 @@ class generic_input_layer : public io_layer { void fp_compute() override { execution_mode mode = this->m_model->get_execution_mode(); - /// support for data_store out-of-memory mode; this instructs - /// the data_store (via the data_reader) to read in the - /// next mb from file, then exchange data as needed - get_data_reader()->init_minibatch(); increment_active_buffer_idx(mode); generic_io_buffer* io_buffer = m_io_buffers[get_active_buffer_idx(mode) % m_io_buffers.size()]; diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index e67c9d084a7..8b3da4d1d83 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -126,7 +126,6 @@ /// Data stores #include "lbann/data_store/generic_data_store.hpp" -#include "lbann/data_store/data_store_imagenet.hpp" /// Callbacks #include "lbann/callbacks/callback_check_init.hpp" diff --git a/src/data_readers/data_reader.cpp b/src/data_readers/data_reader.cpp index 0345c0d89dc..6c8bcab2735 100644 --- a/src/data_readers/data_reader.cpp +++ b/src/data_readers/data_reader.cpp @@ -108,10 +108,8 @@ int lbann::generic_data_reader::fetch_data(CPUMat& X, El::Matrix& indic const int mb_size = std::min(El::Int{((end_pos - m_current_pos) + m_sample_stride - 1) / m_sample_stride}, X.Width()); - if (!m_save_minibatch_indices) { - El::Zeros_seq(X, X.Height(), X.Width()); - El::Zeros_seq(indices_fetched, mb_size, 1); - } + El::Zeros_seq(X, X.Height(), X.Width()); + El::Zeros_seq(indices_fetched, mb_size, 1); 
if(!position_valid()) { if(position_is_overrun()) { @@ -127,12 +125,10 @@ int lbann::generic_data_reader::fetch_data(CPUMat& X, El::Matrix& indic m_data_store->exchange_mini_batch_data(m_current_pos-m_base_offset-m_model_offset, loaded_batch_size); } - if (!m_save_minibatch_indices) { - /// Allow each thread to perform any preprocessing necessary on the - /// data source prior to fetching data - for (int t = 0; t < static_cast(m_io_thread_pool->get_num_threads()); t++) { - preprocess_data_source(t); - } + /// Allow each thread to perform any preprocessing necessary on the + /// data source prior to fetching data + for (int t = 0; t < static_cast(m_io_thread_pool->get_num_threads()); t++) { + preprocess_data_source(t); } static bool fix_jag = true; @@ -141,36 +137,26 @@ int lbann::generic_data_reader::fetch_data(CPUMat& X, El::Matrix& indic set_jag_variables(mb_size); } - if (m_save_minibatch_indices) { - m_my_minibatch_indices.resize(m_my_minibatch_indices.size() + 1); - for (int s = 0; s < mb_size; s++) { - int n = m_current_pos + (s * m_sample_stride); - m_my_minibatch_indices.back().push_back(n); + for (int t = 0; t < static_cast(m_io_thread_pool->get_num_threads()); t++) { + // Queue up work into other threads and then finish off the + // mini-batch in the active thread + if(t == m_io_thread_pool->get_local_thread_id()) { + continue; + }else { + m_io_thread_pool->submit_job_to_work_group( + std::bind(&generic_data_reader::fetch_data_block, this, std::ref(X), t, + mb_size, std::ref(indices_fetched))); } } + fetch_data_block(X, m_io_thread_pool->get_local_thread_id(), mb_size, indices_fetched); - else { - for (int t = 0; t < static_cast(m_io_thread_pool->get_num_threads()); t++) { - // Queue up work into other threads and then finish off the - // mini-batch in the active thread - if(t == m_io_thread_pool->get_local_thread_id()) { - continue; - }else { - m_io_thread_pool->submit_job_to_work_group( - std::bind(&generic_data_reader::fetch_data_block, this, 
std::ref(X), t, - mb_size, std::ref(indices_fetched))); - } - } - fetch_data_block(X, m_io_thread_pool->get_local_thread_id(), mb_size, indices_fetched); - - // Wait for all of the threads to finish - m_io_thread_pool->finish_work_group(); + // Wait for all of the threads to finish + m_io_thread_pool->finish_work_group(); - /// Allow each thread to perform any postprocessing necessary on the - /// data source prior to fetching data - for (int t = 0; t < static_cast(m_io_thread_pool->get_num_threads()); t++) { - postprocess_data_source(t); - } + /// Allow each thread to perform any postprocessing necessary on the + /// data source prior to fetching data + for (int t = 0; t < static_cast(m_io_thread_pool->get_num_threads()); t++) { + postprocess_data_source(t); } return mb_size; @@ -222,22 +208,17 @@ int lbann::generic_data_reader::fetch_labels(CPUMat& Y) { } } -// if (m_data_store != nullptr) { - //@todo: get it to work, then add omp support - //m_data_store->fetch_labels(...); - // } -// else { - std::string error_message; - for (int s = 0; s < mb_size; s++) { - int n = m_current_pos + (s * m_sample_stride); - int index = m_shuffled_indices[n]; - bool valid = fetch_label(Y, index, s); - if (!valid) { - error_message = "invalid label (index " + std::to_string(index) + ")"; - } + std::string error_message; + for (int s = 0; s < mb_size; s++) { + int n = m_current_pos + (s * m_sample_stride); + int index = m_shuffled_indices[n]; + bool valid = fetch_label(Y, index, s); + if (!valid) { + error_message = "invalid label (index " + std::to_string(index) + ")"; } - if (!error_message.empty()) { LBANN_ERROR(error_message); } - //} + } + if (!error_message.empty()) { LBANN_ERROR(error_message); } + return mb_size; } @@ -306,11 +287,9 @@ bool generic_data_reader::update(bool is_active_reader) { + std::to_string(m_stride_to_last_mini_batch)); } - if (!m_save_minibatch_indices) { - shuffle_indices(); - if (priming_data_store()) { - 
m_data_store->set_shuffled_indices(&m_shuffled_indices); - } + shuffle_indices(); + if (priming_data_store()) { + m_data_store->set_shuffled_indices(&m_shuffled_indices); } set_initial_position(); @@ -738,13 +717,6 @@ bool generic_data_reader::priming_data_store() const { && m_model->get_cur_epoch() == 0); } -void generic_data_reader::set_save_minibatch_entries(bool b) { - m_save_minibatch_indices = b; - if (b) { - m_my_minibatch_indices.reserve(get_num_iterations_per_epoch()); - } -} - void generic_data_reader::set_data_store(generic_data_store *g) { if (m_data_store != nullptr) { delete m_data_store; @@ -752,12 +724,6 @@ void generic_data_reader::set_data_store(generic_data_store *g) { m_data_store = g; } -void generic_data_reader::init_minibatch() { - if (m_data_store != nullptr) { - m_data_store->init_minibatch(); - } -} - void generic_data_reader::set_partitioned(bool partitioned_yes, double overlap, int mode) { if (m_comm->get_num_trainers() == 1 || m_comm->get_procs_in_world() == 1) { m_is_partitioned = false; diff --git a/src/data_readers/data_reader_csv.cpp b/src/data_readers/data_reader_csv.cpp index 60ea89c81c8..25c67bcc7d7 100644 --- a/src/data_readers/data_reader_csv.cpp +++ b/src/data_readers/data_reader_csv.cpp @@ -28,7 +28,6 @@ #include #include "lbann/data_readers/data_reader_csv.hpp" -#include "lbann/data_store/data_store_csv.hpp" #include "lbann/utils/options.hpp" #include @@ -270,18 +269,10 @@ void csv_reader::load() { } bool csv_reader::fetch_datum(CPUMat& X, int data_id, int mb_idx) { - if (m_data_store != nullptr) { - std::vector *buf; - m_data_store->get_data_buf_DataType(data_id, buf); - for (size_t i = 0; i < buf->size(); ++i) { - X(i, mb_idx) = (*buf)[i]; - } - } else { - auto line = fetch_line_label_response(data_id); - // TODO: Avoid unneeded copies. - for (size_t i = 0; i < line.size(); ++i) { - X(i, mb_idx) = line[i]; - } + auto line = fetch_line_label_response(data_id); + // TODO: Avoid unneeded copies. 
+ for (size_t i = 0; i < line.size(); ++i) { + X(i, mb_idx) = line[i]; } return true; } @@ -405,14 +396,4 @@ void csv_reader::setup_ifstreams() { } } -void csv_reader::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = new data_store_csv(this, m); - if (m_data_store != nullptr) { - m_data_store->setup(); - } -} - } // namespace lbann diff --git a/src/data_readers/data_reader_imagenet.cpp b/src/data_readers/data_reader_imagenet.cpp index d1822836261..3ee457f15d9 100644 --- a/src/data_readers/data_reader_imagenet.cpp +++ b/src/data_readers/data_reader_imagenet.cpp @@ -28,7 +28,6 @@ #include "lbann/data_readers/data_reader_imagenet.hpp" #include "lbann/data_readers/image_utils.hpp" -#include "lbann/data_store/data_store_imagenet.hpp" #include namespace lbann { @@ -130,17 +129,10 @@ bool imagenet_reader::fetch_datum(CPUMat& X, int data_id, int mb_idx) { int width=0, height=0, img_type=0; - std::vector *image_buf; - CPUMat X_v = create_datum_view(X, mb_idx); bool ret; - if (m_data_store != nullptr) { - m_data_store->get_data_buf(data_id, image_buf, 0); - ret = lbann::image_utils::load_image(*image_buf, width, height, img_type, *(m_pps[tid]), X_v); - } else { - ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v, m_thread_buffer[tid], &m_thread_cv_buffer[tid]); - } + ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v, m_thread_buffer[tid], &m_thread_cv_buffer[tid]); if(!ret) { throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " " @@ -156,14 +148,4 @@ bool imagenet_reader::fetch_datum(CPUMat& X, int data_id, int mb_idx) { return true; } -void imagenet_reader::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = new data_store_imagenet(this, m); - if (m_data_store != nullptr) { - m_data_store->setup(); - } -} - } // namespace lbann diff --git 
a/src/data_readers/data_reader_imagenet_patches.cpp b/src/data_readers/data_reader_imagenet_patches.cpp index fb66a6345f5..6c4be8ee89d 100644 --- a/src/data_readers/data_reader_imagenet_patches.cpp +++ b/src/data_readers/data_reader_imagenet_patches.cpp @@ -28,7 +28,6 @@ #include "lbann/data_readers/data_reader_imagenet_patches.hpp" #include "lbann/data_readers/image_utils.hpp" -#include "lbann/data_store/data_store_imagenet_patches.hpp" #include @@ -152,13 +151,7 @@ bool imagenet_reader_patches::fetch_datum(CPUMat& X, int data_id, int mb_idx) { int width=0, height=0, img_type=0; std::vector X_v = create_datum_views(X, mb_idx); bool ret; - if (m_data_store != nullptr) { - std::vector *image_buf; - m_data_store->get_data_buf(data_id, image_buf, 0); - ret = lbann::image_utils::load_image(*image_buf, width, height, img_type, *(m_pps[tid]), X_v); - } else { - ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v, m_thread_buffer[tid], &m_thread_cv_buffer[tid]); - } + ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v, m_thread_buffer[tid], &m_thread_cv_buffer[tid]); //ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v); if (m_pps[tid]->is_self_labeling()) { @@ -179,14 +172,4 @@ bool imagenet_reader_patches::fetch_datum(CPUMat& X, int data_id, int mb_idx) { return true; } -void imagenet_reader_patches::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = new data_store_imagenet_patches(this, m); - if (m_data_store != nullptr) { - m_data_store->setup(); - } -} - } // namespace lbann diff --git a/src/data_readers/data_reader_merge_features.cpp b/src/data_readers/data_reader_merge_features.cpp index 5442b6f5ef1..4e501932ab2 100644 --- a/src/data_readers/data_reader_merge_features.cpp +++ b/src/data_readers/data_reader_merge_features.cpp @@ -27,7 +27,6 @@ 
//////////////////////////////////////////////////////////////////////////////// #include "lbann/data_readers/data_reader_merge_features.hpp" -#include "lbann/data_store/data_store_merge_features.hpp" #include "lbann/utils/options.hpp" #include "lbann/utils/timer.hpp" @@ -110,14 +109,4 @@ bool data_reader_merge_features::fetch_response(CPUMat& Y, int data_id, int mb_i return m_label_reader->fetch_response(Y, data_id, mb_idx); } -void data_reader_merge_features::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = new data_store_merge_features(this, m); - if (m_data_store != nullptr) { - m_data_store->setup(); - } -} - } // namespace lbann diff --git a/src/data_readers/data_reader_merge_samples.cpp b/src/data_readers/data_reader_merge_samples.cpp index 3d88c992ece..be3416ed120 100644 --- a/src/data_readers/data_reader_merge_samples.cpp +++ b/src/data_readers/data_reader_merge_samples.cpp @@ -27,7 +27,6 @@ //////////////////////////////////////////////////////////////////////////////// #include "lbann/data_readers/data_reader_merge_samples.hpp" -#include "lbann/data_store/data_store_merge_samples.hpp" #include "lbann/utils/options.hpp" namespace lbann { @@ -52,38 +51,6 @@ data_reader_merge_samples& data_reader_merge_samples::operator=( data_reader_merge_samples::~data_reader_merge_samples() {} -void data_reader_merge_samples::load_using_data_store() { - // load the subsidiary data readers - int global_num_readers = m_data_readers.size(); - int np = m_comm->get_procs_per_trainer(); - for (int j=0; jset_compound_rank(owner); - m_data_readers[j]->set_comm(m_comm); - //only the processor whose rank == owner loads the NpyArray - m_data_readers[j]->load(); - } - - // do some sanity checks. 
- int num_labels, data_size, label_size; - num_labels = m_data_readers[0]->get_num_labels(); - data_size = m_data_readers[0]->get_linearized_data_size(); - label_size = m_data_readers[0]->get_linearized_label_size(); - const std::vector data_dims = m_data_readers[0]->get_data_dims(); - /* - MPI_Comm comm = m_comm->get_trainer_comm().comm; - std::vector data_dims_2 = data_dims; - MPI_Bcast(&num_labels, 1, MPI_INT, 0, comm); - MPI_Bcast(&data_size, 1, MPI_INT, 0, comm); - MPI_Bcast(&label_size, 1, MPI_INT, 0, comm); - MPI_Bcast(&data_dims_2[0], data_dims_2.size(), MPI_INT, 0, comm); - */ - sanity_check_for_consistency(num_labels, data_size, label_size, data_dims); - - size_t global_num_samples = compute_num_samples_psum(); - setup_indices(global_num_samples); -} - size_t data_reader_merge_samples::compute_num_samples_psum() { size_t global_num_samples = 0; // Prepend a 0 to make things easier. @@ -129,11 +96,6 @@ void data_reader_merge_samples::setup_indices(int num_samples) { } void data_reader_merge_samples::load() { - if (options::get()->has_bool("use_data_store") && options::get()->get_bool("use_data_store")) { - data_reader_merge_samples::load_using_data_store(); - return; - } - // Load each subsidiary data reader. 
for (auto&& reader : m_data_readers) { reader->set_comm(m_comm); @@ -189,17 +151,4 @@ bool data_reader_merge_samples::fetch_response(CPUMat& Y, int data_id, int mb_id std::to_string(data_id)); } -void data_reader_merge_samples::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = nullptr; -/* - m_data_store = new data_store_merge_samples(this, m); - if (m_data_store != nullptr) { - m_data_store->setup(); - } -*/ -} - } // namespace lbann diff --git a/src/data_readers/data_reader_mnist_siamese.cpp b/src/data_readers/data_reader_mnist_siamese.cpp index 813db041bc9..960f303ef93 100644 --- a/src/data_readers/data_reader_mnist_siamese.cpp +++ b/src/data_readers/data_reader_mnist_siamese.cpp @@ -29,7 +29,6 @@ #include "lbann/data_readers/data_reader_mnist_siamese.hpp" #include "lbann/data_readers/image_utils.hpp" -#include "lbann/data_store/data_store_multi_images.hpp" #include "lbann/utils/file_utils.hpp" #include #include @@ -156,23 +155,17 @@ int data_reader_mnist_siamese::fetch_labels(CPUMat& Y) { El::Zeros(Y, Y.Height(), Y.Width()); -// if (m_data_store != nullptr) { - //@todo: get it to work, then add omp support - //m_data_store->fetch_labels(...); - // } - -// else { - std::string error_message; - for (int s = 0; s < mb_size; s++) { - int n = m_current_pos + (s * m_sample_stride); - sample_t index = std::make_pair(m_shuffled_indices[n], m_shuffled_indices2[n]); - bool valid = fetch_label(Y, index, s); - if (!valid) { - error_message = "invalid label"; - } + std::string error_message; + for (int s = 0; s < mb_size; s++) { + int n = m_current_pos + (s * m_sample_stride); + sample_t index = std::make_pair(m_shuffled_indices[n], m_shuffled_indices2[n]); + bool valid = fetch_label(Y, index, s); + if (!valid) { + error_message = "invalid label"; } - if (!error_message.empty()) { LBANN_ERROR(error_message); } - //} + } + if (!error_message.empty()) { LBANN_ERROR(error_message); } + return mb_size; } @@ -300,11 +293,4 
@@ void data_reader_mnist_siamese::shuffle_indices() { } -void data_reader_mnist_siamese::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = nullptr; -} - } // namespace lbann diff --git a/src/data_readers/data_reader_multi_images.cpp b/src/data_readers/data_reader_multi_images.cpp index ed141a895c1..707f6702b69 100644 --- a/src/data_readers/data_reader_multi_images.cpp +++ b/src/data_readers/data_reader_multi_images.cpp @@ -29,7 +29,6 @@ #include "lbann/data_readers/data_reader_multi_images.hpp" #include "lbann/data_readers/image_utils.hpp" -#include "lbann/data_store/data_store_multi_images.hpp" #include "lbann/utils/file_utils.hpp" #include #include @@ -107,13 +106,7 @@ bool data_reader_multi_images::fetch_datum(CPUMat& X, int data_id, int mb_idx) { int width=0, height=0, img_type=0; const std::string imagepath = get_file_dir() + img_src[i]; bool ret = true; - if (m_data_store != nullptr) { - std::vector *image_buf; - m_data_store->get_data_buf(data_id, image_buf, i); - ret = lbann::image_utils::load_image(*image_buf, width, height, img_type, *(m_pps[tid]), X_v[i]); - } else { - ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v[i], m_thread_buffer[tid], &m_thread_cv_buffer[tid]); - } + ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v[i], m_thread_buffer[tid], &m_thread_cv_buffer[tid]); if(!ret) { throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " " @@ -214,14 +207,4 @@ void data_reader_multi_images::load() { select_subset_of_data(); } -void data_reader_multi_images::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = new data_store_multi_images(this, m); - if (m_data_store != nullptr) { - m_data_store->setup(); - } -} - } // namespace lbann diff --git a/src/data_readers/data_reader_pilot2_molecular.cpp 
b/src/data_readers/data_reader_pilot2_molecular.cpp index 750bf393480..5e60c1fc027 100644 --- a/src/data_readers/data_reader_pilot2_molecular.cpp +++ b/src/data_readers/data_reader_pilot2_molecular.cpp @@ -27,7 +27,6 @@ //////////////////////////////////////////////////////////////////////////////// #include "lbann/data_readers/data_reader_pilot2_molecular.hpp" -#include "lbann/data_store/data_store_pilot2_molecular.hpp" #include "lbann/utils/options.hpp" namespace lbann { @@ -40,16 +39,9 @@ void pilot2_molecular_reader::load() { // support for data store functionality: when not using data store, all procs // load the data; when using data store, only one does so bool is_mine = true; - int rank = m_comm->get_rank_in_trainer(); // note: when support for merge_samples is in place, the condition // "get_role() == "test" will go away. For now we need it, else // merge_samples will break - options *opts = options::get(); - if (opts->has_bool("use_data_store") && opts->get_bool("use_data_store") && get_role() == "test") { - if (rank != get_compound_rank()) { - is_mine = false; - } - } if (is_mine) { std::string infile = get_file_dir() + get_data_filename(); @@ -119,34 +111,6 @@ void pilot2_molecular_reader::load() { m_shape[2] = m_features.shape[3]; } - // when using data store, need to bcast some variable to all procs - if (options::get()->get_bool("use_data_store")) { - std::vector tmp(8); - if (rank == get_compound_rank()) { - //@todo: fix if we have floats! 
- m_neighbors_data_size = m_neighbors.data_holder->size() / 8; - - tmp[0] = m_num_samples; - tmp[1] = m_num_samples_per_frame; - tmp[2] = m_num_features; - tmp[3] = m_num_neighbors + 1; - tmp[4] = m_features.shape[2]; - tmp[5] = m_features.shape[3]; - tmp[6] = m_word_size; - tmp[7] = m_neighbors_data_size; - } - MPI_Bcast(tmp.data(), 8, MPI_INT, get_compound_rank(), m_comm->get_trainer_comm().GetMPIComm()); - m_num_samples = tmp[0]; - m_num_samples_per_frame = tmp[1]; - m_num_features = tmp[2]; - m_shape.resize(3); - m_shape[0] = tmp[3]; - m_shape[1] = tmp[4]; - m_shape[2] = tmp[5]; - m_word_size = tmp[6]; - m_neighbors_data_size = tmp[7]; - } - // Reset indices. m_shuffled_indices.clear(); m_shuffled_indices.resize(m_num_samples); @@ -156,20 +120,6 @@ void pilot2_molecular_reader::load() { bool pilot2_molecular_reader::fetch_datum( CPUMat& X, int data_id, int mb_idx) { - int tid = m_io_thread_pool->get_local_thread_id(); - - if (m_data_store != nullptr) { - std::vector *buf; - size_t jj = 0; - m_data_store->get_data_buf(data_id, tid, buf); - for (int idx = 0; idx < m_num_neighbors+1; idx++) { - for (int i = 0; i < m_num_features; ++i) { - X(m_num_features * idx + i, mb_idx) = (*buf)[jj++]; - //note: scale_data was already computed by the data_store - } - } - return true; - } const int frame = get_frame(data_id); // Fetch the actual molecule. 
@@ -227,14 +177,4 @@ void pilot2_molecular_reader::fetch_molecule(CPUMat& X, int data_id, int idx, } } -void pilot2_molecular_reader::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = new data_store_pilot2_molecular(this, m); - if (m_data_store != nullptr) { - m_data_store->setup(); - } -} - } // namespace lbann diff --git a/src/data_readers/data_reader_triplet.cpp b/src/data_readers/data_reader_triplet.cpp index 8e70a618e7a..dab1e60b860 100644 --- a/src/data_readers/data_reader_triplet.cpp +++ b/src/data_readers/data_reader_triplet.cpp @@ -29,7 +29,6 @@ #include "lbann/data_readers/data_reader_triplet.hpp" #include "lbann/data_readers/image_utils.hpp" -#include "lbann/data_store/data_store_triplet.hpp" #include "lbann/utils/file_utils.hpp" #include #include @@ -89,14 +88,7 @@ bool data_reader_triplet::fetch_datum(Mat& X, int data_id, int mb_idx) { int width=0, height=0, img_type=0; const std::string imagepath = get_file_dir() + sample.first[i]; bool ret = true; - if (m_data_store != nullptr) { - std::vector *image_buf; - m_data_store->get_data_buf(data_id, image_buf, i); - // This could probably have used image_utils::import_image() - ret = lbann::image_utils::load_image(*image_buf, width, height, img_type, *(m_pps[tid]), X_v[i]); - } else { - ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v[i], m_thread_buffer[tid], &m_thread_cv_buffer[tid]); - } + ret = lbann::image_utils::load_image(imagepath, width, height, img_type, *(m_pps[tid]), X_v[i], m_thread_buffer[tid], &m_thread_cv_buffer[tid]); if(!ret) { throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " " @@ -170,14 +162,4 @@ void data_reader_triplet::load() { select_subset_of_data(); } -void data_reader_triplet::setup_data_store(model *m) { - if (m_data_store != nullptr) { - delete m_data_store; - } - m_data_store = new data_store_triplet(this, m); - if (m_data_store != nullptr) { - 
m_data_store->setup(); - } -} - } // namespace lbann diff --git a/src/data_store/CMakeLists.txt b/src/data_store/CMakeLists.txt index 1e3ac36a98f..920a08401f2 100644 --- a/src/data_store/CMakeLists.txt +++ b/src/data_store/CMakeLists.txt @@ -1,15 +1,6 @@ # Add the source files for this directory set_full_path(THIS_DIR_SOURCES generic_data_store.cpp - data_store_csv.cpp - data_store_image.cpp - data_store_multi_images.cpp - data_store_imagenet.cpp - data_store_imagenet_patches.cpp - data_store_merge_samples.cpp - data_store_merge_features.cpp - data_store_pilot2_molecular.cpp - data_store_triplet.cpp data_store_jag.cpp jag_io.cpp jag_store.cpp @@ -21,4 +12,3 @@ set_target_properties(jag_converter-bin PROPERTIES OUTPUT_NAME jag_converter) set(SOURCES "${SOURCES}" "${THIS_DIR_SOURCES}" PARENT_SCOPE) - diff --git a/src/data_store/data_store_csv.cpp b/src/data_store/data_store_csv.cpp deleted file mode 100644 index cbadaa26e49..00000000000 --- a/src/data_store/data_store_csv.cpp +++ /dev/null @@ -1,225 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. 
You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_csv.hpp" -#include "lbann/data_readers/data_reader_csv.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/utils/timer.hpp" -#include - -namespace lbann { - -data_store_csv::data_store_csv( - generic_data_reader *reader, model *m) : - generic_data_store(reader, m) { - set_name("data_store_csv"); -} - -data_store_csv::~data_store_csv() { -} - -void data_store_csv::setup() { - double tm1 = get_time(); - std::stringstream err; - - if (m_master) { - std::cerr << "starting data_store_csv::setup() for role: " - << m_reader->get_role() << "\n" - << "calling generic_data_store::setup()\n"; - } - generic_data_store::setup(); - build_index_owner(); - - if (! m_in_memory) { - err << __FILE__ << " " << __LINE__ << " :: " - << "not yet implemented"; - throw lbann_exception(err.str()); - } - - else { - //sanity check - csv_reader *reader = dynamic_cast(m_reader); - if (reader == nullptr) { - err << __FILE__ << " " << __LINE__ << " :: " - << "dynamic_cast(m_reader) failed"; - throw lbann_exception(err.str()); - } - m_csv_reader = reader; - - if (m_np != reader->get_num_parallel_readers() && ! 
is_subsidiary_store()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "num_parallel_readers(): " << reader->get_num_parallel_readers() - << " m_np: " << m_np - << "; for this data_store num_readers must be the same as procs per model;\n" - << " if this isn't acceptable, please notify Dave Hysom so he can fix.\n" - << "reader role: " << m_reader->get_role(); - throw lbann_exception(err.str()); - } - - if (m_master) { - std::vector v = reader->fetch_line_label_response(0); - m_vector_size = v.size(); - } - m_comm->world_broadcast(0, m_vector_size); - - if (is_subsidiary_store()) { - return; - } - - if (m_master) std::cerr << "calling get_minibatch_index_vector\n"; - get_minibatch_index_vector(); - - if (m_master) std::cerr << "calling exchange_mb_indices()\n"; - exchange_mb_indices(); - - if (m_master) std::cerr << "calling get_my_datastore_indices\n"; - get_my_datastore_indices(); - - if (m_master) std::cerr << "calling populate_datastore()\n"; - populate_datastore(); - - if (m_master) std::cerr << "calling exchange_data()\n"; - exchange_data(); - } - - if (m_master) { - std::cerr << "TIME for data_store_csv setup: " << get_time() - tm1 << "\n"; - } -} - -void data_store_csv::get_data_buf_DataType(int data_id, std::vector *&buf) { -static int n = 0; - if (m_my_minibatch_data.find(data_id) == m_my_minibatch_data.end()) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to find data_id: " << data_id << " in m_my_minibatch_data\n" - << "m_my_minibatch_data.size(): " << m_my_minibatch_data.size() << "\n" - << "role: " << m_reader->get_role() << " n: " << n; - throw lbann_exception(err.str()); - } - n += 1; - buf = &m_my_minibatch_data[data_id]; -} - -void data_store_csv::get_my_indices(std::unordered_set &indices, int p) { - indices.clear(); - std::vector &v = m_all_minibatch_indices[p]; - for (auto t : v) { - int index = (*m_shuffled_indices)[t]; - if (m_data.find(index) != m_data.end()) { - indices.insert(index); - } - } -} - 
-void data_store_csv::get_indices(std::unordered_set &indices, int p) { - indices.clear(); - std::vector &v = m_all_minibatch_indices[p]; - for (auto t : v) { - indices.insert((*m_shuffled_indices)[t]); - } -} - - -void data_store_csv::exchange_data() { - double tm1 = get_time(); - std::stringstream err; - - //get indices I need for the next epoch, and start receives - std::unordered_set indices; - get_indices(indices, m_rank); - std::vector> recv_req(indices.size()); - - m_my_minibatch_data.clear(); - size_t jj = 0; - for (auto data_id : indices) { - m_my_minibatch_data[data_id].resize(m_vector_size); - int owner = get_index_owner(data_id); - if (owner >= m_np or owner < 0) { - err << __FILE__ << " " << __LINE__ << " :: " - << " ERROR: bad rank for owner in nb_recv; owner: " << owner << " data_id: " << data_id << " jj: " << jj+1 << " of " << indices.size(); - throw lbann_exception(err.str()); - } - m_comm->nb_tagged_recv(m_my_minibatch_data[data_id].data(), m_vector_size, owner, data_id, recv_req[jj++], m_comm->get_trainer_comm()); - } - - //start sends to all processors - std::vector>> send_req(m_np); - for (int p=0; pget_role: " << m_reader->get_role(); - throw lbann_exception(err.str()); - } - m_comm->nb_tagged_send(m_data[data_id].data(), m_vector_size, p, data_id, send_req[p][jj++], m_comm->get_trainer_comm()); - } - } - - //wait for sends to finish - if (m_master) { - for (size_t i=0; iwait_all(send_req[i]); - } - } - - //wait for recvs to finish - m_comm->wait_all(recv_req); - - if (m_master) { - std::cerr << "TIME for data_store_csv::exchange_data(): " - << get_time() - tm1 << "; role: " << m_reader->get_role() << "\n"; - } -} - -void data_store_csv::populate_datastore() { - for (auto idx : m_my_datastore_indices) { - m_data[idx] = m_csv_reader->fetch_line_label_response(idx); - if (m_data[idx].size() != (size_t) m_vector_size) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "m_data[" << idx << "].size() is " << 
m_data[idx].size() - << " but should be: " << m_vector_size - << "; m_data.size: " << m_data.size() << "\n"; - throw lbann_exception(err.str()); - } - } -} - -} // namespace lbann diff --git a/src/data_store/data_store_image.cpp b/src/data_store/data_store_image.cpp deleted file mode 100644 index d411584e7e9..00000000000 --- a/src/data_store/data_store_image.cpp +++ /dev/null @@ -1,922 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_image.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/data_readers/data_reader.hpp" -#include "lbann/utils/timer.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/io/file_io.hpp" - -#ifdef LBANN_SYS_SENDFILE_OK -#include -#endif // LBANN_SYS_SENDFILE_OK - -#include - -namespace lbann { - -data_store_image::~data_store_image() { -} - -void data_store_image::setup() { - set_name("data_store_image"); - if (m_master) std::cerr << "starting data_store_image::setup()\n"; - - options *opts = options::get(); - bool using_tarball = opts->has_string("use_tarball") ? true : false; - bool creating_tarball = are_we_creating_tarballs(); - if (using_tarball && creating_tarball) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + - " :: you cannot use both --using_tarball and --creating_tarball options"); - } - - if (using_tarball) { - stage_tarball(); - m_comm->global_barrier(); - } - - if (m_master) std::cerr << "data_store_image - calling generic_data_store::setup()\n"; - generic_data_store::setup(); - - double tm; - - //========================================================================== - // block for running in out-of-memory mode - //========================================================================== - if (! 
m_in_memory) { - if (!creating_tarball) { - if (m_master) std::cerr << "data_store_image - calling exchange_partitioned_indices\n"; - exchange_partitioned_indices(); - } - - if (m_master) std::cerr << "data_store_image - calling get_my_datastore_indices\n"; - if (creating_tarball) { - get_my_tarball_indices(); - } else { - get_my_datastore_indices(); - } - - if (m_master) std::cerr << "data_store_image - calling build_data_filepaths\n"; - if (using_tarball) { - read_data_filepaths(); - } else { - build_data_filepaths(); - } - - if (m_master) std::cerr << "data_store_image - calling get_file_sizes\n"; - tm = get_time(); - if (using_tarball) { - read_file_sizes(); - read_datastore_indices(); - } else { - get_file_sizes(); - } - if (m_master) std::cerr << "TIME for get_file_sizes: " << get_time() - tm << "\n"; - - if (m_master) std::cerr << "data_store_image - calling build_index_owner\n"; - tm = get_time(); - build_index_owner(); - if (m_master) std::cerr << "TIME for build_index_owner: " << get_time() - tm << "\n"; - - if (! 
using_tarball) { - if (m_master) std::cerr << "data_store_image - calling stage_files\n"; - tm = get_time(); - stage_files(); - if (m_master) std::cerr << "TIME for stage_files: " << get_time() - tm << "\n"; - } - - // create tarball and copy to lscratch (or where ever) - if (creating_tarball) { - if (m_master) std::cerr << "data_store_image - creating tarball\n"; - tm = get_time(); - create_tarball(); - if (m_master) std::cerr << "TIME for creating tarball: " << get_time() - tm << "\n"; - } - - m_is_setup = true; - } - - //========================================================================== - // block for running in in-memory mode - //========================================================================== - else { - if (m_master) std::cerr << "data_store_image - calling get_minibatch_index_vector\n"; - get_minibatch_index_vector(); - - if (m_master) std::cerr << "data_store_image - calling exchange_mb_indices\n"; - exchange_mb_indices(); - - if (m_master) std::cerr << "data_store_image - calling get_my_datastore_indices\n"; - get_my_datastore_indices(); - - if (m_master) std::cerr << "data_store_image - calling get_file_sizes\n"; - double tma = get_time(); - get_file_sizes(); - size_t num_bytes = get_global_num_file_bytes(); - if (m_master) std::cerr << "TIME for get_file_sizes: " << get_time() - tma << " global num files: " << m_file_sizes.size() << " data set size: " << ((double)num_bytes/1000000) << " MB\n"; - - if (m_master) std::cerr << "data_store_image - calling build_index_owner\n"; - tm = get_time(); - build_index_owner(); - if (m_master) std::cerr << "TIME for build_index_owner: " << get_time() - tm << "\n"; - - if (m_master) std::cerr << "data_store_image - calling report_memory_constrains\n"; - report_memory_constraints(); - - if (m_master) std::cerr << "data_store_image - calling read_files\n"; - tma = get_time(); - read_files(); - if (m_master) std::cerr << "TIME for read_files: " << get_time() - tma << "\n"; - - if (m_master) std::cerr << 
"data_store_image - calling exchange_data\n"; - exchange_data(); - - if (m_extended_testing) { - if (m_master) std::cerr << "data_store_image - calling extended_testing\n"; - extended_testing(); - } - } -} - - -void data_store_image::get_data_buf(int data_id, std::vector *&buf, int multi_idx) { - std::stringstream err; - int index = data_id * m_num_img_srcs + multi_idx; - if (m_my_minibatch_data.find(index) == m_my_minibatch_data.end()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to find index: " << index << " in m_my_minibatch_data; size: " - << m_my_minibatch_data.size() << " role: " << m_reader->get_role(); - throw lbann_exception(err.str()); - } - - buf = &m_my_minibatch_data[index]; -} - -void data_store_image::load_file(const std::string &dir, const std::string &fn, unsigned char *p, size_t sz) { - std::string imagepath; - if (dir != "") { - imagepath = dir + fn; - } else { - imagepath = fn; - } - std::ifstream in(imagepath.c_str(), std::ios::in | std::ios::binary); - if (!in) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << imagepath << " for reading" - << "; dir: " << dir << " fn: " << fn << "\n" - << "hostname: " << getenv("SLURMD_NODENAME") << " role: " << m_reader->get_role(); - throw lbann_exception(err.str()); - } - in.read((char*)p, sz); - if ((int)sz != in.gcount()) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to read " << sz << " bytes from " << imagepath - << " num bytes read: " << in.gcount() - << "\nhostname: " << getenv("SLURMD_NODENAME") << " role: " << m_reader->get_role(); - throw lbann_exception(err.str()); - } - in.close(); -} - -void data_store_image::exchange_data() { - double tm1 = get_time(); - std::stringstream err; - - //build map: proc -> global indices that proc needs for this epoch, and - // which I own - std::unordered_map> proc_to_indices; - for (size_t p=0; p>> send_req(m_np); - for (int p=0; pnb_tagged_send( - 
m_data[index].data(), len, p, index, - send_req[p][jj++], m_comm->get_trainer_comm()); - - } - } - if (jj != send_req[p].size()) throw lbann_exception("ERROR 1"); - } //start sends - - //build map: proc -> global indices that proc owns that I need - proc_to_indices.clear(); - for (auto idx : m_my_minibatch_indices_v) { - int index = (*m_shuffled_indices)[idx]; - int owner = get_index_owner(index); - proc_to_indices[owner].insert(index); - } - - //start recvs - m_my_minibatch_data.clear(); - std::vector>> recv_req(m_np); - for (auto t : proc_to_indices) { - int owner = t.first; - size_t jj = 0; - const std::unordered_set &s = t.second; - recv_req[owner].resize(s.size()*m_num_img_srcs); - for (auto idx : s) { - for (size_t k=0; knb_tagged_recv( - m_my_minibatch_data[index].data(), len, owner, - index, recv_req[owner][jj++], m_comm->get_trainer_comm()); - } - } - } - - //wait for sends to finish - for (size_t i=0; iwait_all(send_req[i]); - } - - //wait for recvs to finish - for (size_t i=0; iwait_all(recv_req[i]); - } - - if (m_master) { - std::cerr << "TIME for exchange_data: " << get_time() - tm1 - << "; role: " << m_reader->get_role() << "\n"; - } -} - - -void data_store_image::exchange_file_sizes( - std::vector &my_global_indices, - std::vector &my_num_bytes) { - - if (my_global_indices.size() == 0) { - my_global_indices.push_back(-1); - my_num_bytes.push_back(-1); - } - - std::vector rcv_counts(m_np); - int nbytes = my_global_indices.size(); - m_comm->trainer_all_gather(nbytes, rcv_counts); - int num_global_indices = std::accumulate(rcv_counts.begin(), rcv_counts.end(), 0); - - std::vector disp(m_np); //@todo: fix for model - disp[0] = 0; - for (int h=1; h all_global_indices(num_global_indices); - std::vector all_num_bytes(num_global_indices); - - m_comm->all_gather(my_global_indices, all_global_indices, rcv_counts, disp, m_comm->get_world_comm()); - - m_comm->all_gather(my_num_bytes, all_num_bytes, rcv_counts, disp, m_comm->get_world_comm()); - - for (size_t 
j=0; jreduce(n, m_comm->get_world_comm()); - } else { - m_comm->reduce(n, 0, m_comm->get_world_comm()); - } - return g; -} - -size_t data_store_image::get_my_num_file_bytes() { - size_t count = 0; - for (auto idx : m_my_datastore_indices) { - for (size_t i=0; i> name >> size >> units; - if (name.find("MemFree") != std::string::npos) { - found = true; - break; - } - } - in.close(); - - if (!found) { - if (m_master) { - std::cerr << - "\nWARNING: data_store_image::get_available_memory failed\n" - "failed to find 'MemFree in /proc/meminfo\n" - "therefore we cannot advise whether you have enough resources\n" - "to contain all data files in memory\n"; - } - return 0; - } - return size; -} - - -//note: this could be done on P_0 with no communication, -// but it's a cheap operation, so I'm coding it the -// easy way -void data_store_image::report_memory_constraints() { - size_t count = get_my_num_file_bytes(); - - std::vector counts(m_np); - if (m_master) { - m_comm->gather(count, counts.data(), m_comm->get_world_comm()); - } else { - m_comm->gather(count, 0, m_comm->get_world_comm()); - } - - double global = get_global_num_file_bytes()/1000000; - - if (!m_master) { - return; - } - - /// determine the amount of memory required for files for all - /// processors on this node - double required = 0; - for (int p=0; pis_rank_node_local(p, m_comm->get_world_comm())) { - required += counts[p]; - } - } - required /= 1000000; - - double available = get_available_memory(); - if (available == 0) { - std::cerr << required << " kB of memory are required for files on this node\n"; - return; - } - available /= 1000; - - double percent = required / available * 100.0; - std::cerr << "\n" - << "===============================================\n" - << "Memory Constraints for: " << m_reader->get_role() << "\n" - << "Global data set size: " << global << " MB\n" - << "Required for data set on this node: " << required << " MB\n" - << "Available memory on this node: " << available << " MB\n" - 
<< "Required is " << percent << " % of Available\n" - << "===============================================\n\n"; - - double limit = 0.8; - if (options::get()->has_float("mem_limit")) { - limit = options::get()->get_float("mem_limit"); - } - if (required > limit*available) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "You have insufficient memory to hold all required files;\n" - << "required is > 80% of available\n" - << "quitting now, so you don't waste your time\n"; - } -} - - -// the input string "s" should be one of the forms: -// dir1/[dir2/...]/filename -// /dir1/[dir2/...]/filename -// /dir1/[dir2/...]/ - -void data_store_image::stage_files() { - std::stringstream err; - - //create directory structure on local file store - std::string local_dir = m_reader->get_local_file_dir(); - create_dirs(local_dir); - m_comm->global_barrier(); - std::unordered_set make_dirs; - for (auto t : m_data_filepaths) { - size_t j = t.second.rfind('/'); - if (j != std::string::npos) { - make_dirs.insert(t.second.substr(0, j+1)); - } - } - - std::string dir = m_reader->get_file_dir(); - std::stringstream ss; - for (auto t : make_dirs) { - ss.clear(); - ss.str(""); - ss << local_dir << "/" << t; - create_dirs(ss.str()); - } - m_comm->global_barrier(); - - // copy files to local store - size_t j = 0; - struct stat stat_buf; - double tm = get_time(); - std::stringstream s; - int write_fd; - - for (auto t : m_data_filepaths) { - s.clear(); - s.str(""); - s << local_dir << '/' << t.second; - ++j; - if (j % 100 == 0 and m_master) { - double e = get_time() - tm; - double time_per_file = e / j; - int remaining_files = m_data_filepaths.size()-j; - double estimated_remaining_time = time_per_file * remaining_files; - std::cerr << "P_0: staged " << j << " of " << m_data_filepaths.size() - << " files; elapsed time: " << get_time() - tm - << "s est. 
remaining time: " << estimated_remaining_time << "s\n"; - } - if (access(s.str().c_str(), F_OK | R_OK) == -1 ) { - write_fd = open(s.str().c_str(), O_RDWR | O_CREAT, S_IRWXU); - if (write_fd == -1) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << s.str() << " for writing;\n" - << "error code is: " << std::strerror(errno) << "\n" - << "local_dir: " << local_dir << " m_cur_minibatch: " << 1+m_cur_minibatch; - throw lbann_exception(err.str()); - } - off_t offset = 0; - s.clear(); - s.str(""); - s << dir << '/' << t.second; - int read_fd = open(s.str().c_str(), O_RDONLY); - if (read_fd == -1) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << s.str() << " for reading;\n" - << "error code is: " << std::strerror(errno); - throw lbann_exception(err.str()); - } - int e2 = fstat(read_fd, &stat_buf); - if (e2 == -1) { - err << __FILE__ << " " << __LINE__ << " :: " - << "fstat failed for file: " << s.str(); - throw lbann_exception(err.str()); - } -#ifdef LBANN_SYS_SENDFILE_OK - ssize_t e = sendfile(write_fd, read_fd, &offset, stat_buf.st_size); - if (e == -1) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to copy file to location: " << s.str() - << ";\nerror code is: " << std::strerror(errno); - throw lbann_exception(err.str()); - } -#else - // FIXME: This is the fastest way to deal with this issue for - // OSX pending a "real" fix. - (void) offset;// Silence a warning - err << __FILE__ << " " << __LINE__ << " :: " - << "Header is not found on this system. " - << "sendfile() won't work. 
This is not a permanent fix."; - throw lbann_exception(err.str()); -#endif // LBANN_SYS_SENDFILE_OK - - close(read_fd); - close(write_fd); - } - } -} - -void data_store_image::fetch_data() { - if (!m_is_setup) { - return; - } - std::stringstream err; - double tm1 = get_time(); - - ++m_cur_minibatch; - if (m_cur_minibatch >= m_all_partitioned_indices[0].size()) { - m_cur_minibatch = 0; - } - - //build map: proc -> global indices that proc needs for this epoch, and - // which I own - std::unordered_map> proc_to_indices; - - for (int p = 0; p m_all_partitioned_indices[p].size() -1) { - err << __FILE__ << " " << __LINE__ << " :: " - << "send to: P_" << p << " m_cur_minibatch: " << m_cur_minibatch - << " m_all_partitioned_indices[p].size(): " << m_all_partitioned_indices[p].size(); - throw lbann_exception(err.str()); - } - const std::vector &v = m_all_partitioned_indices[p][m_cur_minibatch]; - for (auto idx : v) { - int index = (*m_shuffled_indices)[idx]; - if (m_my_datastore_indices.find(index) != m_my_datastore_indices.end()) { - proc_to_indices[p].insert(index); - } - } - } - - //read required files and start sends - m_data.clear(); - - //compute number of sends, and allocate Request vector - size_t num_sends = 0; - for (auto t : proc_to_indices) { - num_sends += t.second.size(); - } - num_sends *= m_num_img_srcs; - std::vector> send_req(num_sends); - - size_t req_idx = 0; - for (int p=0; p= m_cur_minibatch - && proc_to_indices.find(p) != proc_to_indices.end()) { - const std::unordered_set &s = proc_to_indices[p]; - read_files(s); - for (auto idx : s) { - for (size_t k=0; knb_tagged_send( - m_data[index].data(), len, p, index, - send_req[req_idx++], m_comm->get_trainer_comm()); - } - } - } - } - - - //build map: proc -> global indices that proc owns that I need - proc_to_indices.clear(); - if (m_cur_minibatch < m_my_minibatch_indices->size()) { - for (auto idx : (*m_my_minibatch_indices)[m_cur_minibatch]) { - int index = (*m_shuffled_indices)[idx]; - int owner = 
get_index_owner(index); - proc_to_indices[owner].insert(index); - } - } - - //compute number recvs, and allocate Request vector - size_t num_recvs = 0; - for (auto t : proc_to_indices) { - num_recvs += t.second.size(); - } - num_recvs *= m_num_img_srcs; - - - //start recvs - m_my_minibatch_data.clear(); - req_idx = 0; - std::vector> recv_req(num_recvs); - for (auto t : proc_to_indices) { - int owner = t.first; - const std::unordered_set &s = t.second; - for (auto idx : s) { - //note: for imagenet_reader, m_num_img_srcs = 1; - // for other readers (multi, triplet) it is larger, probably three - for (size_t k=0; knb_tagged_recv( - m_my_minibatch_data[index].data(), len, owner, - index, recv_req[req_idx++], m_comm->get_trainer_comm()); - } - } - } - - //wait for sends to finish - m_comm->wait_all(send_req); - - //wait for recvs to finish - m_comm->wait_all(recv_req); - - if (m_master && m_verbose) { - std::cerr << "TIME (P_0) for reading from local disk: " - << get_time() - tm1 << "; role: " << m_reader->get_role() - << " minibatch " << 1+m_cur_minibatch << " of " - << m_num_minibatches << "; " << m_reader->get_role() << "\n"; - } -} - -void data_store_image::write_data_filepaths() { - std::string local_dir = m_reader->get_local_file_dir(); - std::stringstream s; - s << local_dir << "/data_filepaths.txt"; - std::ofstream out(s.str().c_str()); - if (! 
out.good()) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << s.str() << " for reading"; - throw lbann_exception(err.str()); - } - for (auto t : m_data_filepaths) { - out << t.first << " " << t.second << "\n"; - } - out.close(); -} - -void data_store_image::read_data_filepaths() { - std::string local_dir = m_reader->get_local_file_dir(); - std::stringstream s; - s << local_dir << "/data_filepaths.txt"; - std::ifstream in(s.str().c_str()); - //note: file_sizes.txt may not exist on all processors; - // this is the case where we're using tarballed data, and - // running with more processors than were used to create - // the tarball - if (in.good()) { - std::string path; - int idx; - while (in >> idx >> path) { - m_data_filepaths[idx] = path; - } - in.close(); - } -} - -void data_store_image::write_file_sizes() { - std::string local_dir = m_reader->get_local_file_dir(); - std::stringstream s; - s << local_dir << "/file_sizes.txt"; - std::ofstream out(s.str().c_str()); - if (! out.good()) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << s.str() << " for reading"; - throw lbann_exception(err.str()); - } - for (auto t : m_file_sizes) { - out << t.first << " " << t.second << "\n"; - } - out.close(); -} - -std::pair data_store_image::get_tarball_exe() { - std::pair names = get_pathname_and_prefix( - options::get()->get_string("create_tarball")); - if (m_master) { - std::cerr << "tarball dir: " << names.second << "\n" - << "tarball prefix: " << names.first << "\n\n"; - } - - //This is somewhat fragile. 
For now I assume the local_dir is - //of the form: /l/ssd/train - std::string local_dir = m_reader->get_local_file_dir(); - size_t j = local_dir.rfind('/', local_dir.size()-2); - std::string work_dir = local_dir.substr(0, j); - - std::stringstream tarball_name; - tarball_name << names.first << "_" << m_reader->get_role() - << "_rank=" << m_rank << "_np=" << m_np << ".tar"; - std::stringstream exe_1; - std::stringstream exe_2; - exe_1 << "tar cf " << work_dir << '/' << tarball_name.str() - << " " << local_dir; - exe_2 << "cp -f " << work_dir << '/' << tarball_name.str() - << " " << names.second; - return std::make_pair(exe_1.str(), exe_2.str()); -} - -void data_store_image::create_tarball() { - write_file_sizes(); - write_datastore_indices(); - write_data_filepaths(); - std::pair cmds = get_tarball_exe(); - if (m_master) std::cerr << "\nabout to execute: " << cmds.first << "\n"; - run_cmd(cmds.first); - if (m_master) std::cerr << "\nabout to execute: " << cmds.second << "\n"; - run_cmd(cmds.second); -} - -bool data_store_image::are_we_creating_tarballs() { - bool retval = false; - options *opts = options::get(); - if (opts->has_string("create_tarball")) { - if (m_comm->get_procs_per_node() != 1) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "--create_tarball= was specified; you have " - << m_comm->get_procs_per_node() << "; you must use a" - << "single core per node when creating tarballs"; - throw lbann_exception(err.str()); - } - retval = true; - if (m_reader->get_role() == "validate") { - return retval; - } - } - return retval; -} - -void data_store_image::read_file_sizes() { - std::stringstream s; - s << m_reader->get_local_file_dir() << "/file_sizes.txt"; - //note: file_sizes.txt may not exist on all processors; - // this is the case where we're using tarballed data, and - // running with more processors than were used to create - // the tarball - std::ifstream in(s.str().c_str()); - if (in.good()) { - size_t idx; - size_t len; 
- while (in >> idx >> len) { - m_file_sizes[idx] = len; - } - } - m_comm->global_barrier(); - int n = m_file_sizes.size(); - m_comm->broadcast(0, n, m_comm->get_world_comm()); - std::vector s2(n*2); - if (m_rank == 0) { - size_t j = 0; - for (auto t : m_file_sizes) { - s2[j++] = t.first; - s2[j++] = t.second; - } - } - - m_comm->broadcast(0, s2.data(), s2.size(), m_comm->get_trainer_comm()); - for (size_t j=0; jget_local_file_dir(); - std::stringstream s; - s << local_dir << "/datastore_indices.txt"; - std::ofstream out(s.str().c_str()); - if (! out.good()) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << s.str() << " for reading"; - throw lbann_exception(err.str()); - } - for (auto t: m_my_datastore_indices) { - out << t << "\n"; - } -} - -void data_store_image::read_datastore_indices() { - m_my_datastore_indices.clear(); - if (m_comm->get_rank_in_node() == 0) { - std::stringstream s; - s << m_reader->get_local_file_dir() << "/datastore_indices.txt"; - std::ifstream in(s.str().c_str()); - - //note: datastore_indices.txt may not exist on all processors; - // this is the case where we're using tarballed data, and - // running with more processors than were used to create - // the tarball - if (in.good()) { - int idx; - while (in >> idx) { - m_my_datastore_indices.insert(idx); - } - in.close(); - } - } -} - -void data_store_image::stage_tarball() { - if (m_reader->get_role() == "validate") { - return; - } - std::stringstream err; - int procs_per_node = m_comm->get_procs_per_node(); - int max = m_comm->allreduce(procs_per_node, m_comm->get_world_comm(),El::mpi::MAX); - int min = m_comm->allreduce(procs_per_node, m_comm->get_world_comm(),El::mpi::MIN); - if (max != min) { - err << __FILE__ << " " << __LINE__ << " :: " - << "all nodes must contain the same number of active processors"; - throw lbann_exception(err.str()); - } - -/* - for (int j=0; jglobal_barrier(); - if (m_rank == j) { - std::cerr << "rank: " << m_rank 
<< " rank in node: " << m_comm->get_rank_in_node() << " procs_per_node: " << procs_per_node << " - } - } -*/ - - options *opts = options::get(); - if (!opts->has_int("num_tarballs")) { - err << __FILE__ << " " << __LINE__ << " :: " - << "you must specify --num_tarballs="; - throw lbann_exception(err.str()); - } - int num_tarballs = opts->get_int("num_tarballs"); - - int fake_rank = m_rank / procs_per_node; - if (m_comm->get_rank_in_node() == 0 && fake_rank < num_tarballs) { - std::string raw_name = options::get()->get_string("use_tarball"); - std::pair names = get_pathname_and_prefix(raw_name); - - if (m_master) std::cerr << "num tarballs: " << num_tarballs << "\n"; - std::stringstream tarball_filename(names.first); - tarball_filename << names.first << "_" << m_reader->get_role() - << "_rank=" << fake_rank << "_np=" << num_tarballs << ".tar"; - - //This is somewhat fragile. For now I assume the local_dir is - //of the form: /l/ssd/train - std::string local_dir = m_reader->get_local_file_dir(); - size_t j = local_dir.rfind('/', local_dir.size()-2); - std::string ssd = local_dir.substr(0, j); - - std::stringstream s; - s << "cp -f " << names.second << "/" << tarball_filename.str() << " " << ssd; - if (m_master) std::cerr << "\nabout to execute: " << s.str()<< "\n"; - run_cmd(s.str()); - - s.clear(); - s.str(""); - s << "cd " << ssd << "; tar xf " << tarball_filename.str(); - if (m_master) std::cerr << "\nabout to execute: " << s.str()<< "\n"; - run_cmd(s.str()); - - s.clear(); - s.str(""); - s << "mv " << ssd << "/" << local_dir << " " << ssd; - if (m_master) std::cerr << "\nabout to execute: " << s.str()<< "\n"; - run_cmd(s.str()); - } - m_comm->global_barrier(); -} - -} // namespace lbann diff --git a/src/data_store/data_store_imagenet.cpp b/src/data_store/data_store_imagenet.cpp deleted file mode 100644 index 0eea79eb0c7..00000000000 --- a/src/data_store/data_store_imagenet.cpp +++ /dev/null @@ -1,235 +0,0 @@ 
-//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_imagenet.hpp" -#include "lbann/data_readers/data_reader_imagenet.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/utils/timer.hpp" - -namespace lbann { - -void data_store_imagenet::setup() { - double tm1 = get_time(); - if (m_rank == 0) { - std::cerr << "starting data_store_imagenet::setup() for data reader with role: " << m_reader->get_role() << std::endl; - } - - set_name("data_store_imagenet"); - - //sanity check - image_data_reader *reader = dynamic_cast(m_reader); - if (reader == nullptr) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "dynamic_cast(m_reader) failed"; - throw lbann_exception(err.str()); - } - - - //optionally run some tests at the end of setup() - bool run_tests = false; - if (options::get()->has_bool("test_data_store") && options::get()->get_bool("test_data_store")) { - run_tests = true; - options::get()->set_option("exit_after_setup", true); - } - - data_store_image::setup(); - - - if (run_tests && m_in_memory) { - test_file_sizes(); - test_data(); - } - - double tm2 = get_time(); - if (m_rank == 0) { - std::cerr << "TIME for data_store_imagenet setup: " << tm2 - tm1 << std::endl; - } -} - - -void data_store_imagenet::test_data() { - image_data_reader *reader = dynamic_cast(m_reader); - const std::vector > & image_list = reader->get_image_list(); - std::vector b; - std::vector *datastore_buf; - for (auto t : m_my_minibatch_indices_v) { - int idx = (*m_shuffled_indices)[t]; - - //read directly from file - std::string imagepath = m_dir + image_list[idx].first; - std::ifstream in(imagepath.c_str(), std::ios::in | std::ios::binary); - if (! 
in.good()) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << imagepath << " for reading"; - throw lbann_exception(err.str()); - } - - in.seekg(0, std::ios::end); - size_t sz = in.tellg(); - in.seekg(0, std::ios::beg); - b.resize(sz); - in.read((char*)&b[0], sz); - in.close(); - - //get from datastore - get_data_buf(idx, datastore_buf, 0); - if (b != *datastore_buf) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << " :: data_store_imagenet::test_data, b != v; b.size: " - << b.size() << " datstore_buf->size: " << datastore_buf->size(); - throw lbann_exception(err.str()); - } - } - - std::cerr << "rank: " << m_rank << " role: " << m_reader->get_role() << " :: data_store_imagenet::test_data: PASSES!\n"; -} - -void data_store_imagenet::test_file_sizes() { - if (m_master) { - std::cerr << m_rank << " :: STARTING data_store_imagenet::test_file_sizes()\n"; - } - image_data_reader *reader = dynamic_cast(m_reader); - const std::vector > & image_list = reader->get_image_list(); - for (auto t : m_file_sizes) { - size_t len = get_file_size(m_dir, image_list[t.first].first); - if (t.second != len || len == 0) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "m_file_sizes[" << t.first << "] = " << t.second - << " but actual size appears to be " << len; - throw lbann_exception(err.str()); - } - } - std::cerr << "rank: " << m_rank << " role: " << m_reader->get_role() << " :: data_store_imagenet::test_file_sizes: PASSES!\n"; -} - -void data_store_imagenet::read_files(const std::unordered_set &indices) { - std::stringstream err; - std::string local_dir = m_reader->get_local_file_dir(); - std::stringstream fp; - int n = 0; - double tm = get_time(); - for (auto index : indices) { - ++n; - if (n % 100 == 0 && m_master) { - double time_per_file = (get_time() - tm) / n; - int remaining_files = indices.size() - n; - double estimated_remaining_time = time_per_file * remaining_files; 
- std::cerr << "P_0, " << m_reader->get_role() << "; read " << n << " of " - << indices.size() << " files; elapsed time " << (get_time() - tm) - << "s; est. remaining time: " << estimated_remaining_time << "\n"; - } - if (m_file_sizes.find(index) == m_file_sizes.end()) { - err << __FILE__ << " " << __LINE__ << " :: " - << " m_file_sizes.find(index) failed for index: " << index - << " role: " << m_reader->get_role(); - throw lbann_exception(err.str()); - } - if (m_data_filepaths.find(index) == m_data_filepaths.end()) { - err << __FILE__ << " " << __LINE__ << " :: " - << " m_data_filepaths.find(index) failed for index: " << index - << " m_data_filepaths.size: " << m_data_filepaths.size() - << "\nhostname: " << getenv("SLURMD_NODENAME"); - throw lbann_exception(err.str()); - } - size_t file_len = m_file_sizes[index]; - fp.clear(); - fp.str(""); - fp << local_dir << "/" << m_data_filepaths[index]; - m_data[index].resize(file_len); - try { - load_file("", fp.str(), m_data[index].data(), file_len); - } catch (std::bad_alloc& ba) { - err << m_rank << " caught std::bad_alloc, what: " << ba.what() - << " " << getenv("SLURMD_NODENAME") << " file: " - << fp.str() << " length: " << file_len << "\n"; - throw lbann_exception(err.str()); - } - } -} - -void data_store_imagenet::read_files() { - image_data_reader *reader = dynamic_cast(m_reader); - const std::vector > & image_list = reader->get_image_list(); - for (auto index : m_my_datastore_indices) { - if (m_file_sizes.find(index) == m_file_sizes.end()) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << " m_file_sizes.find(index) failed for index: " << index; - throw lbann_exception(err.str()); - } - size_t file_len = m_file_sizes[index]; - m_data[index].resize(file_len); - load_file(m_dir, image_list[index].first, m_data[index].data(), file_len); - } -} - -void data_store_imagenet::get_file_sizes() { - image_data_reader *reader = dynamic_cast(m_reader); - const std::vector > & image_list = 
reader->get_image_list(); - - std::vector global_indices(m_my_datastore_indices.size()); - std::vector bytes(m_my_datastore_indices.size()); - - size_t j = 0; - double tm = get_time(); - for (auto index : m_my_datastore_indices) { - global_indices[j] = index; - bytes[j] = get_file_size(m_dir, image_list[index].first); - ++j; - if (j % 100 == 0 and m_master) { - double e = get_time() - tm; - double time_per_file = e / j; - int remaining_files = m_my_datastore_indices.size()-j; - double estimated_remaining_time = time_per_file * remaining_files; - std::cerr << "P_0: got size for " << j << " of " << m_my_datastore_indices.size() - << " files; elapsed time: " << get_time() - tm - << "s est. remaining time: " << estimated_remaining_time << "s\n"; - } - } - if (m_master) { - std::cerr << "P_0: got size for " << j << " of " << m_my_datastore_indices.size() - << " files; elapsed time: " << get_time() - tm << "\n"; - } - - exchange_file_sizes(global_indices, bytes); -} - -void data_store_imagenet::build_data_filepaths() { - m_data_filepaths.clear(); - image_data_reader *reader = dynamic_cast(m_reader); - const std::vector > & image_list = reader->get_image_list(); - for (auto index : m_my_datastore_indices) { - m_data_filepaths[index] = image_list[index].first; - } -} - -} // namespace lbann diff --git a/src/data_store/data_store_imagenet_patches.cpp b/src/data_store/data_store_imagenet_patches.cpp deleted file mode 100644 index eb33dfeb35a..00000000000 --- a/src/data_store/data_store_imagenet_patches.cpp +++ /dev/null @@ -1,61 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. 
-// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_imagenet_patches.hpp" -#include "lbann/data_readers/data_reader_imagenet_patches.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/utils/timer.hpp" - -namespace lbann { - -void data_store_imagenet_patches::setup() { - double tm1 = get_time(); - if (m_rank == 0) { - std::cerr << "starting data_store_imagenet_patches::setup() for data reader with role: " << m_reader->get_role() << std::endl; - } - - set_name("data_store_imagenet_patches"); - - //sanity check - imagenet_reader_patches *reader = dynamic_cast(m_reader); - if (reader == nullptr) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "dynamic_cast(m_reader) failed"; - throw lbann_exception(err.str()); - } - - data_store_imagenet::setup(); - - if (m_rank == 0) { - std::cerr << "TIME for data_store_imagenet setup: " << get_time() - tm1 << std::endl; - } -} - - -} // namespace lbann diff --git a/src/data_store/data_store_merge_features.cpp b/src/data_store/data_store_merge_features.cpp deleted file mode 100644 index 78f6d5c99f5..00000000000 --- 
a/src/data_store/data_store_merge_features.cpp +++ /dev/null @@ -1,111 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_merge_features.hpp" -#include "lbann/data_store/data_store_csv.hpp" -#include "lbann/data_readers/data_reader_merge_features.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/utils/timer.hpp" - -namespace lbann { - -data_store_merge_features::data_store_merge_features(generic_data_reader *reader, model *m) : - generic_data_store(reader, m) { - set_name("data_store_merge_features"); -} - - -data_store_merge_features::~data_store_merge_features() { -} - -void data_store_merge_features::exchange_data() { - for (auto s : m_subsidiary_stores) { - data_store_csv *store = dynamic_cast(s); - store->set_shuffled_indices(m_shuffled_indices, false); - store->exchange_data(); - } -} - -void data_store_merge_features::setup() { - double tm1 = get_time(); - if (m_master) { - std::cerr << "starting data_store_merge_features::setup() for data reader with role: " << m_reader->get_role() << std::endl; - } - - generic_data_store::setup(); - - if (! 
m_in_memory) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "not yet implemented"; - throw lbann_exception(err.str()); - } - - else { - //sanity check - data_reader_merge_features *reader = dynamic_cast(m_reader); - if (reader == nullptr) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "dynamic_cast(m_reader) failed"; - throw lbann_exception(err.str()); - } - - // get list of indices used in calls to generic_data_reader::fetch_data - if (m_master) std::cerr << "calling get_minibatch_index_vector\n"; - get_minibatch_index_vector(); - - if (m_master) std::cerr << "calling get_my_datastore_indices\n"; - get_my_datastore_indices(); - - if (m_master) std::cerr << "calling exchange_mb_indices()\n"; - exchange_mb_indices(); - - std::vector &readers = reader->get_data_readers(); - m_subsidiary_stores.reserve(readers.size()); - for (auto r : readers) { - data_store_csv *store = new data_store_csv(r, m_model); - m_subsidiary_stores.push_back(store); - r->set_data_store(store); - store->set_is_subsidiary_store(); - store->set_minibatch_indices(get_minibatch_indices()); - store->set_all_minibatch_indices(get_all_minibatch_indices()); - store->set_minibatch_indices_v(get_minibatch_indices_v()); - store->set_datastore_indices(get_datastore_indices()); - store->setup(); - store->set_shuffled_indices(m_shuffled_indices, false); - store->populate_datastore(); - store->exchange_data(); - } - } - if (m_master) { - std::cerr << "TIME for data_store_merge_features setup: " << get_time() - tm1 << "\n"; - } -} - -} // namespace lbann diff --git a/src/data_store/data_store_merge_samples.cpp b/src/data_store/data_store_merge_samples.cpp deleted file mode 100644 index 43f3d4844a6..00000000000 --- a/src/data_store/data_store_merge_samples.cpp +++ /dev/null @@ -1,117 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. 
-// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_merge_samples.hpp" -#include "lbann/data_store/data_store_pilot2_molecular.hpp" -#include "lbann/data_readers/data_reader_pilot2_molecular.hpp" -#include "lbann/data_readers/data_reader_merge_samples.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/utils/timer.hpp" - -namespace lbann { - -data_store_merge_samples::data_store_merge_samples(lbann_comm *comm, generic_data_reader *reader, model *m) : - generic_data_store(reader, m) { - set_name("data_store_merge_samples"); -} - - -data_store_merge_samples::~data_store_merge_samples() { - MPI_Win_free( &m_win ); -} - -void data_store_merge_samples::setup() { - if (m_rank == 0) std::cerr << "STARTING data_store_merge_samples::setup()\n"; - //double tm1 = get_time(); - - generic_data_store::setup(); - -/* - bool run_tests = false; - if (options::get()->has_bool("test_data_store") && 
options::get()->get_bool("test_data_store")) { - run_tests = true; - } - */ - - if (m_rank == 0) { - std::cout << "starting data_store_merge_samples::setup() for data reader with role: " << m_reader->get_role() << std::endl; - } - - if (! m_in_memory) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "not yet implemented"; - throw lbann_exception(err.str()); - } - - else { - //sanity check - data_reader_merge_samples *reader = dynamic_cast(m_reader); - if (reader == nullptr) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "dynamic_cast(m_reader) failed"; - throw lbann_exception(err.str()); - } - - - // get list of indices used in calls to generic_data_reader::fetch_data - get_minibatch_index_vector(); - - std::vector &readers = reader->get_data_readers(); - for (size_t j=0; jget_data_store(); - pilot2_molecular_reader *pilot2_reader = dynamic_cast(m_reader); - generic_data_store *store = pilot2_reader->get_data_store(); - data_store_pilot2_molecular *s = dynamic_cast(store); - s->clear_minibatch_indices(); - m_subsidiary_stores.push_back(s); - } - - for (auto t : m_subsidiary_stores) { - t->set_no_shuffle(); - } - - const std::vector &num_samples_psum = reader->get_num_samples_psum(); - for (auto data_id : m_my_minibatch_indices_v) { - for (size_t i = 0; i < m_subsidiary_stores.size(); ++i) { - if (data_id < num_samples_psum[i + 1]) { - data_id -= num_samples_psum[i]; - m_subsidiary_stores[i]->add_minibatch_index(data_id); - } - } - } - } -} - -void data_store_merge_samples::exchange_data() { - //for (auto t : m_subsidiary_stores) { - -} - -} // namespace lbann diff --git a/src/data_store/data_store_multi_images.cpp b/src/data_store/data_store_multi_images.cpp deleted file mode 100644 index 23108ab4b81..00000000000 --- a/src/data_store/data_store_multi_images.cpp +++ /dev/null @@ -1,218 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, 
Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_multi_images.hpp" -#include "lbann/data_readers/data_reader_multi_images.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/utils/timer.hpp" - - -namespace lbann { - -std::vector data_store_multi_images::get_sample(size_t idx) const { - const data_reader_multi_images *reader = dynamic_cast(m_reader); - data_reader_multi_images::sample_t sample = reader->get_sample(idx); - return sample.first; -} - - -void data_store_multi_images::setup() { - double tm1 = get_time(); - if (m_rank == 0) { - std::cerr << "starting data_store_multi_images::setup() for data reader with role: " << m_reader->get_role() << std::endl; - } - - set_name("data_store_multi_images"); - - //sanity check - data_reader_multi_images *reader = dynamic_cast(m_reader); - if (reader == nullptr) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ 
<< " :: " - << "dynamic_cast(m_reader) failed\n"; - throw lbann_exception(err.str()); - } - - m_num_img_srcs = reader->get_num_img_srcs(); - - data_store_imagenet::setup(); - - if (m_rank == 0) { - std::cerr << "TIME for data_store_multi_images setup: " << get_time() - tm1 << std::endl; - } -} - -void data_store_multi_images::get_file_sizes() { - std::vector global_indices(m_my_datastore_indices.size()*m_num_img_srcs); - std::vector bytes(m_my_datastore_indices.size()*m_num_img_srcs); - - std::unordered_map names; - size_t jj = 0; - size_t j = 0; - double tm = get_time(); - for (auto base_index : m_my_datastore_indices) { - ++j; - if (j % 100 == 0 and m_master) { - double e = get_time() - tm; - double time_per_file = e / j; - int remaining_files = (m_my_datastore_indices.size()-j)*m_num_img_srcs; - double estimated_remaining_time = time_per_file * remaining_files; - std::cerr << "P_0: got size for " << j*m_num_img_srcs << " of " << m_data_filepaths.size() - << " files; elapsed time: " << get_time() - tm - << "s est. remaining time: " << estimated_remaining_time << "s\n"; - } - const std::vector sample(get_sample(base_index)); - for (size_t k=0; k &indices) { - std::stringstream err; - std::string local_dir = m_reader->get_local_file_dir(); - std::stringstream fp; - double tm = get_time(); - int n = 0; - for (auto base_index : indices) { - ++n; - if (n % 100 == 0 && m_master) { - double time_per_file = (get_time() - tm) / n; - int remaining_files = indices.size() - n; - double estimated_remaining_time = time_per_file * remaining_files; - std::cerr << "P_0, " << m_reader->get_role() << "; read " << n - << " of " << indices.size() << " files; elapsed time " - << (get_time() - tm) - << "s; est. 
remaining time: " << estimated_remaining_time << "\n"; - } - const std::vector sample(get_sample(base_index)); - for (size_t k=0; k sample(get_sample(base_index)); - for (size_t k=0; k v; - for (auto idx : m_my_minibatch_indices_v) { - int base_index = (*m_shuffled_indices)[idx]; - const std::vector sample(get_sample(base_index)); - for (size_t k=0; k names; - for (auto base_index : m_my_datastore_indices) { - const std::vector sample(get_sample(base_index)); - for (size_t k=0; k -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_pilot2_molecular.hpp" -#include "lbann/data_readers/data_reader_pilot2_molecular.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/utils/timer.hpp" -#include -#include - -namespace lbann { - -data_store_pilot2_molecular::data_store_pilot2_molecular( - generic_data_reader *reader, model *m) : - generic_data_store(reader, m) { - set_name("data_store_pilot2_molecular"); -} - -data_store_pilot2_molecular::~data_store_pilot2_molecular() { -} - -void data_store_pilot2_molecular::setup() { - double tm1 = get_time(); - std::stringstream err; - m_owner = (int)m_reader->get_compound_rank() == (int)m_rank; - m_owner_rank = m_reader->get_compound_rank(); - - if (m_owner) std::cerr << "starting data_store_pilot2_molecular::setup() for role: " - << m_reader->get_role() << "; owning processor: " << m_owner_rank << std::endl; - if (m_owner) std::cerr << "calling generic_data_store::setup()\n"; - generic_data_store::setup(); - - if (! m_in_memory) { - err << __FILE__ << " " << __LINE__ << " :: " - << "not yet implemented"; - throw lbann_exception(err.str()); - } - - else { - //sanity check - pilot2_molecular_reader *reader = dynamic_cast(m_reader); - if (reader == nullptr) { - err << __FILE__ << " " << __LINE__ << " :: " - << "dynamic_cast(m_reader) failed"; - throw lbann_exception(err.str()); - } - m_pilot2_reader = reader; - - // get list of indices used in calls to generic_data_reader::fetch_data - // for this processor - get_minibatch_index_vector(); - - // get list of indices used in calls to generic_data_reader::fetch_data - // for all processors - if (m_master) std::cerr << "calling exchange_mb_indices\n"; - exchange_mb_indices(); - - // allocate storage for the data that will be passed to the data reader's - // fetch_datum method. 
- m_data_buffer.resize(omp_get_max_threads()); - int num_features = m_pilot2_reader->get_num_features(); - int num_neighbors = m_pilot2_reader->get_num_neighbors(); - for (size_t j=0; jget_word_size() == 4) { - err << __FILE__ << " " << __LINE__ << " :: " - << "not implemented for word_size = 4; please ask Dave Hysom to fix"; - throw lbann_exception(err.str()); - } - double *features_8 = m_pilot2_reader->get_features_8(); - - int num_samples_per_frame = m_pilot2_reader->get_num_samples_per_frame(); - for (size_t j=0; jget_num_features(); - fill_in_data(data_id, num_samples_per_frame, num_features, features_8); - } -} - - -// replicated code from data_reader_pilot2_molecular::fetch_molecule -void data_store_pilot2_molecular::fill_in_data( - const int data_id, - const int num_samples_per_frame, - const int num_features, - double *features) { - const int frame = m_pilot2_reader->get_frame(data_id); - const int frame_offset = frame * num_features * num_samples_per_frame; - const int intra_frame_data_id = data_id - frame * num_samples_per_frame; - double *data = features + frame_offset + intra_frame_data_id * num_features; - if (m_data.find(data_id) != m_data.end()) { - std::stringstream err; - err << __FILE__ << " :: " << __LINE__ << " :: " - << " duplicate data_id: " << data_id; - throw lbann_exception(err.str()); - } - m_data[data_id].resize(num_features); - for (int i=0; iscale_data(i, data[i]); - } -} - - - - -void data_store_pilot2_molecular::build_nabor_map() { - //bcast neighbor data - size_t sz; - if (m_owner) { - sz = m_pilot2_reader->get_neighbors_data_size(); - } - m_comm->world_broadcast(0, sz); - - double *neighbors_8; - std::vector work; - if (m_owner) { - neighbors_8 = m_pilot2_reader->get_neighbors_8(); - } else { - work.resize(sz); - neighbors_8 = work.data(); - } - m_comm->world_broadcast(0, neighbors_8, sz); - - //fill in the nabors map - for (auto data_id : (*m_shuffled_indices)) { - int frame = m_pilot2_reader->get_frame(data_id); - int 
max_neighborhood = m_pilot2_reader->get_max_neighborhood(); - int num_samples_per_frame = m_pilot2_reader->get_num_samples_per_frame(); - const int neighbor_frame_offset = frame * num_samples_per_frame * (2 * max_neighborhood); - const int intra_frame_data_id = data_id - frame * num_samples_per_frame; - int num_neighbors = m_pilot2_reader->get_num_neighbors(); - m_neighbors[data_id].reserve(num_neighbors); - double *neighbor_data = neighbors_8 + neighbor_frame_offset + intra_frame_data_id * (2 * max_neighborhood); - for (int i=1; i *&buf) { - std::stringstream err; - std::vector &v = m_data_buffer[tid]; - std::fill(v.begin(), v.end(), 0.0); - if (m_neighbors.find(data_id) == m_neighbors.end()) { - err << __FILE__ << " " << __LINE__ << " :: " - << data_id << " not found in m_neighbors (primary molecule)"; - throw lbann_exception(err.str()); - } - if (m_my_molecules.find(data_id) == m_my_molecules.end()) { - err << __FILE__ << " " << __LINE__ << " :: " - << data_id << " not found in m_my_molecules"; - throw lbann_exception(err.str()); - } - - //fill in data for the primary molecule - size_t jj = 0; - std::vector &d1 = m_my_molecules[data_id]; - for (size_t j=0; j &nabors = m_neighbors[data_id]; - int num_features = m_pilot2_reader->get_num_features(); - for (size_t h=0; h &d2 = m_my_molecules[nabors[h]]; - for (size_t i=0; i &required_molecules, int p) { - required_molecules.clear(); - std::vector &v = m_all_minibatch_indices[p]; - for (auto t : v) { - int data_id = (*m_shuffled_indices)[t]; - required_molecules.insert(data_id); - if (m_neighbors.find(data_id) == m_neighbors.end()) { - std::stringstream err; - err << __FILE__ << " :: " << __LINE__ << " :: " - << " m_neighbors.find(" << data_id << " failed"; - throw lbann_exception(err.str()); - } - for (auto t2 : m_neighbors[data_id]) { - if (t2 != -1) { - required_molecules.insert(t2); - } - } - } -} - -void data_store_pilot2_molecular::exchange_data() { - double tm1 = get_time(); - std::stringstream err; - - //get 
set of molecules required for the next epoch for myself - std::unordered_set required_molecules; - get_required_molecules(required_molecules, m_rank); - - //start receives for my required molecules - m_my_molecules.clear(); - int num_features = m_pilot2_reader->get_num_features(); - - std::vector> recv_req(required_molecules.size()); - size_t jj = 0; - for (auto data_id : required_molecules) { - m_my_molecules[data_id].resize(num_features); - m_comm->nb_tagged_recv( - m_my_molecules[data_id].data(), num_features, m_owner_rank, - data_id, recv_req[jj++], m_comm->get_world_comm()); - } - - //owner starts sends - std::vector>> send_req; - if (m_owner) { - send_req.resize(m_np); - for (int p = 0; pnb_tagged_send( - m_data[data_id].data(), num_features, p, - data_id, send_req[p][jj++], m_comm->get_world_comm()); - } - } - } - - //wait for sends to finish - if (m_owner) { - for (size_t i=0; iwait_all(send_req[i]); - } - } - - //wait for recvs to finish - m_comm->wait_all(recv_req); - - if (m_owner) { - std::cout << "TIME for data_store_pilot2_molecular::exchange_data(): " - << get_time() - tm1 << "; role: " << m_reader->get_role() << "\n"; - } -} - -} // namespace lbann diff --git a/src/data_store/data_store_triplet.cpp b/src/data_store/data_store_triplet.cpp deleted file mode 100644 index 41d1a7bfb53..00000000000 --- a/src/data_store/data_store_triplet.cpp +++ /dev/null @@ -1,66 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. 
-// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_store/data_store_triplet.hpp" -#include "lbann/data_readers/data_reader_triplet.hpp" -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "lbann/utils/timer.hpp" - -namespace lbann { - -std::vector data_store_triplet::get_sample(size_t idx) const { - const data_reader_triplet *reader = dynamic_cast(m_reader); - data_reader_triplet::sample_t sample = reader->get_sample(idx); - return sample.first; -} - -void data_store_triplet::setup() { - double tm1 = get_time(); - if (m_rank == 0) { - std::cerr << "starting data_store_triplet::setup() for data reader with role: " << m_reader->get_role() << std::endl; - } - - set_name("data_store_triplet"); - - //sanity check - data_reader_triplet *reader = dynamic_cast(m_reader); - if (reader == nullptr) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "dynamic_cast(m_reader) failed"; - throw lbann_exception(err.str()); - } - - data_store_multi_images::setup(); - - if (m_rank == 0) { - std::cerr << "TIME for data_store_triplet setup: " << get_time() - tm1 << std::endl; - } -} - -} // namespace lbann diff --git a/src/data_store/generic_data_store.cpp b/src/data_store/generic_data_store.cpp index 6b815003aac..6bed2560593 100644 --- a/src/data_store/generic_data_store.cpp +++ 
b/src/data_store/generic_data_store.cpp @@ -144,6 +144,7 @@ void generic_data_store::setup() { return; } + #if 0 // get the set of global indices used by this processor in // generic_data_reader::fetch_data(). Note that these are // "original' indices, not shuffled indices, i.e, these indices @@ -168,7 +169,7 @@ void generic_data_store::setup() { if (m_master) { std::cerr << "my num minibatch indices: " << m_my_minibatch_indices->size() << "\n"; } - +#endif } void generic_data_store::print_partitioned_indices() { @@ -317,12 +318,6 @@ void generic_data_store::exchange_partitioned_indices() { } } -void generic_data_store::init_minibatch() { - if (! m_in_memory) { - fetch_data(); - } -} - std::pair generic_data_store::get_pathname_and_prefix(std::string s) { int num_slash = std::count(s.begin(), s.end(), '/'); if (num_slash < 1 || s.back() == '/') { From cbb9270f5ca82fa7284c929ed75b5e733af2efac Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Tue, 19 Feb 2019 10:52:42 -0800 Subject: [PATCH 070/443] test --- src/data_store/jag_io.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/data_store/jag_io.cpp b/src/data_store/jag_io.cpp index 54b2cd24868..42419e86d64 100644 --- a/src/data_store/jag_io.cpp +++ b/src/data_store/jag_io.cpp @@ -14,6 +14,7 @@ #include #include + namespace lbann { jag_io::~jag_io() { From 276644481615b1261da2ea566ff8c4be45de1b7c Mon Sep 17 00:00:00 2001 From: "David A. 
Hysom" Date: Tue, 19 Feb 2019 13:46:46 -0800 Subject: [PATCH 071/443] removed code associated with the data_reader_jag_conduit_hdf5 class --- src/proto/factories/layer_factory.cpp | 49 ++------------------------- src/proto/init_image_data_readers.cpp | 13 +------ src/proto/lbann.proto | 6 ---- 3 files changed, 3 insertions(+), 65 deletions(-) diff --git a/src/proto/factories/layer_factory.cpp b/src/proto/factories/layer_factory.cpp index 9a6d1bbd0dc..4c8f698d02f 100644 --- a/src/proto/factories/layer_factory.cpp +++ b/src/proto/factories/layer_factory.cpp @@ -78,35 +78,7 @@ std::unique_ptr construct_layer( int num_neurons = 0; std::string num_neurons_method_name; - if (params.get_input_dimension_from_reader() - || params.get_image_dimension_from_reader() - || params.get_scalar_dimension_from_reader() - || params.get_image_and_scalar_dimension_from_reader()) { - num_neurons_method_name = "get_*_dimension_from_reader"; - #if defined(LBANN_HAS_CONDUIT) - const auto dr_generic = lbann::peek_map(data_readers, execution_mode::training); - const auto dr = dynamic_cast(dr_generic); - if (dr != nullptr) { - size_t input_dim = dr->get_linearized_input_size(); - size_t scalar_dim = dr->get_linearized_scalar_size(); - size_t image_dim = dr->get_linearized_channel_size() * dr->get_num_channels(); - size_t num_images = dr->get_num_img_srcs(); - - if (params.get_input_dimension_from_reader()) { - num_neurons += input_dim; - } - if (params.get_image_dimension_from_reader()) { - num_neurons += (num_images * image_dim); - } - if (params.get_scalar_dimension_from_reader()) { - num_neurons += scalar_dim; - } - if (params.get_image_and_scalar_dimension_from_reader()) { - num_neurons += (num_images * image_dim + scalar_dim); - } - } - #endif // defined(LBANN_HAS_CONDUIT) - } else if (params.get_num_neurons_of_slice_from_reader_size() > 0) { + if (params.get_num_neurons_of_slice_from_reader_size() > 0) { num_neurons_method_name = "get_num_neurons_of_slice_from_reader"; #if 
defined(LBANN_HAS_CONDUIT) const auto dr_generic = lbann::peek_map(data_readers, execution_mode::training); @@ -266,24 +238,7 @@ std::unique_ptr construct_layer( bool is_supported = false; std::string slice_point_method_name; - if (params.get_slice_points_from_reader_bool()) { - slice_point_method_name = "'get_slice_points_from_reader_bool'"; - #if defined(LBANN_HAS_CONDUIT) - size_t total = 0; - slice_points.push_back(total); - const auto dr_generic = lbann::peek_map(data_readers, execution_mode::training); - if (dynamic_cast(dr_generic) != nullptr) { - is_supported = true; - const auto dr1 = lbann::peek_map(data_readers, execution_mode::training); - lbann::data_reader_jag_conduit_hdf5 *dr = dynamic_cast(dr1); - total += dr->get_num_img_srcs() * dr->get_linearized_channel_size() * dr->get_num_channels() - + dr->get_linearized_scalar_size(); - slice_points.push_back(total); - total += dr->get_linearized_input_size(); - slice_points.push_back(total); - } - #endif // defined(LBANN_HAS_CONDUIT) - } else if (params.get_slice_points_from_reader() != "") { + if (params.get_slice_points_from_reader() != "") { slice_point_method_name = "'get_slice_points_from_reader'"; #if defined(LBANN_HAS_CONDUIT) const auto dr_generic = lbann::peek_map(data_readers, execution_mode::training); diff --git a/src/proto/init_image_data_readers.cpp b/src/proto/init_image_data_readers.cpp index e3297ac3d28..f313335dd44 100644 --- a/src/proto/init_image_data_readers.cpp +++ b/src/proto/init_image_data_readers.cpp @@ -318,7 +318,7 @@ void init_image_data_reader(const lbann_data::Reader& pb_readme, const lbann_dat std::shared_ptr pp; // set up the image preprocessor - if ((name == "imagenet") || (name == "jag_conduit") || (name == "jag_conduit_hdf5") || + if ((name == "imagenet") || (name == "jag_conduit") || (name == "triplet") || (name == "mnist_siamese") || (name == "multi_images") || (name == "moving_mnist")) { pp = std::make_shared(); @@ -354,17 +354,6 @@ void init_image_data_reader(const 
lbann_data::Reader& pb_readme, const lbann_dat } else if (name == "moving_mnist") { reader = new moving_mnist_reader(7, 40, 40, 2); #ifdef LBANN_HAS_CONDUIT - } else if (name =="jag_conduit_hdf5") { - data_reader_jag_conduit_hdf5* reader_jag = new data_reader_jag_conduit_hdf5(pp, shuffle); - const lbann_data::DataSetMetaData::Schema& pb_schema = pb_metadata.schema(); - reader_jag->set_image_dims(width, height); - reader_jag->set_scalar_keys(pb_schema.scalar_keys()); - reader_jag->set_input_keys(pb_schema.input_keys()); - reader_jag->set_image_views(pb_schema.image_views()); - reader_jag->set_image_channels(pb_schema.image_channels()); - reader = reader_jag; - if (master) std::cout << reader->get_type() << " is set" << std::endl; - return; } else if (name =="jag_conduit") { data_reader_jag_conduit* reader_jag = new data_reader_jag_conduit(pp, shuffle); const lbann_data::DataSetMetaData::Schema& pb_schema = pb_metadata.schema(); diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 8d5cb67798e..4d817d8f4c0 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -219,12 +219,6 @@ message DataSetMetaData { } repeated JAGDataSlice independent = 97; repeated JAGDataSlice dependent = 98; - - // for jag_conduit_hdf5 - string scalar_keys = 1004; - string input_keys = 1005; - string image_views = 1006; - string image_channels = 1007; //------------------ end of only for jag_conduit ----------------------- } From 0dc36677fa0ae78996911224969d05bcab4988eb Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Tue, 19 Feb 2019 13:51:21 -0800 Subject: [PATCH 072/443] deleting code associated with jag_store, which was used by the now-deleted data_reader_jag_conduit_hdf5 class. deleting the jag_converter.cpp driver, which converted conduit files toa bonary format. 
--- include/lbann/data_store/jag_io.hpp | 193 ----- include/lbann/data_store/jag_store.hpp | 264 ------- src/data_store/CMakeLists.txt | 7 - src/data_store/jag_converter.cpp | 245 ------ src/data_store/jag_io.cpp | 390 ---------- src/data_store/jag_store.cpp | 999 ------------------------- 6 files changed, 2098 deletions(-) delete mode 100644 include/lbann/data_store/jag_io.hpp delete mode 100644 include/lbann/data_store/jag_store.hpp delete mode 100644 src/data_store/jag_converter.cpp delete mode 100644 src/data_store/jag_io.cpp delete mode 100644 src/data_store/jag_store.cpp diff --git a/include/lbann/data_store/jag_io.hpp b/include/lbann/data_store/jag_io.hpp deleted file mode 100644 index e3947f2d27b..00000000000 --- a/include/lbann/data_store/jag_io.hpp +++ /dev/null @@ -1,193 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-// -//////////////////////////////////////////////////////////////////////////////// - -#ifndef __JAG_IO_HPP__ -#define __JAG_IO_HPP__ - -#include "lbann_config.hpp" - -#ifdef LBANN_HAS_CONDUIT -#include "conduit/conduit.hpp" -#include "conduit/conduit_relay.hpp" -#include -#include -#include - -namespace lbann { - -class jag_io { - public: - - using TypeID = conduit::DataType::TypeID; - - /** - * NOTE: some methods below take variables "string node_name" while others - * take "string key." My convention is that "node_name" indicated - * a fully qualified name, i.e, it begins with the sample id: - * 0/field_1/field_2 - * on the other hand, "key" does not contain the sample id: - * field_1/field_2 - */ - -/// WARNING! CAUTION! BE ADVISED! -/// cut-n-paste from data_reader_jag_conduit; this is -/// fragile -- probably best to but these in a small -/// file that both reader and store can then include -/// @todo -using ch_t = double; ///< jag output image channel type -using scalar_t = double; ///< jag scalar output type -using input_t = double; ///< jag input parameter type - - //! ctor - jag_io(); - - //! copy ctor - jag_io(const jag_io&) = default; - - //! operator= - jag_io& operator=(const jag_io&) = default; - - jag_io * copy() const { return new jag_io(*this); } - - //! dtor - ~jag_io(); - - /// converts conduit data to our format and saves to disk - void convert(std::string conduit_pathname, std::string base_dir); - - /// load our format from disk - void load(std::string base_dir); - - /// returns the set of the child nodes of the parent node - /// @todo not currently used; may not be needed - const std::unordered_set &get_children(std::string parent) const; - - //const std::vector & get_scalars(size_t sample_id) const; - - /// Returns size and data type information for the requested node. 
- /// 'total_bytes_out' is num_elts_out * bytes_per_elt_out; - void get_metadata(std::string node_name, size_t &num_elts_out, size_t &bytes_per_elt_out, size_t &total_bytes_out, conduit::DataType::TypeID &type_out); - - /// Reads the requested data from file and returns it in 'data_out.' - /// The caller is responsible for allocating sufficient memory, - /// i.e, they should previously have called get_metadata(...), then - /// allocated memory, i.e, std::vector d(total_bytes_out); - void get_data(std::string node_name, char * data_out, size_t num_bytes); - - /// returns true if the key exists in the metadata map - bool has_key(std::string key) const; - - const std::vector& get_scalar_choices() const; - - const std::vector& get_input_choices() const; - - size_t get_num_samples() const { - return m_num_samples; - } - - /// this method is provided for testing and debugging - size_t get_offset(std::string node_name); - - /// this method is provided for testing and debugging - const std::vector &get_keys() const { - return m_keys; - } - - /// this method is provided for testing and debugging - void print_metadata(); - -protected : - - struct MetaData { - MetaData() {} - MetaData(TypeID tp, int elts, int bytes, size_t _offset = 0) - : dType(tp), num_elts(elts), num_bytes(bytes), offset(_offset) {} - - conduit::DataType::TypeID dType; - int num_elts; //number of elements in this field - int num_bytes; //number of bytes for a single element - size_t offset; //offset wrt m_data: where this resides on disk - }; - - size_t m_num_samples; - - /// used when reading converted data from file; - std::ifstream *m_data_stream; - - /// recursive function invoked by convert(); - /// fills in m_keys and m_parent_to_children - void get_hierarchy( - conduit::Node &head, - std::string parent_name); - - /// maps parent node_named to child node_names - std::unordered_map> m_parent_to_children; - - /// contains the same keys that appear in m_metadata; saving them - /// separately so we can 
iterate through in the order they appeared - std::vector m_keys; - - - ///@todo this may go away ... - //std::unordered_map m_data_reader; - - std::unordered_map m_metadata; - - /// number of bytes required to store each sample on disk in our format - size_t m_sample_offset; - - //std::vector m_scalar_keys; - - std::vector m_input_keys; - - /// some conduit keys contain white space, which is annoying to parse, - /// so internally we convert them to underscores - void white_space_to_underscore(std::string &s) { - for (size_t j=0; j -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-//////////////////////////////////////////////////////////////////////////////// - -#ifndef _JAG_STORE_HPP__ -#define _JAG_STORE_HPP__ - -#include "lbann_config.hpp" - -#ifdef LBANN_HAS_CONDUIT - -#include "lbann/utils/timer.hpp" -#include "conduit/conduit.hpp" -#include "conduit/conduit_relay.hpp" -#include "lbann/data_readers/data_reader_jag_conduit_hdf5.hpp" -#include -#include -#include -#include -#include "lbann/comm.hpp" -#include "hdf5.h" - -namespace lbann { - -class data_reader_jag_conduit_hdf5; - -/** - * Loads the pairs of JAG simulation inputs and results from a conduit-wrapped hdf5 file - */ -class jag_store { - public: - - #define METADATA_FN "metadata.txt" - #define IMAGE_SIZE_PER_CHANNEL 4096 - #define NUM_IMAGE_CHANNELS 4 - #define MAX_SAMPLES_PER_BINARY_FILE 1000 - //#define MAX_SAMPLES_PER_BINARY_FILE 10000 - #define BINARY_FILE_BASENAME "converted" - #define FILES_PER_DIR 1000 - - jag_store(); - - jag_store(const jag_store&) = default; - - jag_store& operator=(const jag_store&) = default; - - ~jag_store() {} - - void set_comm(lbann_comm *comm) { - m_comm = comm; - m_num_procs_in_world = m_comm->get_procs_in_world(); - m_rank_in_world = m_comm->get_rank_in_world(); - } - - /// Returns the requested inputs - const std::vector & fetch_inputs(size_t sample_id, size_t tid) const { - check_sample_id(sample_id); - return m_data_inputs[tid]; - } - - /// Returns the requested scalars - const std::vector & fetch_scalars (size_t sample_id, size_t tid) const { - check_sample_id(sample_id); - return m_data_scalars[tid]; - } - - /// Returns the requested images - const std::vector> & fetch_views(size_t sample_id, size_t tid) { - check_sample_id(sample_id); - return m_data_images[tid]; - } - - void setup(data_reader_jag_conduit_hdf5 *reader, - bool num_stores = 1, - int my_rank = 0); - - void set_image_size(size_t n) { m_image_size = n; } - - size_t get_linearized_data_size() const; - size_t get_linearized_image_size() const { return 4096*4; } - //size_t 
get_linearized_image_size() const { return m_image_size; } - size_t get_linearized_channel_size() const { return IMAGE_SIZE_PER_CHANNEL; } - - /// returns the total number of channels in a view (image) - /// Note: probably should be deleted, since we can chose which - /// channels to use - //size_t get_num_channels() const { return NUM_IMAGE_CHANNELS; } - size_t get_linearized_scalar_size() const { return m_scalars_to_use.size(); } - size_t get_linearized_input_size() const { return m_inputs_to_use.size(); } - - /// returns the number of views (images) that we're actually using - /// (so currently may be 0, 1, 2, or 3) - size_t get_num_img_srcs() const { return m_image_views_to_use.size(); } - - /// returns the number of channels that we're actually using per view, - /// i.e, may be 1, 2, 3, or 4 - size_t get_num_channels_per_view() const { return m_image_channels_to_use.size(); } - - /// returns the number channels that we're actually using, * num_views - size_t get_total_num_channels() const { return get_num_img_srcs() * get_num_channels_per_view(); } - - const std::vector & get_linearized_data_sizes() const { return m_data_sizes; } - - bool check_sample_id(const size_t sample_id) const { return sample_id < m_num_samples; } - - size_t get_num_samples() const { return m_num_samples; } - - void load_data(int data_id, int tid) { - check_sample_id(data_id); - if (m_mode == 1) { - load_data_conduit(data_id, tid); - } else if (m_mode == 2) { - load_data_binary(data_id, tid); - } - } - - private: - - /// one of these is called by load_data() - void load_data_conduit(int data_id, int tid); - void load_data_binary(int data_id, int tid); - - size_t m_image_size; - - size_t m_num_samples; - - lbann_comm *m_comm; - - int m_num_procs_in_world; - - int m_rank_in_world; - - bool m_master; - - data_reader_jag_conduit_hdf5 *m_reader; - - /// next three will contain the actual sample data; - /// they are filled in by one of the load_data_XX methods; - /// each thread has a 
separate set of buffers - std::vector> m_data_inputs; - std::vector> m_data_scalars; - std::vector>> m_data_images; - - /// next four are called by setup() - void build_data_sizes(); - void load_variable_names(); - void report_linearized_sizes(); - void allocate_memory(); - - /// these hold the names of the dependent and independant variables - /// that we're using - std::vector m_inputs_to_use; - std::vector m_scalars_to_use; - std::vector m_image_views_to_use; - std::vector m_image_channels_to_use; - - /// these fill in the above four variables; - /// they are called by load_variable_names() - void load_inputs_to_use(const std::string &keys); - void load_scalars_to_use(const std::string &keys); - void load_image_views_to_use(const std::string &keys); - void load_image_channels_to_use(const std::string &keys); - - std::vector m_data_sizes; - - void check_entry(std::string &e) { - if (m_key_map.find(e) == m_key_map.end()) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: m_key_map is missing entry: " + e); - } - } - - /// one of the next three methods is called by setup(), depending - /// on the value of --mode= - int m_mode; - void setup_conduit(); // mode = 1 - void setup_binary(); // mode = 2 - void setup_testing(); // mode = 3 - - size_t m_max_samples; - - /// next three are used when reading samples from conduit files - std::vector m_conduit_filenames; - std::vector m_data_id_to_conduit_filename_idx; - std::vector m_data_id_to_sample_id; - - - // these are used when reading samples from binary formatted files - std::vector> m_scratch; - std::unordered_map m_key_map; - // maps a shuffled index to - std::unordered_map> m_sample_map; - std::unordered_map m_sample_id_to_global_idx; - std::vector m_binary_filenames; - // maps global idx (i.e: shuffled indices subscript) to sample ID - // (e.g: 0.9.99.57:1) - std::unordered_map m_sample_id_map; - size_t m_sample_len; - std::vector> m_streams; - void read_key_map(const 
std::string &filename); - - /// methods and variables for dealing with normalization - void load_normalization_values(); - void load_normalization_values_impl( - std::vector> &values, - const std::vector &variables); - - std::vector> m_normalize_inputs; - std::vector> m_normalize_scalars; - std::vector> m_normalize_views; - - // magic numbers (from Rushil); these are for normalizing the images - // 0.035550589898738466 - // 0.0012234476453273034 - // 1.0744965260584181e-05 - // 2.29319120949361e-07 - - // testing and other special methods: if these are invoked something - // special happens, the the code exits; in the case a model is not run - void compute_min_max(); - void compute_bandwidth(); - void build_conduit_index(const std::vector &filenames); - void compute_bandwidth_binary(); - void convert_conduit_to_binary(const std::vector &filenames); - void test_converted_files(); - - /// functions and variables for converting conduit files to a binary format; - /// these are used by convert_conduit_to_binary - void write_binary_metadata(std::string dir); - void write_binary(const std::vector &input, const std::string &dir); - std::ofstream m_name_file; - size_t m_global_file_idx; - size_t m_num_converted_samples; - void open_binary_file_for_output(const std::string &dir); - std::ofstream m_binary_output_file; - std::ofstream m_binary_output_file_names; - std::string m_binary_output_filename; -}; - -} // end of namespace lbann -#endif //ifdef LBANN_HAS_CONDUIT - -#endif // _JAG_STORE_HPP__ diff --git a/src/data_store/CMakeLists.txt b/src/data_store/CMakeLists.txt index 1e3ac36a98f..eee68016945 100644 --- a/src/data_store/CMakeLists.txt +++ b/src/data_store/CMakeLists.txt @@ -11,14 +11,7 @@ set_full_path(THIS_DIR_SOURCES data_store_pilot2_molecular.cpp data_store_triplet.cpp data_store_jag.cpp - jag_io.cpp - jag_store.cpp ) -add_executable( jag_converter-bin jag_converter.cpp ) -target_link_libraries(jag_converter-bin lbann ) -set_target_properties(jag_converter-bin 
PROPERTIES OUTPUT_NAME jag_converter) - - set(SOURCES "${SOURCES}" "${THIS_DIR_SOURCES}" PARENT_SCOPE) diff --git a/src/data_store/jag_converter.cpp b/src/data_store/jag_converter.cpp deleted file mode 100644 index 4f10daeb24e..00000000000 --- a/src/data_store/jag_converter.cpp +++ /dev/null @@ -1,245 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/lbann.hpp" -#include "lbann/data_store/jag_io.hpp" -#include "lbann/utils/options.hpp" -#include - -std::string usage("\n\nusage: jag_converter --mode= --bundle= --dir="); - -using namespace lbann; - -void convert(std::string bundle_fn, std::string dir); -void test(std::string bundle_fn, std::string dir); - -int main(int argc, char *argv[]) { - -#ifndef LBANN_HAS_CONDUIT - std::cerr << "ERROR: lbann was not compiled with conduit support\n" - "(LBANN_HAS_CONDUIT was not defined at compile time)\n"; - exit(9); -#else - - lbann_comm *comm = initialize(argc, argv, 42); - std::cerr << "num ranks: " << comm->get_procs_in_world() << "\n"; - - try { - options *opts = options::get(); - opts->init(argc, argv); - - std::stringstream err; - - std::string bundle_fn; - std::string convert_dir; - std::string mode; - - if (!opts->has_string("mode")) { - err << __FILE__ << " " << __LINE__ << " :: " - << "you must pass the option: --mode=,\n" - "where is \"convert\" or \"test\" or \"both\"" - << usage; - throw lbann_exception(err.str()); - } - mode = opts->get_string("mode"); - - if (!opts->has_string("bundle")) { - err << __FILE__ << " " << __LINE__ << " :: " - << "you must pass the option: --bundle=,\n" - "which is the input filename" - << usage; - throw lbann_exception(err.str()); - } - bundle_fn= opts->get_string("bundle"); - - if (!opts->has_string("dir")) { - err << __FILE__ << " " << __LINE__ << " :: " - << "you must pass the option: --dir=,\n" - "which is the directory for the converted file" - << usage; - throw lbann_exception(err.str()); - } - convert_dir = opts->get_string("dir"); - - if (mode == "convert") { - convert(bundle_fn, convert_dir); - } else if (mode == "test") { - test(bundle_fn, convert_dir); - } else if (mode == "both") { - convert(bundle_fn, convert_dir); - test(bundle_fn, convert_dir); - } else { - err << __FILE__ << " " << __LINE__ << " :: " - << "bad 
value for option: --mode=;\n" - "must be 'convert' or 'test' or 'both'" - << usage; - throw lbann_exception(err.str()); - } - - } catch (lbann_exception& e) { - e.print_report(); - } - -#endif //ifdef LBANN_HAS_CONDUIT - - return 0; -} - -#ifdef LBANN_HAS_CONDUIT -void convert(std::string bundle_fn, std::string dir) { - jag_io io; - io.convert(bundle_fn, dir); -} - -void test(std::string bundle_fn, std::string dir) { - using TypeID = conduit::DataType::TypeID; - - std::cerr << "\nstarting test ...\n"; - std::cerr << "loading conduit node...\n"; - double tm = get_time(); - conduit::Node head; - conduit::relay::io::load(bundle_fn, "hdf5", head); - std::cerr << "time to load node: " << get_time() - tm << "\n"; - - std::cerr << "calling jag.load("< 1 ? 1 : 0; - const std::vector &keys = jag.get_keys(); - - std::cerr << "using sample " << sample_id << " of " << num_samples << "\n"; - std::cerr << "num keys: " << keys.size() << "\n"; - - size_t num_elts; - size_t bytes_per_elt; - size_t total_bytes; - TypeID type; - std::vector data; - size_t pass = 0; - size_t skipped = 0; - size_t warnings = 0; - size_t total = 0; - - //=========================================================================\n; - // test #1: - // loop over all keys; test that what we get from the jag_io is identical - // to what we get directly from the conduit node - // - //=========================================================================\n; - double tm2 = get_time(); - for (size_t s=0; s -#include -#include -#include -#include - - -namespace lbann { - -jag_io::~jag_io() { - if (m_data_stream != nullptr && m_data_stream->is_open()) { - m_data_stream->close(); - delete m_data_stream; - } -} - -jag_io::jag_io() : m_data_stream(nullptr) {} - -void jag_io::get_hierarchy(conduit::Node &nd, std::string parent) { - std::string parent_2 = parent; - if (parent.find('/') != std::string::npos) { - // hack to discard keys that vary between samples; - // will fix later, when we have the samples we're 
going - // to actually use, and guidance as to which outputs - // we should use. - if (parent.find("outputs/scalars") == std::string::npos) { - m_keys.push_back( parent.substr(2)); - } - } - conduit::Node nd2 = nd[parent]; - const std::vector &children_names = nd2.child_names(); - for (auto t : children_names) { - m_parent_to_children[parent_2].insert(t); - std::string p = parent + '/' + t; - get_hierarchy(nd, p); - } -} - -void jag_io::convert(std::string conduit_pathname, std::string base_dir) { - std::stringstream err; - - //create the output directory (if it doesn't already exist) - create_dir(base_dir); - - // load the conduit bundle - std::cerr << "Loading conduit file ...\n"; - double tm1 = get_time(); - conduit::Node head; - conduit::relay::io::load(conduit_pathname, "hdf5", head); - std::cerr << "time to load: " << get_time() - tm1 << "\n"; - m_num_samples = head.number_of_children(); - std::cerr << "\nconversion in progress for " << m_num_samples << " samples\n"; - - // get the hierarchy (get all keys in the hierarchy); this fills in m_keys - // and m_parent_to_children - get_hierarchy(head, "6"); - - // fill in m_metadata - m_sample_offset = 0; - for (auto t : m_keys) { - conduit::Node nd = head["0/" + t]; - conduit::DataType m = nd.dtype(); - m_metadata[t] = MetaData((TypeID)m.id(), m.number_of_elements(), m.element_bytes(), m_sample_offset); - m_sample_offset += (m.number_of_elements() * m.element_bytes()); - } - - // write metadata to file - std::string fn = base_dir + "/metadata.txt"; - std::ofstream metadata_writer(fn.c_str()); - if (!metadata_writer.good()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << fn << " for writing"; - throw lbann_exception(err.str()); - } - - metadata_writer << m_num_samples << "\n"; - metadata_writer << m_sample_offset << "\n"; - for (auto t : m_keys) { - metadata_writer << m_metadata[t].dType << " " << m_metadata[t].num_elts - << " " << m_metadata[t].num_bytes << " " << m_metadata[t].offset - 
<< " " << t << "\n"; - } - metadata_writer.close(); - std::cerr << "wrote file: " << fn << "\n"; - - - // open output file for binary data - fn = base_dir + "/data.bin"; - std::ofstream bin_writer(fn.c_str(), std::ios::binary); - if (!bin_writer.good()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << fn << " for writing"; - throw lbann_exception(err.str()); - } - - // write binary data - for (size_t j=0; jgood()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << fn << " for reading"; - throw lbann_exception(err.str()); - } - - // fill in parent_to_child map - fn = base_dir + "/parent_to_child.txt"; - in.open(fn.c_str()); - if (!in.good()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << fn << " for reading"; - throw lbann_exception(err.str()); - } - std::string parent; - std::string child; - while (in >> parent >> child) { - m_parent_to_children[parent].insert(child); - } - in.close(); - - // open metadata file - fn = base_dir + "/metadata.txt"; - in.open(fn.c_str(), std::ios::in | std::ios::binary); - if (!in.good()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << fn << " for reading"; - throw lbann_exception(err.str()); - } - - // get num_samples, etc. 
- in >> m_num_samples; - in >> m_sample_offset; - - // fill in the metadata map - uint64 dType; - int num_elts; - int bytes_per_elt; - size_t offset; - while (in >> dType >> num_elts >> bytes_per_elt >> offset >> key) { - m_metadata[key] = MetaData((TypeID)dType, num_elts, bytes_per_elt, offset); - m_keys.push_back(key); - } - in.close(); - - #if 0 - // fill in m_scalar_keys - fn = base_dir + "/scalar_keys.txt"; - in.open(fn.c_str()); - if (!in.good()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << fn << " for reading"; - throw lbann_exception(err.str()); - } - while (in >> key) { - m_scalar_keys.push_back(key); - } - in.close(); - #endif - - // fill in m_input_keys - fn = base_dir + "/input_keys.txt"; - in.open(fn.c_str()); - if (!in.good()) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to open " << fn << " for reading"; - throw lbann_exception(err.str()); - } - while (in >> key) { - m_input_keys.push_back(key); - } - in.close(); -} - -const std::unordered_set & jag_io::get_children(std::string parent) const { - std::unordered_map>::const_iterator t; - t = m_parent_to_children.find(parent); - if (t == m_parent_to_children.end()) { - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to find " << parent << " in m_parent_to_children map\n" - << "m_parent_to_children.size(): " << m_parent_to_children.size(); - throw lbann_exception(err.str()); - } - return (*t).second; -} - -size_t jag_io::get_sample_id(std::string node_name) const { - std::stringstream err; - size_t j = node_name.find('/'); - if (j == std::string::npos) { - err << __FILE__ << " " << __LINE__ << " :: " - << "failed to find '/' in node_name: " << node_name; - throw lbann_exception(err.str()); - } - for (size_t i=0; iseekg(offset); - m_data_stream->read(data_out, num_bytes); -} - -const std::vector& jag_io::get_input_choices() const { - return m_input_keys; -} - -/* -const std::vector& jag_io::get_scalar_choices() const { - 
return m_scalar_keys; -} -*/ - - -void jag_io::get_metadata(std::string key, size_t &num_elts_out, size_t &bytes_per_elt_out, size_t &total_bytes_out, conduit::DataType::TypeID &type_out) { - num_elts_out = m_metadata[key].num_elts; - bytes_per_elt_out = m_metadata[key].num_bytes; - total_bytes_out = num_elts_out*bytes_per_elt_out; - type_out = m_metadata[key].dType; -} - -bool jag_io::has_key(std::string key) const { - if (m_metadata.find(key) == m_metadata.end()) { - return false; - } - return true; -} - -size_t jag_io::get_offset(std::string node_name) { - std::string key = get_metadata_key(node_name); - size_t sample_id = get_sample_id(node_name); - return sample_id*m_sample_offset + m_metadata[key].offset; -} - -void jag_io::print_metadata() { - for (auto key : m_keys) { - std::cerr << "type/num_elts/bytes_per_elt: " << m_metadata[key].dType - << " " << m_metadata[key].num_elts << " " << m_metadata[key].num_bytes << " offset: " << m_metadata[key].offset << " :: " << key << "\n"; - } -} - -} // namespace lbann - -#endif //#ifdef LBANN_HAS_CONDUIT - diff --git a/src/data_store/jag_store.cpp b/src/data_store/jag_store.cpp deleted file mode 100644 index 293b57d4c80..00000000000 --- a/src/data_store/jag_store.cpp +++ /dev/null @@ -1,999 +0,0 @@ -#include "lbann/data_store/jag_store.hpp" - -#ifdef LBANN_HAS_CONDUIT - -#include "lbann/utils/exception.hpp" -#include "lbann/utils/options.hpp" -#include "conduit/conduit_relay.hpp" -#include "conduit/conduit_relay_io_hdf5.hpp" -#include "lbann/data_readers/data_reader_jag_conduit_hdf5.hpp" -#include "lbann/utils/glob.hpp" -#include -#include -#include "hdf5.h" -#include - -namespace lbann { - -jag_store::jag_store() - : m_image_size(0), - m_comm(nullptr), - m_master(false), - m_max_samples(INT_MAX) - { - } - -void load_keys(std::vector &v, const std::string &keys) { - std::stringstream s; - s << keys; - std::string key; - while (s >> key) { - v.push_back(key); - } -} - -void jag_store::load_scalars_to_use(const 
std::string &keys) { - m_scalars_to_use.clear(); - load_keys(m_scalars_to_use, keys); -} - -void jag_store::load_inputs_to_use(const std::string &keys) { - m_inputs_to_use.clear(); - load_keys(m_inputs_to_use, keys); -} - -void jag_store::load_image_views_to_use(const std::string &keys) { - m_image_views_to_use.clear(); - size_t last = 0; - while (true) { - size_t j1 = keys.find('(', last); - size_t j2 = keys.find(')', last); - if (j1 == std::string::npos || j2 == std::string::npos) { - break; - } - std::string key = keys.substr(j1, j2-j1+1); - m_image_views_to_use.push_back(key); - last = j2+1; - } -} - -void jag_store::load_image_channels_to_use(const std::string &keys) { - std::stringstream s; - s << keys; - int channel; - while (s >> channel) { - m_image_channels_to_use.push_back(channel); - } -} - -void jag_store::build_conduit_index(const std::vector &filenames) { - options *opts = options::get(); - if (!opts->has_string("base_dir")) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: you must pass --base_dir= on the cmd line"); - } - const std::string base_dir = opts->get_string("base_dir"); - const std::string output_fn = opts->get_string("build_conduit_index"); - std::stringstream ss; - ss << output_fn << "." 
<< m_rank_in_world; - std::ofstream out(ss.str().c_str()); - if (!out.good()) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: failed to open " + output_fn + " for writing"); - } - if (m_master) std::cerr << "writing index file: " << output_fn << "\n"; - if (m_rank_in_world == 0) { - out << base_dir << "\n"; - } - if (m_master) std::cerr << "base dir: " << base_dir << "\n"; - - int global_num_samples = 0; - for (size_t j=m_rank_in_world; j cnames; - conduit::relay::io::hdf5_group_list_child_names(hdf5_file_hnd, "/", cnames); - size_t is_good = 0; - size_t is_bad = 0; - std::stringstream s5; - conduit::Node n_ok; - for (size_t h=0; hglobal_barrier(); - - int num_samples; - MPI_Reduce(&global_num_samples, &num_samples, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); - //m_comm->reduce(&global_num_samples, 1, 0, m_comm->get_world_comm(), El::mpi::SUM); - // - - if (m_master) { - std::stringstream s3; - s3 << "echo " << num_samples << " " << filenames.size() << " > num_samples_tmp"; - system(s3.str().c_str()); - s3.clear(); - s3.str(""); - s3 << "cat num_samples_tmp "; - for (int k=0; k " << output_fn; - system(s3.str().c_str()); - s3.clear(); - s3.str(""); - s3 << "chmod 660 " << output_fn; - system(s3.str().c_str()); - s3.clear(); - s3.str(""); - s3 << "rm -f num_samples_tmp "; - for (int k=0; kglobal_barrier(); -} - -void jag_store::setup_testing() { - setup_conduit(); - setup_binary(); -} - -void jag_store::setup( - data_reader_jag_conduit_hdf5 *reader, - bool num_stores, - int my_rank) { - double tm1 = get_time(); - - m_master = m_comm->am_world_master(); - options *opts = options::get(); - m_reader = reader; - - m_max_samples = INT_MAX; - if (opts->has_int("max_samples")) { - m_max_samples = (size_t)opts->get_int("max_samples"); - } - - bool has_conduit_filenames = false; - if (opts->has_string("conduit_filelist")) { - std::string f = opts->get_string("conduit_filelist"); - std::ifstream in(f.c_str()); - if (!in) { - throw 
lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: failed to open " + f + " for reading"); - } - std::string line; - while (getline(in, line)) { - m_conduit_filenames.push_back(line); - } - in.close(); - if (m_max_samples < m_conduit_filenames.size()) { - m_conduit_filenames.resize(m_max_samples); - } - has_conduit_filenames = true; - } - - if (m_image_size == 0) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: image_size = 0; probably set_image_size() has not been called"); - } - - // optionally build an index file, then exit. Each line of the file will - // contain a conduit filename, followed by the valid sample_ids in - // the conduit file - if (opts->has_string("build_conduit_index")) { - if (! has_conduit_filenames) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: you must pass --conduit_filenames= on the cmd line when building a conduit index"); - } - build_conduit_index(m_conduit_filenames); - exit(0); - } - - load_variable_names(); - build_data_sizes(); - report_linearized_sizes(); - allocate_memory(); - load_normalization_values(); - - if (!opts->has_int("mode")) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: you must pass --mode= on cmd line, where is 1 (to use conduit files) or 2 or 3 (for testing) (to use binary files)"); - } - m_mode = opts->get_int("mode"); - if (! (m_mode == 1 || m_mode == 2 || m_mode == 3)) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: you must pass --mode= on cmd line, where is 1 (to use conduit files) or 2 (to use binary files); or 4 (for testing) you passed: " + std::to_string(m_mode)); - } - if (m_master) std::cerr << "Running in mode: " << m_mode << "\n"; - - // optionally convert conduit files to our binary format, then exit - if (opts->has_string("convert_conduit")) { - if (! 
has_conduit_filenames) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: you must pass --conduit_filenames= on the cmd line when converting conduit filenames to binary"); - } - setup_conduit(); - convert_conduit_to_binary(m_conduit_filenames); - exit(0); - } - - if (m_mode == 1) { - setup_conduit(); - } else if (m_mode == 2) { - setup_binary(); - } else { - setup_testing(); - } - - if (m_master) { - std::cerr << "jag_store::setup time: " << get_time() - tm1 << "; num samples: " << m_num_samples << std::endl; - } - - if (m_mode == 3) { - test_converted_files(); - m_comm->global_barrier(); - exit(0); - } - - // optionally compute min/max values, then exit. - // This is only needed for one-time computation of normalization values - if (opts->has_string("compute_min_max")) { - compute_min_max(); - exit(0); - } - - // optionally check bandwidth (sort of), then exit - if (opts->has_int("bandwidth")) { - if (m_mode == 0) { - compute_bandwidth(); - } else { - compute_bandwidth_binary(); - } - exit(0); - } -} - -size_t jag_store::get_linearized_data_size() const { - size_t n = m_image_views_to_use.size() * m_image_channels_to_use.size() * get_linearized_channel_size() - + m_scalars_to_use.size() - + m_inputs_to_use.size(); - return n; -} - -void jag_store::build_data_sizes() { - for (size_t i=0; i 0.0) { - m_data_sizes.push_back(get_linearized_scalar_size()); - } - if (get_linearized_input_size() > 0.0) { - m_data_sizes.push_back(get_linearized_input_size()); - } -} - -void jag_store::report_linearized_sizes() { - if (! 
m_master) { - return; - } - std::cerr - << "===================================================================\n" - << "LINEARIZED SIZES REPORT:\n" - << "get_linearized_data_size: " << get_linearized_data_size() << "\n" - << "get_linearized_image_size: " << get_linearized_image_size() << "\n" - << "get_linearized_channel_size: " << get_linearized_channel_size() << "\n" - << "get_num_channels: " << get_num_channels_per_view() << "\n" - << "get_linearized_scalar_size: " << get_linearized_scalar_size() << "\n" - << "get_linearized_input_size: " << get_linearized_input_size() << "\n" - << "get_num_img_srcs: " << get_num_img_srcs() << "\n" - << "sizes vector: "; - size_t total = 0; - for (auto t : m_data_sizes) { - std::cerr << t << " "; - total += t; - } - std::cerr << "\n"; - std::cerr << "total, from m_data_sizes; should be same as above: " - << total << "\n" - << "===================================================================\n"; -} - -void jag_store::load_data_binary(int data_id, int tid) { - const int file_idx = m_sample_map[data_id].first; -// std::string fn = m_binary_filenames[file_idx]; - const int sample_idx = m_sample_map[data_id].second; - - // std::ifstream in(fn.c_str(), std::ios::out | std::ios::binary); - /* - if (!in.good()) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: failed to open: " + fn + " for reading; data_id: " + std::to_string(data_id) + " tid: " + std::to_string(tid)); - } - */ - -// in.seekg(sample_idx*m_sample_len); - m_streams[tid][file_idx]->seekg(sample_idx*m_sample_len); - m_streams[tid][file_idx]->read((char*)m_scratch[tid].data(), m_sample_len); - //in.read((char*)m_scratch[tid].data(), m_sample_len); -// in.close(); - -// size_t offset = sample_idx * m_sample_len; - -// in.seekg(offset); - // in.read((char*)m_scratch[tid].data(), m_sample_len); - - for (size_t j=0; j &filenames, const std::string &dir) { - if (m_master) std::cerr << "starting jag_store::write_binary\n"; - options 
*opts = options::get(); - const std::string output_dir = opts->get_string("convert_conduit"); - - m_global_file_idx = 0; - m_num_converted_samples = 0; - m_binary_output_filename = ""; - open_binary_file_for_output(output_dir); - - size_t num_samples_written = 0; - std::string fn; - for (size_t k=0; k> fn; - hid_t hdf5_file_hnd = conduit::relay::io::hdf5_open_file_for_read( fn ); - std::vector cnames; - conduit::relay::io::hdf5_group_list_child_names(hdf5_file_hnd, "/", cnames); - if (m_master) std::cerr << " num samples this file: " << cnames.size() << "\n"; - - conduit::Node n_ok; - conduit::Node node; - for (auto sample_name : cnames) { - const std::string key_1 = "/" + sample_name + "/performance/success"; - conduit::relay::io::hdf5_read(hdf5_file_hnd, key_1, n_ok); - int success = n_ok.to_int64(); - if (success == 1) { - m_binary_output_file_names << sample_name << "\n"; - for (auto input_name : m_inputs_to_use) { - const std::string key = "/" + sample_name + "/inputs/" + input_name; - conduit::relay::io::hdf5_read(hdf5_file_hnd, key, node); - //this is fragile; will break if input_t changes - double tmp = node.to_float64(); - m_binary_output_file.write((char*)&tmp, sizeof(data_reader_jag_conduit_hdf5::input_t)); - } - - for (auto scalar_name : m_scalars_to_use) { - const std::string key = "/" + sample_name + "/outputs/scalars/" + scalar_name; - conduit::relay::io::hdf5_read(hdf5_file_hnd, key, node); - //this is fragile; will break if scalar_t changes - double tmp = node.to_float64(); - m_binary_output_file.write((char*)&tmp, sizeof(data_reader_jag_conduit_hdf5::scalar_t)); - } - - for (auto image_name : m_image_views_to_use) { - const std::string key = "/" + sample_name + "/outputs/images/" + image_name + "/0.0/emi"; - conduit::relay::io::hdf5_read(hdf5_file_hnd, key, node); - conduit::float32_array emi = node.value(); - const size_t image_size = emi.number_of_elements(); - //this is fragile; will break if ch_t changes - for (int channel=0; channel<4; 
channel++) { - for (size_t j=channel; j= m_max_samples) { - conduit::relay::io::hdf5_close_file(hdf5_file_hnd); - goto EARLY_EXIT; - break; - } - ++num_samples_written; - if (num_samples_written == MAX_SAMPLES_PER_BINARY_FILE) { - num_samples_written = 0; - open_binary_file_for_output(output_dir); - } - } - } - conduit::relay::io::hdf5_close_file(hdf5_file_hnd); - } -EARLY_EXIT : - m_binary_output_file.close(); - m_binary_output_file_names.close(); - if (m_master) std::cerr << "LEAVING jag_store::write_binary\n"; -} - -void jag_store::read_key_map(const std::string &filename) { - if (m_master) std::cerr << "starting jag_store::read_key_map; opening file: " << filename << "\n"; - std::ifstream in(filename.c_str()); - if (!in.good()) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: failed to open: " + filename); - } - - std::string line; - getline(in, line); - getline(in, line); - getline(in, line); - - std::string key; - int n; - for (int k=0; k<3; k++) { - getline(in, line); - std::stringstream s; - s << line; - s >> key >> n; - for (int j=0; j &conduit_filenames) { - m_num_converted_samples = 0; - - if (m_comm->get_procs_in_world() != 1) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: you must run convert_conduit with a single processor"); - } - - options *opts = options::get(); - std::string output_dir = opts->get_string("convert_conduit"); - if (m_master) { - char b[128]; - sprintf(b, "mkdir --mode=770 -p %s", output_dir.c_str()); - system(b); - write_binary_metadata(output_dir); - } - write_binary(conduit_filenames, output_dir); -} - -void jag_store::load_variable_names() { - load_inputs_to_use(m_reader->m_input_keys); - load_scalars_to_use(m_reader->m_scalar_keys); - load_image_views_to_use(m_reader->m_image_views); - load_image_channels_to_use(m_reader->m_image_channels); - - if (m_master) { - std::cerr << "using these inputs:\n"; - for (auto t : m_inputs_to_use) { - 
std::cerr << " " << t << "\n"; - } - std::cerr << "\nusing these scalars:\n"; - for (auto t : m_scalars_to_use) { - std::cerr << " " << t << "\n"; - } - std::cerr << "\nusing these views:\n"; - for (auto t : m_image_views_to_use) { - std::cerr << " " << t << "\n"; - } - std::cerr << "\nusing these image channels: "; - for (auto t : m_image_channels_to_use) { - std::cerr << t << " "; - } - std::cerr << "\n"; - } -} - -void jag_store::allocate_memory() { - size_t nthreads = omp_get_max_threads(); - if (m_master) std::cerr << "starting jag_store::allocate_memory; nthreads: " << nthreads << "\n"; - m_data_inputs.resize(nthreads); - m_data_scalars.resize(nthreads); - for (size_t j=0; jget_procs_in_world(); - if (np != 1) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: mode 3 (test converted binary files) must be run with a single process"); - } - std::cerr << "\nstarting jag_store::test_converted_files()\n"; - - std::vector> inputs; - std::vector> scalars; - std::vector>> images; - - int tid = 0; - options *opts = options::get(); - if (!opts->has_int("num_to_test")) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: when running in test mode you must pass --num_to_test= on the cmd line"); - } - size_t num_to_test = opts->get_int("num_to_test"); - std::cerr << "\nnum to test: " << num_to_test << "\n"; - for (size_t data_id=0; data_id= m_data_id_to_sample_id.size()) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: data_id: " + std::to_string(data_id) + " >= m_data_id_to_sample_id.size(): " + std::to_string(m_data_id_to_sample_id.size())); - } - - const std::string sample_id = m_data_id_to_sample_id[data_id]; - - if (m_sample_id_to_global_idx.find(sample_id) == m_sample_id_to_global_idx.end()) { - std::cerr << "discarding " << sample_id << " since it's not found in m_sample_id_to_global_idx\n"; - //throw lbann_exception(std::string{} + __FILE__ + " " 
+ std::to_string(__LINE__) + " :: failed to find " + sample_id + " in m_sample_id_to_global_idx; data_id: " + std::to_string(data_id)); - } - -else { - int global_id = m_sample_id_to_global_idx[sample_id]; - - std::cerr << "testing sample: " << sample_id << " data_id: " << data_id << " global_id: " << global_id << "\n"; - - load_data_conduit(data_id, tid); - inputs = m_data_inputs; - scalars = m_data_scalars; - images = m_data_images; - - load_data_binary(global_id, tid); - - if (inputs != m_data_inputs) { - std::cerr << "inputs for data_id " << data_id << " failed.\n" - << "values from conduit: "; - for (auto t : inputs[tid]) std::cerr << t << " "; - std::cerr << "\nvalues from binary: "; - for (auto t : m_data_inputs[tid]) std::cerr << t << " "; - std::cerr << "\n"; - exit(9); - } - if (scalars != m_data_scalars) { - std::cerr << "scalars != m_data_scalars\n"; - exit(9); - } - - std::cerr << "1. num channels: " << images[0].size() << "\n"; - std::cerr << "2. num channels: " << m_data_images[0].size() << "\n"; - for (size_t j=0; j::epsilon(); - std::cerr << x << " " << images[0][j][x] << " " << m_data_images[0][j][x] << " epsilon? 
" << testme << "\n"; - } - } - //exit(9); - } else { - std::cerr << "PASSED: images[0][" << j << "] == m_data_images[0][" << j << "]\n"; - } - } - } - } - std::cerr << "\ntested " << m_max_samples << "; all passed\n"; -} - -void jag_store::setup_conduit() { - if (m_master) std::cerr << "starting jag_store::setup_conduit\n"; - - std::string filename; - std::string sample_id; - int j = -1; - std::vector tmp; - for (auto t : m_conduit_filenames) { - if (m_data_id_to_sample_id.size() == m_max_samples) { - break; - } - ++j; - std::stringstream s(t); - s >> filename; - tmp.push_back(filename); - while (s >> sample_id) { - m_data_id_to_conduit_filename_idx.push_back(j); - m_data_id_to_sample_id.push_back(sample_id); - if (m_data_id_to_sample_id.size() == m_max_samples) { - break; - } - } - } - m_conduit_filenames = tmp; - m_num_samples = m_data_id_to_sample_id.size(); - if (m_master) std::cerr << "finished reading " << m_num_samples << " sample names\n"; -} - -void jag_store::setup_binary() { - if (m_master) std::cerr << "starting jag_store::setup_binary\n"; - options *opts = options::get(); - if (!opts->has_string("binary_filelist")) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: you must pass --binary_filelist= on the cmd line"); - } - - const std::string fn = opts->get_string("binary_filelist"); - std::ifstream in(fn.c_str()); - if (!in.good()) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: failed to open " + fn + " for reading"); - } - if (m_master) std::cerr << "opened " << fn << " for reading\n"; - - std::string filename; - size_t num_files = 0; - while (in >> filename) { - ++num_files; - } - in.close(); - - in.open(fn.c_str()); - size_t nthreads = omp_get_max_threads(); - m_streams.resize(nthreads); - for (size_t j=0; j> filename) { - if (m_master) std::cerr << "next binary filename: " << filename << "\n"; - ++file_idx; - - for (size_t tid=0; tid> sample_id) { - //maps global 
index (shuffled index subscript) to get_procs_in_world(); - - LBANN_OMP_PARALLEL - { - const auto threadId = omp_get_thread_num(); - - LBANN_OMP_PARALLEL_FOR - for (size_t j = me; jglobal_barrier(); - if (m_master) std::cerr << "time to load all data: " << get_time() - tm1 << "\n"; -} - -void jag_store::compute_bandwidth() { - if (m_master) std::cerr << "starting bandwidth test\n"; - double tm1 = get_time(); - int me = get_rank_in_world(); - int np = m_comm->get_procs_in_world(); - size_t n = 0; - for (size_t j = me; jglobal_barrier(); - if (m_master) std::cerr << "time to load all data: " << get_time() - tm1 << "\n"; -} - -void jag_store::compute_min_max() { - std::vector inputs_max(m_inputs_to_use.size(), DBL_MIN); - std::vector inputs_min(m_inputs_to_use.size(), DBL_MAX); - std::vector inputs_avg(m_inputs_to_use.size(), 0.); - std::vector scalars_max(m_scalars_to_use.size(), DBL_MIN);; - std::vector scalars_min(m_scalars_to_use.size(), DBL_MAX);; - std::vector scalars_avg(m_scalars_to_use.size(), 0.);; - - for (size_t j = 0; j &t1 = fetch_inputs(j, 0); - for (size_t h=0; h inputs_max[h]) inputs_max[h] = t1[h]; - if (t1[h] < inputs_min[h]) inputs_min[h] = t1[h]; - } - } - - const std::vector &t2 = fetch_scalars(j, 0); - for (size_t h=0; h scalars_max[h]) scalars_max[h] = t2[h]; - if (t2[h] < scalars_min[h]) scalars_min[h] = t2[h]; - } - } - std::cerr << "\n\ninputs min: "; - for (auto t : inputs_min) std::cerr << t << " "; - std::cerr << "\ninputs max: "; - for (auto t : inputs_max) std::cerr << t << " "; - std::cerr << "\ninputs avg: "; - for (auto t : inputs_avg) std::cerr << t/m_data_id_to_conduit_filename_idx.size() << " "; - std::cerr << "\n\n"; - std::cerr << "\n\nscalars min: "; - for (auto t : scalars_min) std::cerr << t << " "; - std::cerr << "\nscalars max: "; - for (auto t : scalars_max) std::cerr << t << " "; - std::cerr << "\nscalars avg: "; - for (auto t : scalars_avg) std::cerr << t/m_data_id_to_conduit_filename_idx.size() << " "; - std::cerr << 
"\n\n"; -} - -void jag_store::load_normalization_values_impl( - std::vector> &values, - const std::vector &variables) { - values.resize(variables.size()); - for (size_t j=0; jhas_string("normalization_fn")) { - if (m_master) { - std::cerr << "\nWARNING! missing --normalization_fn option on command line; inputs, scalars, and possibly images will not be normalized. This is probably a bad thing.\n"; - } - } else { - const std::string fn = opts->get_string("normalization_fn"); - std::unordered_map> m; - std::ifstream in(fn.c_str()); - if (!in.good()) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: failed to open " + fn + " for reading"); - } - std::string variable; - double scale; - double bias; - while (in >> variable >> scale >> bias) { - m[variable] = std::make_pair(scale, bias); - } - in.close(); - for (size_t j=0; j channels_to_use; - for (int j=0; j<4; j++) { - std::string s = "C" + std::to_string(j); - channels_to_use.push_back(s); - } - load_normalization_values_impl(m_normalize_views, channels_to_use); -} - - -} // namespace lbann -#endif //ifdef LBANN_HAS_CONDUIT From bdcf30125699c3339b9e7e41f5fc617c10256be4 Mon Sep 17 00:00:00 2001 From: "David A. 
Hysom" Date: Tue, 19 Feb 2019 14:00:46 -0800 Subject: [PATCH 073/443] removing data_reader_jag_conduit_hdf5.cpp --- src/data_readers/CMakeLists.txt | 1 - .../data_reader_jag_conduit_hdf5.cpp | 409 ------------------ 2 files changed, 410 deletions(-) delete mode 100644 src/data_readers/data_reader_jag_conduit_hdf5.cpp diff --git a/src/data_readers/CMakeLists.txt b/src/data_readers/CMakeLists.txt index 5d9d67f0395..8557f494ead 100644 --- a/src/data_readers/CMakeLists.txt +++ b/src/data_readers/CMakeLists.txt @@ -21,7 +21,6 @@ set_full_path(THIS_DIR_SOURCES data_reader_imagenet_patches.cpp data_reader_jag.cpp data_reader_jag_conduit.cpp - data_reader_jag_conduit_hdf5.cpp data_reader_merge_features.cpp data_reader_merge_samples.cpp data_reader_mesh.cpp diff --git a/src/data_readers/data_reader_jag_conduit_hdf5.cpp b/src/data_readers/data_reader_jag_conduit_hdf5.cpp deleted file mode 100644 index 48a8bee0f9d..00000000000 --- a/src/data_readers/data_reader_jag_conduit_hdf5.cpp +++ /dev/null @@ -1,409 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. 
You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -// -//////////////////////////////////////////////////////////////////////////////// - -#include "lbann/data_readers/data_reader_jag_conduit_hdf5.hpp" -#include "lbann/utils/file_utils.hpp" // for add_delimiter() in load() -#include "lbann/utils/options.hpp" // for add_delimiter() in load() -#include "lbann/data_store/jag_store.hpp" -#include "lbann/models/model.hpp" - -#ifdef LBANN_HAS_CONDUIT -#include "lbann/data_readers/opencv_extensions.hpp" -#include -#include "lbann/data_readers/image_utils.hpp" -#include "lbann/utils/timer.hpp" -#include "lbann/utils/glob.hpp" -#include - - -// This macro may be moved to a global scope -#define _THROW_LBANN_EXCEPTION_(_CLASS_NAME_,_MSG_) { \ - std::stringstream err; \ - err << __FILE__ << ' ' << __LINE__ << " :: " \ - << (_CLASS_NAME_) << "::" << (_MSG_); \ - throw lbann_exception(err.str()); \ -} - -#define _THROW_LBANN_EXCEPTION2_(_CLASS_NAME_,_MSG1_,_MSG2_) { \ - std::stringstream err; \ - err << __FILE__ << ' ' << __LINE__ << " :: " \ - << (_CLASS_NAME_) << "::" << (_MSG1_) << (_MSG2_); \ - throw lbann_exception(err.str()); \ -} - -// This comes after all the headers, and is only visible within the current implementation file. 
-// To make sure, we put '#undef _CN_' at the end of this file -#define _CN_ "data_reader_jag_conduit_hdf5" - -namespace lbann { - -data_reader_jag_conduit_hdf5::data_reader_jag_conduit_hdf5(const std::shared_ptr& pp, bool shuffle) - : generic_data_reader(shuffle), - m_jag_store(nullptr), - m_owns_jag_store(false), - m_primary_reader(nullptr) { - - set_defaults(); - - if (!pp) { - _THROW_LBANN_EXCEPTION_(get_type(), " construction error: no image processor"); - } - - m_master_pps = lbann::make_unique(*pp); -} - -void data_reader_jag_conduit_hdf5::copy_members(const data_reader_jag_conduit_hdf5& rhs) { - m_jag_store = rhs.m_jag_store; - m_owns_jag_store = rhs.m_owns_jag_store; - m_image_width = rhs.m_image_width; - m_image_height = rhs.m_image_height; - m_image_num_channels = rhs.m_image_num_channels; - m_is_data_loaded = rhs.m_is_data_loaded; - m_scalar_keys = rhs.m_scalar_keys; - m_input_keys = rhs.m_input_keys; - m_success_map = rhs.m_success_map; - - if (!rhs.m_master_pps) { - _THROW_LBANN_EXCEPTION_(get_type(), " construction error: no image processor"); - } - - m_master_pps = lbann::make_unique(*rhs.m_master_pps); - m_uniform_input_type = rhs.m_uniform_input_type; -} - - -data_reader_jag_conduit_hdf5::data_reader_jag_conduit_hdf5(const data_reader_jag_conduit_hdf5& rhs) - : generic_data_reader(rhs) { - copy_members(rhs); -} - -data_reader_jag_conduit_hdf5& data_reader_jag_conduit_hdf5::operator=(const data_reader_jag_conduit_hdf5& rhs) { - // check for self-assignment - if (this == &rhs) { - return (*this); - } - - generic_data_reader::operator=(rhs); - - copy_members(rhs); - - return (*this); -} - -data_reader_jag_conduit_hdf5::~data_reader_jag_conduit_hdf5() { - if (m_owns_jag_store) { - delete m_jag_store; - } -} - -void data_reader_jag_conduit_hdf5::set_defaults() { - m_image_width = 0; - m_image_height = 0; - m_image_num_channels = 1; - m_num_labels = 0; -} - - void data_reader_jag_conduit_hdf5::setup(int num_io_threads, std::shared_ptr io_thread_pool) { 
- generic_data_reader::setup(num_io_threads, io_thread_pool); - replicate_processor(*m_master_pps, num_io_threads); -} - -/// Replicate image processor for each I/O thread - bool data_reader_jag_conduit_hdf5::replicate_processor(const cv_process& pp, const int nthreads) { - m_pps.resize(nthreads); - - // Construct thread private preprocessing objects out of a shared pointer - for (int i = 0; i < nthreads; ++i) { - m_pps[i] = lbann::make_unique(pp); - } - - bool ok = true; - for (int i = 0; ok && (i < nthreads); ++i) { - if (!m_pps[i]) ok = false; - } - - if (!ok || (nthreads <= 0)) { - _THROW_LBANN_EXCEPTION_(get_type(), " cannot replicate image processor"); - return false; - } - - const std::vector dims = pp.get_data_dims(); - if ((dims.size() == 2u) && (dims[0] != 0u) && (dims[1] != 0u)) { - m_image_width = static_cast(dims[0]); - m_image_height = static_cast(dims[1]); - } - - return true; -} - -void data_reader_jag_conduit_hdf5::set_image_dims(const int width, const int height, const int ch) { - m_image_width = width; - m_image_height = height; - m_image_num_channels = ch; -} - -bool data_reader_jag_conduit_hdf5::fetch_datum(CPUMat& X, int data_id, int mb_idx) { - int tid = m_io_thread_pool->get_local_thread_id(); - m_jag_store->load_data(data_id, tid); - - std::vector sizes = get_linearized_data_sizes(); - std::vector X_v = create_datum_views(X, sizes, mb_idx); - - size_t i = 0; - std::vector images = get_cv_images(data_id, tid); - - for(size_t k=0u; k < get_num_img_srcs(); ++k) { - int width, height, img_type; - image_utils::process_image(images[k], width, height, img_type, *(m_pps[tid]), X_v[i++]); - } - - const std::vector &scalars = m_jag_store->fetch_scalars(data_id, tid); - set_minibatch_item(X_v[i++], 0, scalars.data(), m_jag_store->get_linearized_scalar_size()); - - const std::vector &inputs = m_jag_store->fetch_inputs(data_id, tid); - set_minibatch_item(X_v[i++], 0, inputs.data(), m_jag_store->get_linearized_input_size()); - return true; -} - -void 
data_reader_jag_conduit_hdf5::load() { - if(m_gan_labelling) { - m_num_labels=2; - } - - if (is_master()) { - std::cout << "JAG load GAN m_gan_labelling : label_value " - << m_gan_labelling <<" : " << m_gan_label_value << std::endl; - } - - bool setup_jag_store = true; - - if (setup_jag_store) { - m_jag_store = new jag_store; - - m_jag_store->set_comm(m_comm); - if (is_master()) std::cerr << "calling: m_jag_store->set_image_size\n"; - m_jag_store->set_image_size(m_image_height * m_image_width); - - if (m_first_n > 0) { - _THROW_LBANN_EXCEPTION_(_CN_, "load() does not support first_n feature."); - } - - if (is_master()) std::cerr << "data_reader_jag_conduit_hdf5: calling m_jag_store->setup()\n"; - m_jag_store->setup(this); - } - - m_is_data_loaded = true; - - // reset indices - m_shuffled_indices.resize(get_num_samples()); - std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0); - - select_subset_of_data(); - - if (is_master()) { - std::cout << "\n" << get_description() << "\n\n"; - } -} - -size_t data_reader_jag_conduit_hdf5::get_num_samples() const { - return m_jag_store->get_num_samples(); -} - -unsigned int data_reader_jag_conduit_hdf5::get_num_img_srcs() const { - return m_jag_store->get_num_img_srcs(); -} - -unsigned int data_reader_jag_conduit_hdf5::get_num_channels() const { - return m_jag_store->get_num_channels_per_view(); -} - -size_t data_reader_jag_conduit_hdf5::get_linearized_channel_size() const { - return m_jag_store->get_linearized_channel_size(); -} - -size_t data_reader_jag_conduit_hdf5::get_linearized_image_size() const { - return m_jag_store->get_linearized_image_size(); -} - -size_t data_reader_jag_conduit_hdf5::get_linearized_scalar_size() const { - return m_jag_store->get_linearized_scalar_size(); -} - -size_t data_reader_jag_conduit_hdf5::get_linearized_input_size() const { - return m_jag_store->get_linearized_input_size(); -} - - -int data_reader_jag_conduit_hdf5::get_linearized_data_size() const { - return 
m_jag_store->get_linearized_data_size(); -} - -int data_reader_jag_conduit_hdf5::get_linearized_response_size() const { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: not implemented"); - return 0; - return 0; -} - -std::vector data_reader_jag_conduit_hdf5::get_linearized_data_sizes() const { - return m_jag_store->get_linearized_data_sizes(); -} - -std::vector data_reader_jag_conduit_hdf5::get_linearized_response_sizes() const { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: not implemented"); - std::vector r; - return r; -} - -const std::vector data_reader_jag_conduit_hdf5::get_data_dims() const { - return {get_linearized_data_size()}; -} - -int data_reader_jag_conduit_hdf5::get_num_labels() const { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: not implemented"); - return m_num_labels; -} - -int data_reader_jag_conduit_hdf5::get_linearized_label_size() const { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: not implemented"); - return m_num_labels; - return 0; -} - -std::string data_reader_jag_conduit_hdf5::get_description() const { -/* - std::vector s = get_linearized_data_sizes(); - std::string ret = std::string("data_reader_jag_conduit_hdf5:\n") - + " - independent: " + data_reader_jag_conduit_hdf5::to_string(m_independent) + "\n" - + " - dependent: " + data_reader_jag_conduit_hdf5::to_string(m_dependent) + "\n" - + " - images: " + std::to_string(m_num_img_srcs) + 'x' - + std::to_string(m_image_width) + 'x' - + std::to_string(m_image_height) + "\n" - + " - scalars: " + std::to_string(get_linearized_scalar_size()) + "\n" - + " - inputs: " + std::to_string(get_linearized_input_size()) + "\n" - + " - linearized data size: " + std::to_string(get_linearized_data_size()) + "\n" - - + " - uniform_input_type: " + (m_uniform_input_type? 
"true" : "false") + '\n'; - ret += '\n'; - return ret; - */ - return ""; -} - - -bool data_reader_jag_conduit_hdf5::check_sample_id(const size_t sample_id) const { - m_jag_store->check_sample_id(sample_id); - return true; -} - -cv::Mat data_reader_jag_conduit_hdf5::cast_to_cvMat(const std::pair img, const int height) { - const int num_pixels = static_cast(img.first); - const ch_t* ptr = img.second; - - // add a zero copying view to data - using InputBuf_T = cv_image_type; - const cv::Mat image(num_pixels, 1, InputBuf_T::T(1u), - reinterpret_cast(const_cast(ptr))); - // reshape the image. Furter need to clone (deep-copy) the image - // to preserve the constness of the original data - return (image.reshape(0, height)); -} - -std::vector data_reader_jag_conduit_hdf5::get_cv_images(const size_t sample_id, int tid) const { - const std::vector> &raw_images = m_jag_store->fetch_views(sample_id, tid); - std::vector< std::pair > img_ptrs(raw_images.size()); - size_t num_pixels = get_linearized_channel_size(); - for (size_t h=0; h images; - images.reserve(img_ptrs.size()); - - for (const auto& img: img_ptrs) { - images.emplace_back(cast_to_cvMat(img, m_image_height).clone()); - } - return images; -} - -std::vector -data_reader_jag_conduit_hdf5::create_datum_views(CPUMat& X, const std::vector& sizes, const int mb_idx) const { - std::vector X_v(sizes.size()); - El::Int h = 0; - - for(size_t i=0u; i < sizes.size(); ++i) { - const El::Int h_end = h + static_cast(sizes[i]); - El::View(X_v[i], X, El::IR(h, h_end), El::IR(mb_idx, mb_idx + 1)); - h = h_end; - } - return X_v; -} - -bool data_reader_jag_conduit_hdf5::fetch_response(CPUMat& X, int data_id, int mb_idx) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: not implemented"); - return true; -#if 0 - int tid = m_io_thread_pool->get_local_thread_id(); - std::vector sizes = get_linearized_response_sizes(); - std::vector X_v = create_datum_views(X, sizes, mb_idx); - bool ok = true; - 
for(size_t i = 0u; ok && (i < X_v.size()); ++i) { - ok = fetch(X_v[i], data_id, 0, tid, m_dependent[i], "response"); - } - return ok; -#endif -} - -bool data_reader_jag_conduit_hdf5::fetch_label(CPUMat& Y, int data_id, int mb_idx) { - if(m_gan_label_value) Y.Set(m_gan_label_value,mb_idx,1); //fake sample is set to 1; adversarial model - else { //fake sample (second half of minibatch is set to 0;discriminator model - //mb_idx < (m_mb_size/2) ? Y.Set(1,mb_idx,1) : Y.Set(m_gan_label_value,mb_idx,1); - mb_idx < (get_current_mini_batch_size()/2) ? Y.Set(1,mb_idx,1) : Y.Set(m_gan_label_value,mb_idx,1); - } - //Y.Set(m_gan_label_value, mb_idx, 1); - return true; -} - -void data_reader_jag_conduit_hdf5::setup_data_store(model *m) { - if (m_data_store != nullptr) { - //delete m_data_store; - } -/* - m_data_store = new data_store_jag_conduit(this, m); - if (m_data_store != nullptr) { - m_data_store->setup(); - } -*/ -} - -void data_reader_jag_conduit_hdf5::post_update() { - return; -} - -} // end of namespace lbann - -#undef _CN_ -#endif // #ifdef LBANN_HAS_CONDUIT From cfb8d3f77bc79a9d1e0c4911c635acda68e0a517 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Tue, 19 Feb 2019 14:34:08 -0800 Subject: [PATCH 074/443] Added a CMakeFile for the data store directory --- include/lbann/data_store/CMakeLists.txt | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 include/lbann/data_store/CMakeLists.txt diff --git a/include/lbann/data_store/CMakeLists.txt b/include/lbann/data_store/CMakeLists.txt new file mode 100644 index 00000000000..cbf11c85530 --- /dev/null +++ b/include/lbann/data_store/CMakeLists.txt @@ -0,0 +1,8 @@ +# Add the headers for this directory +set_full_path(THIS_DIR_HEADERS + generic_data_store.hpp + data_store_jag.hpp + ) + +# Propagate the files up the tree +set(HEADERS "${HEADERS}" "${THIS_DIR_HEADERS}" PARENT_SCOPE) From 3fbf0e1e3669763588935066546a62c09e24de32 Mon Sep 17 00:00:00 2001 From: "Brian C. 
Van Essen" Date: Tue, 19 Feb 2019 16:25:49 -0800 Subject: [PATCH 075/443] Fixed a bug where the index used to compute the mapping in the data store of where each sample goes to which rank in the data exchange was incorrectly zero based. Added a sanity check to make sure that the sample being stored by a given rank is actually owned by that rank. --- src/data_readers/data_reader_jag_conduit.cpp | 12 ------------ src/data_store/data_store_jag.cpp | 16 +++++++++------- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index eb04fb41899..acc55ec2717 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -25,14 +25,10 @@ // //////////////////////////////////////////////////////////////////////////////// -#ifndef _JAG_OFFLINE_TOOL_MODE_ #include "lbann/data_readers/data_reader_jag_conduit.hpp" #include "lbann/io/data_buffers/partitioned_io_buffer.hpp" #include "lbann/data_store/data_store_jag.hpp" #include "lbann/models/model.hpp" -#else -#include "data_reader_jag_conduit.hpp" -#endif // _JAG_OFFLINE_TOOL_MODE_ #ifdef LBANN_HAS_CONDUIT #include "lbann/utils/file_utils.hpp" // for add_delimiter() in load() @@ -104,7 +100,6 @@ const std::set data_reader_jag_conduit::non_numeric_vars = { "solver_mode" }; -#ifndef _JAG_OFFLINE_TOOL_MODE_ void data_reader_jag_conduit::set_io_buffer_type(const std::string io_buffer) { m_io_buffer_type = io_buffer; } @@ -149,7 +144,6 @@ int data_reader_jag_conduit::compute_max_num_parallel_readers() { bool data_reader_jag_conduit::check_num_parallel_readers(long data_set_size) { return true; } -#endif // _JAG_OFFLINE_TOOL_MODE_ data_reader_jag_conduit::data_reader_jag_conduit(const std::shared_ptr& pp, bool shuffle) : generic_data_reader(shuffle) { @@ -735,7 +729,6 @@ void data_reader_jag_conduit::check_input_keys() { } -#ifndef _JAG_OFFLINE_TOOL_MODE_ void 
data_reader_jag_conduit::load() { if(m_gan_labelling) { m_num_labels=2; @@ -815,7 +808,6 @@ void data_reader_jag_conduit::load_list_of_samples_from_archive(const std::strin std::cout << "Time to load sample list from archive: " << tm2 - tm1 << std::endl; } } -#endif // _JAG_OFFLINE_TOOL_MODE_ unsigned int data_reader_jag_conduit::get_num_img_srcs() const { return m_num_img_srcs; @@ -1435,7 +1427,6 @@ bool data_reader_jag_conduit::fetch_label(CPUMat& Y, int data_id, int mb_idx) { return true; } -#ifndef _JAG_OFFLINE_TOOL_MODE_ void data_reader_jag_conduit::setup_data_store(model *m) { if (m_data_store != nullptr) { delete m_data_store; @@ -1444,12 +1435,9 @@ void data_reader_jag_conduit::setup_data_store(model *m) { m_data_store = m_jag_store; // *generic_data_store m_data_store->setup(); } -#endif // _JAG_OFFLINE_TOOL_MODE_ void data_reader_jag_conduit::save_image(Mat& pixels, const std::string filename, bool do_scale) { -#ifndef _JAG_OFFLINE_TOOL_MODE_ internal_save_image(pixels, filename, m_image_height, m_image_width, 1, do_scale); -#endif // _JAG_OFFLINE_TOOL_MODE_ } void data_reader_jag_conduit::print_schema(const size_t sample_id) const { diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 62fc362f8b1..3c25c557804 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -196,6 +196,12 @@ void data_store_jag::set_conduit_node(int data_id, conduit::Node &node) { LBANN_ERROR("duplicate data_id: " + std::to_string(data_id) + " in data_store_jag::set_conduit_node"); } + if (m_owner[data_id] != m_rank) { + std::stringstream s; + s << "set_conduit_node error for data id: "<size(); i++) { auto index = (*m_shuffled_indices)[i]; - m_owner[index] = j; - j = (j + 1) % m_np; + m_owner[index] = i % m_np; } } From 8afc458cd963346e9cd1ea9f8d661ed75623843f Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Tue, 19 Feb 2019 16:26:59 -0800 Subject: [PATCH 076/443] rename lbann_comm_ptr to be more accurately descriptive --- include/lbann/base.hpp | 9 ++++++--- model_zoo/lbann.cpp | 4 ++-- model_zoo/lbann2.cpp | 4 ++-- model_zoo/lbann_aecycgan.cpp | 4 ++-- model_zoo/lbann_cycgan.cpp | 4 ++-- model_zoo/lbann_gan.cpp | 4 ++-- model_zoo/lbann_inf.cpp | 2 +- src/base.cpp | 4 ++-- tests/test_shuffled_indices.cpp | 4 ++-- 9 files changed, 21 insertions(+), 18 deletions(-) diff --git a/include/lbann/base.hpp b/include/lbann/base.hpp index 18d66e2c1ab..b7f9d940e3e 100644 --- a/include/lbann/base.hpp +++ b/include/lbann/base.hpp @@ -42,7 +42,10 @@ namespace lbann { // Forward-declaration. class lbann_comm; -using lbann_comm_ptr = std::unique_ptr; +// Note that this should only be used to wrap the thing coming out of +// initialize()! This will be removed when we have proper RAII around +// these things. +using world_comm_ptr = std::unique_ptr; /** Create LBANN communicator. * @@ -54,9 +57,9 @@ using lbann_comm_ptr = std::unique_ptr; * @param argc Command line arguments. * @param argv Number of command line arguments. * @param seed RNG seed. - * @return LBANN communicator. + * @return LBANN communicator corresponding to MPI_COMM_WORLD. */ -lbann_comm_ptr initialize(int& argc, char**& argv, int seed = -1); +world_comm_ptr initialize(int& argc, char**& argv, int seed = -1); /** Destroy LBANN communicator. 
* diff --git a/model_zoo/lbann.cpp b/model_zoo/lbann.cpp index ba54c2a8bfa..2048c771d70 100644 --- a/model_zoo/lbann.cpp +++ b/model_zoo/lbann.cpp @@ -37,8 +37,8 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm_ptr comm = initialize(argc, argv, random_seed); - bool master = comm->am_world_master(); + world_comm_ptr comm = initialize(argc, argv, random_seed); + const bool master = comm->am_world_master(); if (master) { std::cout << "\n\n==============================================================\n" diff --git a/model_zoo/lbann2.cpp b/model_zoo/lbann2.cpp index 1d9c281113c..b7a3a58de7d 100644 --- a/model_zoo/lbann2.cpp +++ b/model_zoo/lbann2.cpp @@ -35,8 +35,8 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm_ptr comm = initialize(argc, argv, random_seed); - bool master = comm->am_world_master(); + world_comm_ptr comm = initialize(argc, argv, random_seed); + const bool master = comm->am_world_master(); try { // Initialize options db (this parses the command line) diff --git a/model_zoo/lbann_aecycgan.cpp b/model_zoo/lbann_aecycgan.cpp index 80672bcc450..a9f035ac9cb 100644 --- a/model_zoo/lbann_aecycgan.cpp +++ b/model_zoo/lbann_aecycgan.cpp @@ -35,8 +35,8 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm_ptr comm = initialize(argc, argv, random_seed); - bool master = comm->am_world_master(); + world_comm_ptr comm = initialize(argc, argv, random_seed); + const bool master = comm->am_world_master(); try { // Initialize options db (this parses the command line) diff --git a/model_zoo/lbann_cycgan.cpp b/model_zoo/lbann_cycgan.cpp index 16f603648fe..449fa061041 100644 --- a/model_zoo/lbann_cycgan.cpp +++ b/model_zoo/lbann_cycgan.cpp @@ -35,8 +35,8 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - 
lbann_comm_ptr comm = initialize(argc, argv, random_seed); - bool master = comm->am_world_master(); + world_comm_ptr comm = initialize(argc, argv, random_seed); + const bool master = comm->am_world_master(); if (master) { std::cout << "\n\n==============================================================\n" diff --git a/model_zoo/lbann_gan.cpp b/model_zoo/lbann_gan.cpp index 45dc22bb417..7c36af1b848 100644 --- a/model_zoo/lbann_gan.cpp +++ b/model_zoo/lbann_gan.cpp @@ -35,8 +35,8 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm_ptr comm = initialize(argc, argv, random_seed); - bool master = comm->am_world_master(); + world_comm_ptr comm = initialize(argc, argv, random_seed); + const bool master = comm->am_world_master(); try { // Initialize options db (this parses the command line) diff --git a/model_zoo/lbann_inf.cpp b/model_zoo/lbann_inf.cpp index b26a3d22657..fdc7161b272 100644 --- a/model_zoo/lbann_inf.cpp +++ b/model_zoo/lbann_inf.cpp @@ -36,7 +36,7 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; auto comm = initialize(argc, argv, random_seed); - bool master = comm->am_world_master(); + const bool master = comm->am_world_master(); try { // Initialize options db (this parses the command line) diff --git a/src/base.cpp b/src/base.cpp index d528036d02f..e4224a86b49 100644 --- a/src/base.cpp +++ b/src/base.cpp @@ -47,12 +47,12 @@ namespace lbann { -lbann_comm_ptr initialize(int& argc, char**& argv, int seed) { +world_comm_ptr initialize(int& argc, char**& argv, int seed) { // Initialize Elemental. El::Initialize(argc, argv); // Create a new comm object. // Initial creation with every process in one model. - auto comm = lbann_comm_ptr{new lbann_comm(0), &lbann::finalize }; + auto comm = world_comm_ptr{new lbann_comm(0), &lbann::finalize }; #if defined(LBANN_TOPO_AWARE) // Determine the number of NUMA nodes present. 
diff --git a/tests/test_shuffled_indices.cpp b/tests/test_shuffled_indices.cpp index 94a2b383b9b..662db6f4716 100644 --- a/tests/test_shuffled_indices.cpp +++ b/tests/test_shuffled_indices.cpp @@ -37,8 +37,8 @@ void test_is_shuffled(const generic_data_reader &reader, bool is_shuffled, const int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm_ptr comm = initialize(argc, argv, random_seed); - bool master = comm->am_world_master(); + world_comm_ptr comm = initialize(argc, argv, random_seed); + const bool master = comm->am_world_master(); try { // Initialize options db (this parses the command line) From 478617a731f14d959f2d385e6b7b7d5d31bbb765 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Tue, 19 Feb 2019 17:24:35 -0800 Subject: [PATCH 077/443] GPU implementation of RMSprop. --- include/lbann/optimizers/rmsprop.hpp | 27 +++++----- src/optimizers/CMakeLists.txt | 1 + src/optimizers/rmsprop.cpp | 32 +++--------- src/optimizers/rmsprop.cu | 75 ++++++++++++++++++++++++++++ 4 files changed, 95 insertions(+), 40 deletions(-) create mode 100644 src/optimizers/rmsprop.cu diff --git a/include/lbann/optimizers/rmsprop.hpp b/include/lbann/optimizers/rmsprop.hpp index 78364f357c6..e34b21de13d 100644 --- a/include/lbann/optimizers/rmsprop.hpp +++ b/include/lbann/optimizers/rmsprop.hpp @@ -24,8 +24,8 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_OPTIMIZER_RMSPROP_HPP -#define LBANN_OPTIMIZER_RMSPROP_HPP +#ifndef LBANN_OPTIMIZERS_RMSPROP_HPP_INCLUDED +#define LBANN_OPTIMIZERS_RMSPROP_HPP_INCLUDED #include "lbann/optimizers/optimizer.hpp" #include @@ -34,24 +34,18 @@ namespace lbann { /** RMSprop optimizer. */ class rmsprop : public optimizer { - public: +public: - /** Constructor. */ rmsprop(lbann_comm *comm, DataType learning_rate, DataType decay_rate, DataType eps = DataType(1e-8)); - - /** Copy constructor. 
*/ rmsprop(const rmsprop& other); - /** Copy assignment operator. */ rmsprop& operator=(const rmsprop& other); - /** Destructor. */ - ~rmsprop() override; - /** Create a copy. */ + ~rmsprop() override = default; rmsprop* copy() const override { return new rmsprop(*this); } - /** Get the optimizer name. */ + /** Human-readable type name. */ std::string get_type() const override { return "RMSprop"; } /** Human-readable description. */ description get_description() const override; @@ -61,16 +55,19 @@ class rmsprop : public optimizer { /** Perform the computation in an optimization step. */ void step_compute(AbsDistMat& values, const AbsDistMat& gradient) override; +#ifdef LBANN_HAS_CUDA + /** Perform the computation in an optimization step on GPU. */ + void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) override; +#endif // LBANN_HAS_CUDA - private: +private: /** Decay rate. */ DataType m_decay_rate; /** Small factor to avoid division by zero. */ DataType m_eps; /** RMSprop cache. 
*/ - AbsDistMat *m_cache; - + std::unique_ptr m_cache; //************************************************************************ // Checkpointing @@ -108,4 +105,4 @@ class rmsprop : public optimizer { } // namespace lbann -#endif // LBANN_OPTIMIZER_RMSPROP_HPP +#endif // LBANN_OPTIMIZERS_RMSPROP_HPP_INCLUDED diff --git a/src/optimizers/CMakeLists.txt b/src/optimizers/CMakeLists.txt index d890a650f80..bb2d84ada91 100644 --- a/src/optimizers/CMakeLists.txt +++ b/src/optimizers/CMakeLists.txt @@ -13,6 +13,7 @@ if (LBANN_HAS_CUDA) set_full_path(THIS_DIR_CU_SOURCES adagrad.cu adam.cu + rmsprop.cu sgd.cu ) endif () diff --git a/src/optimizers/rmsprop.cpp b/src/optimizers/rmsprop.cpp index da7bb49fa87..7f3cf2e9c03 100644 --- a/src/optimizers/rmsprop.cpp +++ b/src/optimizers/rmsprop.cpp @@ -35,40 +35,22 @@ rmsprop::rmsprop(lbann_comm *comm, DataType eps) : optimizer(comm, learning_rate), m_decay_rate(decay_rate), - m_eps(eps), - m_cache(nullptr) {} + m_eps(eps) {} rmsprop::rmsprop(const rmsprop& other) : optimizer(other), m_decay_rate(other.m_decay_rate), m_eps(other.m_eps), - m_cache(other.m_cache) { - if (m_cache != nullptr) { m_cache = m_cache->Copy(); } -} + m_cache(other.m_cache ? other.m_cache->Copy() : nullptr) {} rmsprop& rmsprop::operator=(const rmsprop& other) { optimizer::operator=(other); m_decay_rate = other.m_decay_rate; m_eps = other.m_eps; - - // Copy cache matrix - if (m_cache != nullptr && other.m_cache != nullptr - && m_cache->DistData() == other.m_cache->DistData()) { - El::Copy(*other.m_cache, *m_cache); - } - else { - if (m_cache != nullptr) { delete m_cache; } - m_cache = other.m_cache; - if (m_cache != nullptr) { m_cache = m_cache->Copy(); } - } - + m_cache.reset(other.m_cache ? 
other.m_cache->Copy() : nullptr); return *this; } -rmsprop::~rmsprop() { - if (m_cache != nullptr) { delete m_cache; } -} - description rmsprop::get_description() const { auto&& desc = optimizer::get_description(); desc.add("Decay rate", m_decay_rate); @@ -78,8 +60,8 @@ description rmsprop::get_description() const { void rmsprop::setup(weights& w) { optimizer::setup(w); - m_cache = m_gradient->Construct(m_gradient->Grid(), - m_gradient->Root()); + m_cache.reset(m_gradient->Construct(m_gradient->Grid(), + m_gradient->Root())); El::Zeros(*m_cache, m_gradient->Height(), m_gradient->Width()); } @@ -128,7 +110,7 @@ bool rmsprop::save_to_checkpoint_shared(persist& p, std::string name_prefix) { char l_name[512]; sprintf(l_name, "%s_optimizer_cache_%lldx%lld", name_prefix.c_str(), m_cache->Height(), m_cache->Width()); - p.write_distmat(persist_type::train, l_name, m_cache); + p.write_distmat(persist_type::train, l_name, m_cache.get()); return true; } @@ -138,7 +120,7 @@ bool rmsprop::load_from_checkpoint_shared(persist& p, std::string name_prefix) { char l_name[512]; sprintf(l_name, "%s_optimizer_cache_%lldx%lld.bin", name_prefix.c_str(), m_cache->Height(), m_cache->Width()); - p.read_distmat(persist_type::train, l_name, m_cache); + p.read_distmat(persist_type::train, l_name, m_cache.get()); return true; } diff --git a/src/optimizers/rmsprop.cu b/src/optimizers/rmsprop.cu new file mode 100644 index 00000000000..d65eaeb49b2 --- /dev/null +++ b/src/optimizers/rmsprop.cu @@ -0,0 +1,75 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. 
For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +//////////////////////////////////////////////////////////////////////////////// + +#include "lbann/optimizers/rmsprop.hpp" +#include "lbann/utils/cuda.hpp" + +namespace lbann { + +namespace { + +__global__ void rmsprop_kernel(El::Int height, + El::Int width, + DataType learning_rate, + DataType decay_rate, + DataType eps, + DataType * __restrict__ values, + El::Int values_ldim, + const DataType * __restrict__ gradient, + El::Int gradient_ldim, + DataType * __restrict__ cache, + El::Int cache_ldim) { + const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int nthreads = gridDim.x * blockDim.x; + for (El::Int pos = gid; pos < height * width; pos += nthreads) { + const auto& row = pos % height; + const auto& col = pos / height; + const auto& g = gradient[row + col * gradient_ldim]; + auto& c = cache[row + col * cache_ldim]; + auto& x = values[row + col * values_ldim]; + c = decay_rate * c + (DataType(1) - decay_rate) * g * g; + x -= learning_rate * g / (cuda::sqrt(c) + eps); + } +} + +} // namespace + +void rmsprop::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { + const El::Int local_height = values.LocalHeight(); + const El::Int local_width = values.LocalWidth(); + const El::Int size = local_height * local_width; + constexpr El::Int block_dim = 256; + const El::Int grid_dim = (size + 
block_dim - 1) / block_dim; + if (grid_dim > 0) { + rmsprop_kernel<<>>( + local_height, local_width, m_learning_rate, m_decay_rate, m_eps, + values.Buffer(), values.LDim(), + gradient.LockedBuffer(), gradient.LDim(), + m_cache->Buffer(), m_cache->LDim()); + } +} + +} // namespace lbann From 872810c879ea5820d0fb55232608fe6996f2d901 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Tue, 19 Feb 2019 21:18:40 -0800 Subject: [PATCH 078/443] Fixed a bug in the data reader where ranks without any samples would not participate in the data store, which would lead to a hang. Now every rank will participate in the data store exchange. --- src/data_readers/data_reader.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/data_readers/data_reader.cpp b/src/data_readers/data_reader.cpp index 0345c0d89dc..117009db374 100644 --- a/src/data_readers/data_reader.cpp +++ b/src/data_readers/data_reader.cpp @@ -113,6 +113,13 @@ int lbann::generic_data_reader::fetch_data(CPUMat& X, El::Matrix& indic El::Zeros_seq(indices_fetched, mb_size, 1); } + /// Make sure that every rank participates in the data store prior + /// to seeing if the local rank's position is valid. 
Note that + /// every rank will hold data that may be used in the last mini-batch + if (data_store_active()) { + m_data_store->exchange_mini_batch_data(m_current_pos-m_base_offset-m_model_offset, loaded_batch_size); + } + if(!position_valid()) { if(position_is_overrun()) { return 0; @@ -123,10 +130,6 @@ int lbann::generic_data_reader::fetch_data(CPUMat& X, El::Matrix<El::Int>& indic } } - if (data_store_active()) { - m_data_store->exchange_mini_batch_data(m_current_pos-m_base_offset-m_model_offset, loaded_batch_size); - } - if (!m_save_minibatch_indices) { /// Allow each thread to perform any preprocessing necessary on the /// data source prior to fetching data From ad5273fe32ac128d43ecc88cefbf0e52cf25476c Mon Sep 17 00:00:00 2001 From: Naoya Maruyama Date: Tue, 19 Feb 2019 14:25:47 -0800 Subject: [PATCH 079/443] Optionally start profiling after one epoch. Add `skip_init: true` to the profiler callback and run nvprof with `--profile-with-start off`. This will skip profiling of the initialization phase including the first epoch, so the resulting profiles would be smaller and easier to visualize with nvvp.
--- include/lbann/callbacks/profiler.hpp | 4 +++- include/lbann/utils/profiling.hpp | 2 ++ src/callbacks/profiler.cpp | 13 ++++++++++--- src/proto/factories/callback_factory.cpp | 3 ++- src/proto/lbann.proto | 1 + src/utils/profiling.cpp | 24 ++++++++++++++++++++---- 6 files changed, 38 insertions(+), 9 deletions(-) diff --git a/include/lbann/callbacks/profiler.hpp b/include/lbann/callbacks/profiler.hpp index 078b66ff04e..4454e19cf3e 100644 --- a/include/lbann/callbacks/profiler.hpp +++ b/include/lbann/callbacks/profiler.hpp @@ -37,7 +37,7 @@ namespace lbann { */ class lbann_callback_profiler : public lbann_callback { public: - lbann_callback_profiler(bool sync = false); + lbann_callback_profiler(bool sync = false, bool skip_init = false); lbann_callback_profiler(const lbann_callback_profiler&) = default; lbann_callback_profiler& operator=(const lbann_callback_profiler&) = default; lbann_callback_profiler* copy() const override { @@ -75,6 +75,8 @@ class lbann_callback_profiler : public lbann_callback { int get_color(Layer *l); /** Whether to synchronize the when setting up profile regions. */ bool m_sync; + /** Whether to skip initial iterations. 
*/ + bool m_skip_init; }; } // namespace lbann diff --git a/include/lbann/utils/profiling.hpp b/include/lbann/utils/profiling.hpp index e023c527670..97684aab356 100644 --- a/include/lbann/utils/profiling.hpp +++ b/include/lbann/utils/profiling.hpp @@ -37,6 +37,8 @@ constexpr int prof_colors[num_prof_colors] = { 0x22AA99, 0xAAAA11, 0x6633CC, 0xE67300, 0x8B0707, 0x329262, 0x5574A6, 0x3B3EAC}; +void prof_start(); +void prof_stop(); void prof_region_begin(const char *s, int c, bool sync); void prof_region_end(const char *s, bool sync); diff --git a/src/callbacks/profiler.cpp b/src/callbacks/profiler.cpp index 17dd474d243..d12119a533c 100644 --- a/src/callbacks/profiler.cpp +++ b/src/callbacks/profiler.cpp @@ -38,14 +38,21 @@ namespace lbann { -lbann_callback_profiler::lbann_callback_profiler(bool sync) : - lbann_callback(), m_sync(sync) { +lbann_callback_profiler::lbann_callback_profiler(bool sync, bool skip_init) : + lbann_callback(), m_sync(sync), m_skip_init(skip_init) { #ifdef LBANN_NVPROF nvtxNameCudaStreamA(El::GPUManager::Stream(), "Hydrogen"); -#endif +#endif + if (!m_skip_init) { + prof_start(); + } } void lbann_callback_profiler::on_epoch_begin(model *m) { + // Skip the first epoch + if (m_skip_init && m->get_cur_epoch() == 1) { + prof_start(); + } prof_region_begin(("epoch " + std::to_string(m->get_cur_epoch())).c_str(), prof_colors[0], m_sync); } diff --git a/src/proto/factories/callback_factory.cpp b/src/proto/factories/callback_factory.cpp index 5e5d0c959de..bedb7c8c515 100644 --- a/src/proto/factories/callback_factory.cpp +++ b/src/proto/factories/callback_factory.cpp @@ -260,7 +260,8 @@ lbann_callback* construct_callback(lbann_comm* comm, params.mat_interval()); } if (proto_cb.has_profiler()) { - return new lbann_callback_profiler(proto_cb.profiler().sync()); + return new lbann_callback_profiler(proto_cb.profiler().sync(), + proto_cb.profiler().skip_init()); } if (proto_cb.has_sync_layers()) { const auto& params = proto_cb.sync_layers(); diff --git 
a/src/proto/lbann.proto b/src/proto/lbann.proto index 4d817d8f4c0..4d0a4ea1c8d 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -439,6 +439,7 @@ message CallbackPrint { message CallbackProfiler { bool sync = 1; + bool skip_init = 2; } message CallbackTimer { diff --git a/src/utils/profiling.cpp b/src/utils/profiling.cpp index 84c6c38cb75..e40e7b4f0cc 100644 --- a/src/utils/profiling.cpp +++ b/src/utils/profiling.cpp @@ -35,8 +35,14 @@ #include "nvToolsExtCuda.h" #include "nvToolsExtCudaRt.h" #include "cuda_runtime.h" +#include "cuda_profiler_api.h" +#include "lbann/utils/cuda.hpp" #endif +namespace { +bool profiling_started = false; +} + namespace lbann { #if defined(LBANN_SCOREP) @@ -49,23 +55,33 @@ void prof_region_end(const char *s, bool) { return; } #elif defined(LBANN_NVPROF) +void prof_start() { + CHECK_CUDA(cudaProfilerStart()); + profiling_started = true; +} +void prof_stop() { + CHECK_CUDA(cudaProfilerStop()); + profiling_started = false; +} void prof_region_begin(const char *s, int c, bool sync) { + if (!profiling_started) return; if (sync) { El::GPUManager::SynchronizeDevice(); } // Doesn't work with gcc 4.9 // nvtxEventAttributes_t ev = {0}; - nvtxEventAttributes_t ev; + nvtxEventAttributes_t ev; memset(&ev, 0, sizeof(nvtxEventAttributes_t)); - ev.version = NVTX_VERSION; + ev.version = NVTX_VERSION; ev.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; ev.colorType = NVTX_COLOR_ARGB; ev.color = c; - ev.messageType = NVTX_MESSAGE_TYPE_ASCII; - ev.message.ascii = s; + ev.messageType = NVTX_MESSAGE_TYPE_ASCII; + ev.message.ascii = s; nvtxRangePushEx(&ev); } void prof_region_end(const char *, bool sync) { + if (!profiling_started) return; if (sync) { El::GPUManager::SynchronizeDevice(); } From d9afd4c1a73f5d205df0baa93592bf071c137a89 Mon Sep 17 00:00:00 2001 From: Naoya Maruyama Date: Wed, 20 Feb 2019 09:25:32 -0800 Subject: [PATCH 080/443] Fix compilation error with the profiler. The error happens when nvprof is not enabled. It was introduced at #890. 
--- src/utils/profiling.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/utils/profiling.cpp b/src/utils/profiling.cpp index e40e7b4f0cc..17ec1e53ac8 100644 --- a/src/utils/profiling.cpp +++ b/src/utils/profiling.cpp @@ -46,6 +46,13 @@ bool profiling_started = false; namespace lbann { #if defined(LBANN_SCOREP) +void prof_start() { + profiling_started = true; + return; +} +void prof_stop() { + return; +} void prof_region_begin(const char *s, int, bool) { SCOREP_USER_REGION_BY_NAME_BEGIN(s, SCOREP_USER_REGION_TYPE_COMMON); return; @@ -88,6 +95,13 @@ void prof_region_end(const char *, bool sync) { nvtxRangePop(); } #else +void prof_start() { + profiling_started = true; + return; +} +void prof_stop() { + return; +} void prof_region_begin(const char *, int, bool) { return; } From 30d14c82cd1a39069f5a98a9a1398ae754ded19a Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Wed, 20 Feb 2019 09:51:26 -0800 Subject: [PATCH 081/443] clarify the deleter type with std::function instead of function pointer --- include/lbann/base.hpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/lbann/base.hpp b/include/lbann/base.hpp index b7f9d940e3e..63a57246c42 100644 --- a/include/lbann/base.hpp +++ b/include/lbann/base.hpp @@ -37,6 +37,8 @@ // Support for OpenMP macros #include "lbann/utils/omp_pragma.hpp" +#include + namespace lbann { // Forward-declaration. @@ -45,7 +47,8 @@ class lbann_comm; // Note that this should only be used to wrap the thing coming out of // initialize()! This will be removed when we have proper RAII around // these things. -using world_comm_ptr = std::unique_ptr; +using world_comm_ptr = + std::unique_ptr>; /** Create LBANN communicator. 
* From 40e0008c8678a21a4450d9116e21d51b389954b3 Mon Sep 17 00:00:00 2001 From: Naoya Maruyama Date: Wed, 20 Feb 2019 11:18:26 -0800 Subject: [PATCH 082/443] Remove a deprecated comment --- scripts/build_lbann_lc.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/build_lbann_lc.sh b/scripts/build_lbann_lc.sh index 9f27c12f07e..ba5a18c06d7 100755 --- a/scripts/build_lbann_lc.sh +++ b/scripts/build_lbann_lc.sh @@ -442,7 +442,6 @@ if [ "${BUILD_TYPE}" == "Release" ]; then CXX_FLAGS="${CXX_FLAGS} -mcpu=power8 -mtune=power8" Fortran_FLAGS="${Fortran_FLAGS} -mcpu=power8 -mtune=power8" elif [ "${CLUSTER}" == "sierra" -o "${CLUSTER}" == "lassen" ]; then - # no power9 option shown in the manual C_FLAGS="${C_FLAGS} -mcpu=power9 -mtune=power9" CXX_FLAGS="${CXX_FLAGS} -mcpu=power9 -mtune=power9" Fortran_FLAGS="${Fortran_FLAGS} -mcpu=power9 -mtune=power9" From 20d6374b05d371f9d48da764ece4f0ca5ac76db9 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Mon, 4 Feb 2019 16:21:06 -0800 Subject: [PATCH 083/443] Add end_lr field for polynomial weight decay --- include/lbann/callbacks/callback_learning_rate.hpp | 4 +++- src/callbacks/callback_learning_rate.cpp | 8 +++++--- src/proto/factories/callback_factory.cpp | 1 + src/proto/lbann.proto | 1 + 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/include/lbann/callbacks/callback_learning_rate.hpp b/include/lbann/callbacks/callback_learning_rate.hpp index a2bd0c7cc72..6d110659084 100644 --- a/include/lbann/callbacks/callback_learning_rate.hpp +++ b/include/lbann/callbacks/callback_learning_rate.hpp @@ -238,7 +238,7 @@ class lbann_callback_linear_growth_learning_rate : class lbann_callback_poly_learning_rate : public lbann_callback_learning_rate { public: lbann_callback_poly_learning_rate(double p, uint64_t n_epochs, uint64_t max_iter); - lbann_callback_poly_learning_rate(double p, uint64_t n_epochs, uint64_t max_iter, + lbann_callback_poly_learning_rate(double p, uint64_t 
n_epochs, uint64_t max_iter, double endl_r, std::unordered_set weights_list); lbann_callback_poly_learning_rate( const lbann_callback_poly_learning_rate&) = default; @@ -259,6 +259,8 @@ class lbann_callback_poly_learning_rate : public lbann_callback_learning_rate { uint64_t m_num_epochs; /// The maximum number of iterations until which the learning rate changes uint64_t m_max_iter; + /// The minimum learning rate + float m_end_lr; /// The current rate to scale the base learning rate float m_lr; /// The learning rate scale used at the end of the last epoch diff --git a/src/callbacks/callback_learning_rate.cpp b/src/callbacks/callback_learning_rate.cpp index 196dbd30f50..c87f5ba45ac 100644 --- a/src/callbacks/callback_learning_rate.cpp +++ b/src/callbacks/callback_learning_rate.cpp @@ -223,12 +223,14 @@ lbann_callback_poly_learning_rate::lbann_callback_poly_learning_rate( double p, uint64_t n_epochs, uint64_t max_iter) : lbann_callback_learning_rate(std::unordered_set()), m_p(p), m_num_epochs(n_epochs), m_max_iter(max_iter), + m_end_lr(0.0f), m_lr(1.0f), m_last_epoch_lr(1.0f) {} lbann_callback_poly_learning_rate::lbann_callback_poly_learning_rate( - double p, uint64_t n_epochs, uint64_t max_iter, std::unordered_set weights_list) + double p, uint64_t n_epochs, uint64_t max_iter, double end_lr, std::unordered_set weights_list) : lbann_callback_learning_rate(weights_list), m_p(p), m_num_epochs(n_epochs), m_max_iter(max_iter), + m_end_lr(end_lr), m_lr(1.0f), m_last_epoch_lr(1.0f) {} /** @@ -248,7 +250,7 @@ void lbann_callback_poly_learning_rate::setup(model *m) { float lbann_callback_poly_learning_rate::global_schedule(model *m) { const float scale = m_lr / m_last_epoch_lr; m_last_epoch_lr = m_lr; - return m_cur_global_lr * scale; + return (m_cur_global_lr - m_end_lr) * scale + m_end_lr; } /** @@ -260,7 +262,7 @@ float lbann_callback_poly_learning_rate::optimizer_schedule(model *m, optimizer m_lr = static_cast(std::pow(static_cast(m_max_iter - cur_iter)/m_max_iter, 
m_p)); } const float scale = m_lr / m_last_epoch_lr; - return m_cur_global_lr * scale; + return (m_cur_global_lr - m_end_lr) * scale + m_end_lr; } lbann_callback_optimizerwise_adaptive_learning_rate::lbann_callback_optimizerwise_adaptive_learning_rate( diff --git a/src/proto/factories/callback_factory.cpp b/src/proto/factories/callback_factory.cpp index bedb7c8c515..8582a1454e8 100644 --- a/src/proto/factories/callback_factory.cpp +++ b/src/proto/factories/callback_factory.cpp @@ -183,6 +183,7 @@ lbann_callback* construct_callback(lbann_comm* comm, return new lbann_callback_poly_learning_rate(params.power(), params.num_epochs(), params.max_iter(), + params.end_lr(), selected_weights); } diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 4d0a4ea1c8d..35059ccdc5b 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -526,6 +526,7 @@ message CallbackPolyLearningRate { double power = 2; uint64 num_epochs = 3; uint64 max_iter = 4; + double end_lr = 5; } message CallbackStepMinibatch { From 5ff28fa94372ac307430d45cb02c04e6d09c4795 Mon Sep 17 00:00:00 2001 From: Jae-Seung Yeom Date: Wed, 20 Feb 2019 11:29:34 -0800 Subject: [PATCH 084/443] allow using data without image, which is valid. 
--- src/data_readers/data_reader_jag_conduit.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index acc55ec2717..169c8b288f7 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -541,11 +541,9 @@ void data_reader_jag_conduit::check_image_data() { conduit::Node n_imageset; load_conduit_node(first_idx, m_output_image_prefix, n_imageset); if (static_cast(n_imageset.number_of_children()) == 0u) { - _THROW_LBANN_EXCEPTION_(_CN_, "check_image_data() : no image in data"); return; } if (m_emi_image_keys.size() == 0u) { - _THROW_LBANN_EXCEPTION_(_CN_, "check_image_data() : no image is selected"); return; } for (const auto& emi_tag: m_emi_image_keys) { From 93426d86cc87f034909d9d8396ae0a894f336f12 Mon Sep 17 00:00:00 2001 From: Jae-Seung Yeom Date: Wed, 20 Feb 2019 11:33:39 -0800 Subject: [PATCH 085/443] remove obsolete layer references in the prototext files that cause parsing errors --- .../model_cub_batchnorm_transferred_and_frozen.prototext | 1 - .../model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext | 1 - 2 files changed, 2 deletions(-) diff --git a/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm_transferred_and_frozen.prototext b/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm_transferred_and_frozen.prototext index 27d8cbe6f96..1c8b5a4ffae 100644 --- a/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm_transferred_and_frozen.prototext +++ b/model_zoo/models/siamese/finetune-cub/model_cub_batchnorm_transferred_and_frozen.prototext @@ -966,7 +966,6 @@ model { layer { parents: "fc8_new" name: "prob_new" - children: "target_new" data_layout: "data_parallel" softmax {} } diff --git a/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext b/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext index 
c1c959f41af..1a6fe127950 100644 --- a/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext +++ b/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext @@ -1552,7 +1552,6 @@ model { layer { parents: "fc9" name: "prob" - children: "target" data_layout: "data_parallel" softmax {} } From 8cedf97304f5064c5261c9122b106e88669b04c0 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Wed, 20 Feb 2019 13:13:22 -0800 Subject: [PATCH 086/443] push changes through jag utils --- model_zoo/jag_utils/build_index.cpp | 11 ++++------- model_zoo/jag_utils/build_sample_id_mapping.cpp | 6 ++---- .../jag_utils/check_for_duplicate_samples.cpp | 7 ++----- model_zoo/jag_utils/check_images.cpp | 5 +---- model_zoo/jag_utils/compute_min_max_images.cpp | 5 +---- .../compute_per_channel_image_avg_min_max.cpp | 5 +---- model_zoo/jag_utils/detect_corruption.cpp | 5 +---- model_zoo/jag_utils/dump_bundle.cpp | 4 +--- model_zoo/jag_utils/extract_random_samples.cpp | 8 ++------ model_zoo/jag_utils/load_balance.cpp | 4 +--- model_zoo/jag_utils/load_bundle2raw.cpp | 7 ++----- model_zoo/jag_utils/select_samples.cpp | 15 +++++++-------- model_zoo/jag_utils/test_conduit_hdf5.cpp | 3 +-- model_zoo/jag_utils/test_conduit_with_mpi.cpp | 4 +--- model_zoo/jag_utils/test_mpi.cpp | 4 +--- src/data_store/jag_converter.cpp | 12 ++++++------ 16 files changed, 34 insertions(+), 71 deletions(-) diff --git a/model_zoo/jag_utils/build_index.cpp b/model_zoo/jag_utils/build_index.cpp index a894e4dd81b..29660ae9c40 100644 --- a/model_zoo/jag_utils/build_index.cpp +++ b/model_zoo/jag_utils/build_index.cpp @@ -44,7 +44,7 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); if (master) { @@ -70,7 +70,6 @@ int main(int argc, char *argv[]) { 
"function: constructs an index that lists number of samples\n" " in each file, indices of invalid samples, etc\n"; } - finalize(comm); return EXIT_SUCCESS; } @@ -85,7 +84,7 @@ int main(int argc, char *argv[]) { int rank = comm->get_rank_in_world(); std::stringstream ss; ss << output_fn << "." << rank; - std::ofstream out(ss.str().c_str()); + std::ofstream out(ss.str()); std::cerr << rank << " :: opened for writing: " << ss.str() << "\n"; if (!out.good()) { throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: failed to open " + output_fn + " for writing"); @@ -96,7 +95,7 @@ int main(int argc, char *argv[]) { // get list of input filenames std::vector filenames; - read_filelist(comm, input_fn, filenames); + read_filelist(comm.get(), input_fn, filenames); int num_samples = 0; int num_samples_bad = 0; @@ -169,7 +168,7 @@ if (j >= 400) break; if (!out2) { LBANN_ERROR("failed to open output file"); } - out2 << "CONDUIT_HDF5_EXCLUSION\n" << global_num_samples << " " << global_num_samples_bad + out2 << "CONDUIT_HDF5_EXCLUSION\n" << global_num_samples << " " << global_num_samples_bad << " " << filenames.size() << "\n" << base_dir << "\n"; out2.close(); @@ -200,7 +199,6 @@ if (j >= 400) break; } // if (master) } catch (std::exception const &e) { - finalize(comm); if (master) std::cerr << "caught exception: " << e.what() << "\n"; return EXIT_FAILURE; } catch (...) 
{ @@ -209,7 +207,6 @@ if (j >= 400) break; } // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/build_sample_id_mapping.cpp b/model_zoo/jag_utils/build_sample_id_mapping.cpp index 8feac894855..3814ef676d1 100644 --- a/model_zoo/jag_utils/build_sample_id_mapping.cpp +++ b/model_zoo/jag_utils/build_sample_id_mapping.cpp @@ -20,7 +20,7 @@ using namespace lbann; int main(int argc, char **argv) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); int rank, np; MPI_Comm_rank(MPI_COMM_WORLD, &rank); @@ -36,7 +36,6 @@ int main(int argc, char **argv) { << "assumes: the file '/index.txt' exists\n" << "output: writes the file /id_mapping.txt\n\n"; } - finalize(comm); return(0); } @@ -78,7 +77,7 @@ int main(int argc, char **argv) { out << filenames[j] << " "; ++q; if (q % 10 == 0) cout << rank << " :: " << q/10 << " *10 processed\n"; - const std::string f_name(base_dir + filenames[j]); + const std::string f_name(base_dir + filenames[j]); hid_t hdf5_file_hnd = conduit::relay::io::hdf5_open_file_for_read( f_name ); std::vector cnames; conduit::relay::io::hdf5_group_list_child_names(hdf5_file_hnd, "/", cnames); @@ -117,5 +116,4 @@ int main(int argc, char **argv) { } } - finalize(comm); } diff --git a/model_zoo/jag_utils/check_for_duplicate_samples.cpp b/model_zoo/jag_utils/check_for_duplicate_samples.cpp index 4317382a66f..beb0f18c5c9 100644 --- a/model_zoo/jag_utils/check_for_duplicate_samples.cpp +++ b/model_zoo/jag_utils/check_for_duplicate_samples.cpp @@ -47,7 +47,7 @@ void get_input_names(std::unordered_set &s); //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool 
master = comm->am_world_master(); const int rank = comm->get_rank_in_world(); const int np = comm->get_procs_in_world(); @@ -71,7 +71,7 @@ int main(int argc, char *argv[]) { // read list of conduit filenames std::vector files; const std::string fn = opts->get_string("filelist"); - read_filelist(comm, fn, files); + read_filelist(comm.get(), fn, files); std::unordered_set input_names; get_input_names(input_names); @@ -141,16 +141,13 @@ int main(int argc, char *argv[]) { } } catch (exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (std::exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/check_images.cpp b/model_zoo/jag_utils/check_images.cpp index a87b9931161..4f943dcc176 100644 --- a/model_zoo/jag_utils/check_images.cpp +++ b/model_zoo/jag_utils/check_images.cpp @@ -48,7 +48,7 @@ using namespace lbann; //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); const int rank = comm->get_rank_in_world(); const int np = comm->get_procs_in_world(); @@ -127,16 +127,13 @@ int main(int argc, char *argv[]) { } } catch (exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (std::exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } // Clean up - finalize(comm); return EXIT_SUCCESS; } #endif //#ifdef LBANN_HAS_CONDUIT diff --git a/model_zoo/jag_utils/compute_min_max_images.cpp b/model_zoo/jag_utils/compute_min_max_images.cpp index 2bbac32057d..1d2a305793b 100644 --- a/model_zoo/jag_utils/compute_min_max_images.cpp +++ b/model_zoo/jag_utils/compute_min_max_images.cpp @@ -46,7 +46,7 @@ using 
namespace lbann; //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); const int rank = comm->get_rank_in_world(); const int np = comm->get_procs_in_world(); @@ -227,16 +227,13 @@ std::cerr << rank << " :: opening for reading: " << files[j] << "\n"; } catch (exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (std::exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/compute_per_channel_image_avg_min_max.cpp b/model_zoo/jag_utils/compute_per_channel_image_avg_min_max.cpp index 297e07955c0..d862b4824a5 100644 --- a/model_zoo/jag_utils/compute_per_channel_image_avg_min_max.cpp +++ b/model_zoo/jag_utils/compute_per_channel_image_avg_min_max.cpp @@ -46,7 +46,7 @@ using namespace lbann; //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); const int rank = comm->get_rank_in_world(); const int np = comm->get_procs_in_world(); @@ -233,16 +233,13 @@ std::cerr << rank << " :: opening for reading: " << files[j] << "\n"; } catch (exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (std::exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/detect_corruption.cpp b/model_zoo/jag_utils/detect_corruption.cpp index 65d5de77b8a..9751bf5fc71 100644 --- 
a/model_zoo/jag_utils/detect_corruption.cpp +++ b/model_zoo/jag_utils/detect_corruption.cpp @@ -47,7 +47,7 @@ void get_scalar_names(std::unordered_set &s); //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); const int rank = comm->get_rank_in_world(); const int np = comm->get_procs_in_world(); @@ -192,16 +192,13 @@ int main(int argc, char *argv[]) { } } catch (exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (std::exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/dump_bundle.cpp b/model_zoo/jag_utils/dump_bundle.cpp index c24f1e6804a..a16fc4ff7b0 100644 --- a/model_zoo/jag_utils/dump_bundle.cpp +++ b/model_zoo/jag_utils/dump_bundle.cpp @@ -43,7 +43,7 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); int np = comm->get_procs_in_world(); if (np != 1 || argc == 1) { @@ -52,14 +52,12 @@ int main(int argc, char *argv[]) { << "usage: " << argv[0] << " conduit_bundle_filename\n" << "function: dumps the conduit file to cout\n"; } - finalize(comm); } conduit::Node node; conduit::relay::io::load(argv[1], "hdf5", node); node.print(); - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/extract_random_samples.cpp b/model_zoo/jag_utils/extract_random_samples.cpp index b7fb5ecd35b..6664f67dc10 100644 --- a/model_zoo/jag_utils/extract_random_samples.cpp +++ b/model_zoo/jag_utils/extract_random_samples.cpp @@ -84,7 +84,7 @@ void 
print_sample_ids( //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); const int rank = comm->get_rank_in_world(); const int np = comm->get_procs_in_world(); @@ -96,7 +96,6 @@ int main(int argc, char *argv[]) { if (master) { std::cout << usage(); } - finalize(comm); return EXIT_SUCCESS; } @@ -149,21 +148,18 @@ int main(int argc, char *argv[]) { build_sample_mapping(conduit_filenames, indices, samples); num_files = samples.size(); - extract_samples(comm, rank, np, conduit_filenames, samples); + extract_samples(comm.get(), rank, np, conduit_filenames, samples); } catch (exception& e) { std::cerr << "\n\n" << rank << " ::::: caught exception, outer try/catch: " << e.what() << "\n\n"; El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (std::exception& e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/load_balance.cpp b/model_zoo/jag_utils/load_balance.cpp index d84669e3f0d..1c0954eb154 100644 --- a/model_zoo/jag_utils/load_balance.cpp +++ b/model_zoo/jag_utils/load_balance.cpp @@ -44,7 +44,7 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); const int rank = comm->get_rank_in_world(); const int np = comm->get_procs_in_world(); @@ -206,12 +206,10 @@ int main(int argc, char *argv[]) { } catch (std::exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git 
a/model_zoo/jag_utils/load_bundle2raw.cpp b/model_zoo/jag_utils/load_bundle2raw.cpp index f2e4b7ee880..19b61a77a11 100644 --- a/model_zoo/jag_utils/load_bundle2raw.cpp +++ b/model_zoo/jag_utils/load_bundle2raw.cpp @@ -51,7 +51,7 @@ void get_input_names(std::vector &s); //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); const int rank = comm->get_rank_in_world(); const int np = comm->get_procs_in_world(); @@ -79,7 +79,7 @@ int main(int argc, char *argv[]) { std::vector files; const std::string fn = opts->get_string("filelist"); - read_filelist(comm, fn, files); + read_filelist(comm.get(), fn, files); std::vector scalar_names; std::vector input_names; @@ -212,16 +212,13 @@ std::cerr << rank << " :: num samples: " << cnames.size() << "\n"; } catch (exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (std::exception const &e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } // Clean up - finalize(comm); return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/select_samples.cpp b/model_zoo/jag_utils/select_samples.cpp index b56f55e2ce0..527ab8be1b2 100644 --- a/model_zoo/jag_utils/select_samples.cpp +++ b/model_zoo/jag_utils/select_samples.cpp @@ -21,7 +21,7 @@ using namespace lbann; int main(int argc, char **argv) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); int rank, np; MPI_Comm_rank(MPI_COMM_WORLD, &rank); @@ -39,7 +39,7 @@ int main(int argc, char **argv) { std::stringstream err; // sanity check the cmd line - if (! 
(opts->has_string("index_fn") && opts->has_string("sample_mapping_fn") + if (! (opts->has_string("index_fn") && opts->has_string("sample_mapping_fn") && opts->has_int("num_samples") && opts->has_int("random_seed") && opts->has_string("output_fn"))) { if (master) { @@ -92,7 +92,7 @@ int main(int argc, char **argv) { //========================================================================== // master builds two maps: > maps a filename to the - // set of indices (not sample_ids; that comes later!) that are to be + // set of indices (not sample_ids; that comes later!) that are to be // included and excluded if (master) { @@ -114,7 +114,7 @@ int main(int argc, char **argv) { int num_valid, num_invalid, num_files; in >> num_valid >> num_invalid >> num_files; - getline(in, line); //discard newline + getline(in, line); //discard newline string base_dir; getline(in, base_dir); cerr << "input index file contains " << num_valid << " valid samples\n"; @@ -130,7 +130,7 @@ int main(int argc, char **argv) { break; } } - + // loop over each entry from in input index file; determine which, if any, // local indices will be added to the INCLUSION index int first = 0; @@ -281,10 +281,9 @@ int main(int argc, char **argv) { } } - out << total_good << " " << total_bad << " " << num_include_files + out << total_good << " " << total_bad << " " << num_include_files << "\n" << base_dir << "\n" << sout.str(); } - - finalize(comm); + return EXIT_SUCCESS; } diff --git a/model_zoo/jag_utils/test_conduit_hdf5.cpp b/model_zoo/jag_utils/test_conduit_hdf5.cpp index 1a9cba3175a..a35e23354ca 100644 --- a/model_zoo/jag_utils/test_conduit_hdf5.cpp +++ b/model_zoo/jag_utils/test_conduit_hdf5.cpp @@ -49,7 +49,7 @@ void get_image_names(std::unordered_set &s); //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = 
initialize(argc, argv, random_seed); bool master = comm->am_world_master(); const int np = comm->get_procs_in_world(); @@ -119,7 +119,6 @@ int main(int argc, char *argv[]) { } } - finalize(comm); return 0; } diff --git a/model_zoo/jag_utils/test_conduit_with_mpi.cpp b/model_zoo/jag_utils/test_conduit_with_mpi.cpp index f8e181557fc..45d4f2ea14b 100644 --- a/model_zoo/jag_utils/test_conduit_with_mpi.cpp +++ b/model_zoo/jag_utils/test_conduit_with_mpi.cpp @@ -45,7 +45,7 @@ using namespace lbann; int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); int np = comm->get_procs_in_world(); @@ -80,7 +80,6 @@ int main(int argc, char *argv[]) { } catch (std::exception& e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (...) { std::cerr << "unknown exception in main\n"; @@ -88,7 +87,6 @@ int main(int argc, char *argv[]) { } // Clean up - finalize(comm); return EXIT_SUCCESS; #endif //if 0 diff --git a/model_zoo/jag_utils/test_mpi.cpp b/model_zoo/jag_utils/test_mpi.cpp index e9edd7c45e4..ddb27168b87 100644 --- a/model_zoo/jag_utils/test_mpi.cpp +++ b/model_zoo/jag_utils/test_mpi.cpp @@ -47,7 +47,7 @@ int main(int argc, char *argv[]) { #if 0 int random_seed = lbann_default_random_seed; - lbann_comm *comm = initialize(argc, argv, random_seed); + world_comm_ptr comm = initialize(argc, argv, random_seed); bool master = comm->am_world_master(); int np = comm->get_procs_in_world(); @@ -82,7 +82,6 @@ int main(int argc, char *argv[]) { } catch (std::exception& e) { El::ReportException(e); - finalize(comm); return EXIT_FAILURE; } catch (...) 
{ std::cerr << "unknown exception in main\n"; @@ -90,7 +89,6 @@ int main(int argc, char *argv[]) { } // Clean up - finalize(comm); return EXIT_SUCCESS; #endif //if 0 diff --git a/src/data_store/jag_converter.cpp b/src/data_store/jag_converter.cpp index 4f10daeb24e..a587d2e16ab 100644 --- a/src/data_store/jag_converter.cpp +++ b/src/data_store/jag_converter.cpp @@ -45,13 +45,13 @@ int main(int argc, char *argv[]) { exit(9); #else - lbann_comm *comm = initialize(argc, argv, 42); + world_comm_ptr comm = initialize(argc, argv, 42); std::cerr << "num ranks: " << comm->get_procs_in_world() << "\n"; try { options *opts = options::get(); opts->init(argc, argv); - + std::stringstream err; std::string bundle_fn; @@ -66,7 +66,7 @@ int main(int argc, char *argv[]) { throw lbann_exception(err.str()); } mode = opts->get_string("mode"); - + if (!opts->has_string("bundle")) { err << __FILE__ << " " << __LINE__ << " :: " << "you must pass the option: --bundle=,\n" @@ -102,7 +102,7 @@ int main(int argc, char *argv[]) { } catch (lbann_exception& e) { e.print_report(); - } + } #endif //ifdef LBANN_HAS_CONDUIT @@ -146,7 +146,7 @@ void test(std::string bundle_fn, std::string dir) { size_t total = 0; //=========================================================================\n; - // test #1: + // test #1: // loop over all keys; test that what we get from the jag_io is identical // to what we get directly from the conduit node // @@ -218,7 +218,7 @@ void test(std::string bundle_fn, std::string dir) { << "sanity: " << sanity << " (should be same as total keys tested)\n"; //=========================================================================\n; - // test #2: + // test #2: // test, for each key, that type, num elts, num_bytes identical //=========================================================================\n; for (auto key : keys) { From 60d1952af640d194173f50a9c59395d69d295bd1 Mon Sep 17 00:00:00 2001 From: "Brian C. 
Van Essen" Date: Wed, 20 Feb 2019 15:38:15 -0800 Subject: [PATCH 087/443] Fixed a bug in in the sample list and JAG Conduit data reader so that all files do not have to be opened at load time. Instead, sample files are dynamically opened and the file descriptors are cached. Using a fixed sized unordered map to provide constant access time to the file descriptor cache and random selection criteria for FD closure on hash collision. --- .../data_readers/data_reader_jag_conduit.hpp | 15 ++--- .../lbann/data_readers/sample_list_jag.hpp | 55 +++++++++++++++-- .../data_readers/sample_list_jag_impl.hpp | 40 +++++++------ src/data_readers/data_reader_jag_conduit.cpp | 59 ++++++++++++++----- 4 files changed, 124 insertions(+), 45 deletions(-) diff --git a/include/lbann/data_readers/data_reader_jag_conduit.hpp b/include/lbann/data_readers/data_reader_jag_conduit.hpp index ea6219435f5..4684cb1721c 100644 --- a/include/lbann/data_readers/data_reader_jag_conduit.hpp +++ b/include/lbann/data_readers/data_reader_jag_conduit.hpp @@ -216,19 +216,19 @@ class data_reader_jag_conduit : public generic_data_reader { std::string get_description() const; /// Return the image simulation output of the i-th sample - std::vector get_cv_images(const size_t i, conduit::Node& sample) const; + std::vector get_cv_images(const size_t i, conduit::Node& sample); /** * Return the images of the i-th sample as an 1-D vector of lbann::DataType * There is one image per view, each of which is taken at closest to the bang time. 
*/ - std::vector get_images(const size_t i, conduit::Node& sample) const; + std::vector get_images(const size_t i, conduit::Node& sample); /// Return the scalar simulation output data of the i-th sample - std::vector get_scalars(const size_t i, conduit::Node& sample) const; + std::vector get_scalars(const size_t i, conduit::Node& sample); /// Return the simulation input parameters of the i-th sample - std::vector get_inputs(const size_t i, conduit::Node& sample) const; + std::vector get_inputs(const size_t i, conduit::Node& sample); template static size_t add_val(const std::string key, const conduit::Node& n, std::vector& vals); @@ -247,7 +247,7 @@ class data_reader_jag_conduit : public generic_data_reader { static std::string to_string(const variable_t t); /// print the schema of the specific sample identified by a given id - void print_schema(const size_t i) const; + void print_schema(const size_t i); void clear_image_normalization_params(); void clear_scalar_normalization_params(); @@ -353,12 +353,13 @@ class data_reader_jag_conduit : public generic_data_reader { /** Load the conduit node with the data of the sample i identified by key * from the file that contains the sample. 
*/ - bool load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) const; + bool load_conduit_node(const size_t i, const std::string& key, conduit::Node& node); /// Check if a key exist for sample i bool has_conduit_path(const size_t i, const std::string& key) const; + void close_conduit_node(const size_t i); /// Obtain image data - std::vector< std::vector > get_image_data(const size_t i, conduit::Node& sample) const; + std::vector< std::vector > get_image_data(const size_t i, conduit::Node& sample); bool data_store_active() const { bool flag = generic_data_reader::data_store_active(); diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index a209002ff6a..9719a7a3046 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -12,10 +12,12 @@ #include #endif +#include "lbann/utils/file_utils.hpp" #include #include #include #include +#include "conduit/conduit_relay_io_hdf5.hpp" namespace lbann { @@ -73,7 +75,7 @@ class sample_list_jag { /// To describe a sample as a pair of the file to which it belongs and its name // using sample_t = std::pair; using sample_t = std::pair; - using sample_id_map_t = std::pair; + using sample_id_map_t = std::string; /// Type for the list of samples using samples_t = std::vector< sample_t >; using samples_id_map_v_t = std::vector< sample_id_map_t >; @@ -140,19 +142,57 @@ class sample_list_jag { const sample_t& operator[](size_t idx) const; const std::string& get_samples_filename(sample_id_t id) const { - return (m_sample_id_map[id]).first; + return m_sample_id_map[id]; + } + + const std::string& get_samples_dirname() const { + return m_header.get_file_dir(); } hid_t get_samples_hdf5_handle(sample_id_t id) const { - return (m_sample_id_map[id]).second; + const std::string& filename = m_sample_id_map[id]; + hid_t h = 0; + if(m_open_fd_map.count(filename) != 0) { + h = m_open_fd_map.at(filename); + } + 
return h; } void set_samples_filename(sample_id_t id, const std::string& filename) { - m_sample_id_map[id].first = filename; + m_sample_id_map[id] = filename; } void set_samples_hdf5_handle(sample_id_t id, hid_t h) { - m_sample_id_map[id].second = h; + const std::string& filename = m_sample_id_map[id]; + + int bucket = m_open_fd_map.bucket(filename); + if(m_open_fd_map.bucket_size(bucket) > 0) { + if(m_open_fd_map.bucket_size(bucket) != 1) { + LBANN_ERROR(std::string{} + " :: unexpected number of open file descriptors for bucket " + + std::to_string(bucket)); + } + // std::cout << "I am adding a file handle for " << filename << " at bucket " << std::to_string(m_open_fd_map.bucket(filename)) << " and there are " << std::to_string(m_open_fd_map.bucket_size(m_open_fd_map.bucket(filename))) << " entries in the bucket." << std::endl; + // std::cout << "Inside of the bucket I have "; + for ( auto local_it = m_open_fd_map.begin(bucket); local_it!= m_open_fd_map.end(bucket); ++local_it ) { + // std::cout << " " << local_it->first << ":" << local_it->second; + const std::string& old_filename = local_it->first; + hid_t old_h = local_it->second; + if (old_h <= static_cast(0)) { + LBANN_ERROR(std::string{} + " :: data file '" + old_filename + + "' has a corrupt file descriptor = " + std::to_string(old_h)); + } + conduit::relay::io::hdf5_close_file(old_h); + int num_erased = m_open_fd_map.erase(old_filename); + if(num_erased != 1) { + LBANN_ERROR(std::string{} + " :: erasing file descriptor for '" + old_filename + + "' that had a file descriptor = " + std::to_string(old_h)); + } + } + // std::cout << std::endl; + } + + + m_open_fd_map.emplace(filename, h); } void all_gather_archive(const std::string &archive, std::vector& gathered_archive, lbann_comm& comm); @@ -202,7 +242,12 @@ class sample_list_jag { /// Maps a global index to a local index sample_list_indexer m_indexer; + /// Track the number of samples per file std::unordered_map m_file_map; + + /// Track the number of open 
file descriptors + std::unordered_map m_open_fd_map; + }; void handle_mpi_error(int ierr); diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index a5bde385c8d..923c649c60b 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -20,6 +20,8 @@ #include #include +#define LBANN_MAX_OPEN_DATA_FILES 100 + namespace lbann { inline sample_list_header::sample_list_header() @@ -282,7 +284,7 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str } sample_id_t index = m_sample_id_map.size(); - m_sample_id_map.emplace_back(std::make_pair(filename, hdf5_file_hnd)); + m_sample_id_map.emplace_back(filename); size_t valid_sample_count = 0u; for(auto s : sample_names) { @@ -300,6 +302,8 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str + std::string(" samples, but found ") + std::to_string(valid_sample_count)); } + + conduit::relay::io::hdf5_close_file(hdf5_file_hnd); } if (m_header.get_num_files() != cnt_files) { @@ -369,7 +373,7 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str std::unordered_set set_of_samples(sample_names.begin(), sample_names.end()); sample_id_t index = m_sample_id_map.size(); - m_sample_id_map.emplace_back(std::make_pair(filename, hdf5_file_hnd)); + m_sample_id_map.emplace_back(filename); size_t valid_sample_count = 0u; while(!sstr.eof()) { @@ -388,6 +392,8 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str + std::string(" samples, but found ") + std::to_string(valid_sample_count)); } + + conduit::relay::io::hdf5_close_file(hdf5_file_hnd); } if (m_header.get_num_files() != cnt_files) { @@ -517,15 +523,23 @@ inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { size_t num_files = all_gather_field(m_file_map, per_rank_file_map, comm); // Close the existing open files - for(auto 
f : m_sample_id_map) { + for(auto f : m_open_fd_map) { conduit::relay::io::hdf5_close_file(f.second); } m_sample_list.clear(); m_sample_id_map.clear(); + m_open_fd_map.clear(); + m_sample_list.reserve(num_samples); m_sample_id_map.reserve(num_ids); m_file_map.reserve(num_files); + /// Create an unordered map that will not rehash and has a fixed + /// number of buckets. This allows the sample list to easily select + /// a file descriptor for closing + m_open_fd_map.reserve(num_files); + m_open_fd_map.rehash(LBANN_MAX_OPEN_DATA_FILES); + m_open_fd_map.max_load_factor(std::numeric_limits::max()); for(int r = 0; r < num_ranks; r++) { const samples_t& sample_list = per_rank_samples[r]; @@ -533,28 +547,18 @@ inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { const std::unordered_map& file_map = per_rank_file_map[r]; for (const auto& s : sample_list) { sample_id_t index = s.first; - const std::string& filename = sample_id_map[index].first; + const std::string& filename = sample_id_map[index]; if(index >= m_sample_id_map.size() - || (m_sample_id_map.back()/*[m_sample_id_map.size()-1]*/.first != filename)) { + || (m_sample_id_map.back() != filename)) { index = m_sample_id_map.size(); - - // Open the file on this rank - const std::string conduit_file_path = add_delimiter(m_header.get_file_dir()) + filename; - if (filename.empty() || !check_if_file_exists(conduit_file_path)) { - LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' does not exist."); - } - hid_t hdf5_file_hnd = conduit::relay::io::hdf5_open_file_for_read( conduit_file_path ); - if (hdf5_file_hnd <= static_cast(0)) { - LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' could not be opened."); - } - m_sample_id_map.emplace_back(std::make_pair(filename, hdf5_file_hnd)); + m_sample_id_map.emplace_back(filename); // Update the file map structure if(m_file_map.count(filename) == 0) { m_file_map[filename] = file_map.at(filename); } }else { for(size_t i = 0; 
i < m_sample_id_map.size(); i++) { - if(filename == m_sample_id_map[i].first) { + if(filename == m_sample_id_map[i]) { index = i; break; } @@ -608,7 +612,7 @@ inline bool sample_list_jag::to_string(size_t p, std::string& sstr) const { std::map> tmp_file_map; for (const auto& s : m_sample_list) { - std::string filename = (m_sample_id_map[s.first]).first; + std::string filename = m_sample_id_map[s.first]; tmp_file_map[filename].emplace_back(s.second); } diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index acc55ec2717..c5602347934 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -308,7 +308,7 @@ const conduit::Node& data_reader_jag_conduit::get_conduit_node(const conduit::No return n_base[key]; } -bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) const { +bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) { const sample_t& s = m_sample_list[i]; const std::string& sample_name = s.second; const std::string path = sample_name + key; @@ -316,8 +316,20 @@ bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::strin sample_id_t id = s.first; hid_t h = m_sample_list.get_samples_hdf5_handle(id); const std::string& file_name = m_sample_list.get_samples_filename(id); - if (h <= static_cast(0) || !conduit::relay::io::hdf5_has_path(h, path)) { - LBANN_ERROR(get_type() + ":: Cannot open file " + file_name + \ + if (h <= static_cast(0)) { + const std::string conduit_file_path = add_delimiter(m_sample_list.get_samples_dirname()) + file_name; + if (file_name.empty() || !check_if_file_exists(conduit_file_path)) { + LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' does not exist."); + } + h = conduit::relay::io::hdf5_open_file_for_read( conduit_file_path ); + if (h <= static_cast(0)) { + 
LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' could not be opened."); + } + m_sample_list.set_samples_hdf5_handle(id, h); + } + + if (!conduit::relay::io::hdf5_has_path(h, path)) { + LBANN_ERROR(get_type() + ":: Cannot open HDF5 path in file " + file_name + \ " for sample "+ sample_name); return false; } @@ -327,6 +339,17 @@ bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::strin return true; } +void data_reader_jag_conduit::close_conduit_node(const size_t i) { + const sample_t& s = m_sample_list[i]; + + sample_id_t id = s.first; + hid_t h = m_sample_list.get_samples_hdf5_handle(id); + if (h > static_cast(0)) { + conduit::relay::io::hdf5_close_file(h); + m_sample_list.set_samples_hdf5_handle(id, 0); + } +} + bool data_reader_jag_conduit::has_conduit_path(const size_t i, const std::string& key) const { const sample_t& s = m_sample_list[i]; sample_id_t id = s.first; @@ -754,14 +777,10 @@ void data_reader_jag_conduit::load() { /// how index lists are used between trainers and models /// @todo m_list_per_trainer || m_list_per_model load_list_of_samples(sample_list_file, m_comm->get_procs_per_trainer(), m_comm->get_rank_in_trainer()); - m_sample_list.all_gather_packed_lists(*m_comm); - std::stringstream s; - std::string basename = get_basename_without_ext(sample_list_file); - std::string ext = get_ext_name(sample_list_file); - s << "r" << m_comm->get_rank_in_trainer() << "_per_rank_" << basename << "." 
<< ext; - m_sample_list.write(s.str()); + /// Check the data that each rank loaded if (!m_is_data_loaded) { + std::cout << "Checking local data" << std::endl; m_is_data_loaded = true; if (m_scalar_keys.size() == 0u) { @@ -776,6 +795,15 @@ void data_reader_jag_conduit::load() { check_image_data(); } + + /// Merge all of the sample lists + m_sample_list.all_gather_packed_lists(*m_comm); + std::stringstream s; + std::string basename = get_basename_without_ext(sample_list_file); + std::string ext = get_ext_name(sample_list_file); + s << "r" << m_comm->get_rank_in_trainer() << "_per_rank_" << basename << "." << ext; + m_sample_list.write(s.str()); + m_shuffled_indices.resize(m_sample_list.size()); std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0); @@ -1072,7 +1100,7 @@ bool data_reader_jag_conduit::check_non_numeric(const std::string key) { std::vector< std::vector > -data_reader_jag_conduit::get_image_data(const size_t sample_id, conduit::Node& sample) const { +data_reader_jag_conduit::get_image_data(const size_t sample_id, conduit::Node& sample) { std::vector< std::vector > image_ptrs; image_ptrs.reserve(m_emi_image_keys.size()); @@ -1117,7 +1145,7 @@ void data_reader_jag_conduit::image_normalization(cv::Mat& img, size_t i, size_t img.convertTo(img, -1, tr.first, tr.second); } -std::vector data_reader_jag_conduit::get_cv_images(const size_t sample_id, conduit::Node& sample) const { +std::vector data_reader_jag_conduit::get_cv_images(const size_t sample_id, conduit::Node& sample) { const std::vector< std::vector > img_data(get_image_data(sample_id, sample)); std::vector images; @@ -1155,7 +1183,7 @@ std::vector data_reader_jag_conduit::get_cv_images(const size_t sample_ return images; } -std::vector data_reader_jag_conduit::get_images(const size_t sample_id, conduit::Node& sample) const { +std::vector data_reader_jag_conduit::get_images(const size_t sample_id, conduit::Node& sample) { std::vector< std::vector > img_data(get_image_data(sample_id, 
sample)); std::vector images; @@ -1193,7 +1221,7 @@ std::vector data_reader_jag_conduit::get_images(c return images; } -std::vector data_reader_jag_conduit::get_scalars(const size_t sample_id, conduit::Node& sample) const { +std::vector data_reader_jag_conduit::get_scalars(const size_t sample_id, conduit::Node& sample) { std::vector scalars; scalars.reserve(m_scalar_keys.size()); @@ -1219,7 +1247,7 @@ std::vector data_reader_jag_conduit::get_scal return scalars; } -std::vector data_reader_jag_conduit::get_inputs(const size_t sample_id, conduit::Node& sample) const { +std::vector data_reader_jag_conduit::get_inputs(const size_t sample_id, conduit::Node& sample) { std::vector inputs; inputs.reserve(m_input_keys.size()); @@ -1391,6 +1419,7 @@ bool data_reader_jag_conduit::fetch_datum(CPUMat& X, int data_id, int mb_idx) { m_jag_store->set_conduit_node(data_id, node); } + // close_conduit_node(data_id); return ok; } @@ -1440,7 +1469,7 @@ void data_reader_jag_conduit::save_image(Mat& pixels, const std::string filename internal_save_image(pixels, filename, m_image_height, m_image_width, 1, do_scale); } -void data_reader_jag_conduit::print_schema(const size_t sample_id) const { +void data_reader_jag_conduit::print_schema(const size_t sample_id) { //@TODO revisit later -- don't know how to handle this yet if (m_data_store != nullptr) { return; From ce1a427ea2a6d57b00c3b73d410d28cd29c62e08 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 20 Feb 2019 15:44:06 -0800 Subject: [PATCH 088/443] Refactoring optimizer base class. Putting matrices in `unique_ptr`s. Removing gradient staging matrix. Using enum rather than bools to track status of gradient allreduce. Expanding documentation. 
--- .../layers/learning/base_convolution.hpp | 18 +- include/lbann/layers/transform/weights.hpp | 10 +- include/lbann/lbann.hpp | 6 +- include/lbann/optimizers/optimizer.hpp | 276 ++++++++------ src/callbacks/CMakeLists.txt | 4 +- src/layers/learning/fully_connected.cpp | 50 ++- .../regularizers/batch_normalization.cpp | 10 +- .../regularizers/batch_normalization.cu | 20 +- src/optimizers/CMakeLists.txt | 18 +- src/optimizers/optimizer.cpp | 358 ++++++++++-------- src/optimizers/rmsprop.cpp | 2 +- src/proto/factories/callback_factory.cpp | 4 + src/proto/factories/optimizer_factory.cpp | 2 + src/weights/weights.cpp | 6 +- 14 files changed, 447 insertions(+), 337 deletions(-) diff --git a/include/lbann/layers/learning/base_convolution.hpp b/include/lbann/layers/learning/base_convolution.hpp index 7d704a4e8e2..f8175301b16 100644 --- a/include/lbann/layers/learning/base_convolution.hpp +++ b/include/lbann/layers/learning/base_convolution.hpp @@ -631,8 +631,9 @@ class base_convolution_layer : public learning_layer { m_bias_cudnn_desc, m_bias_gradient.Buffer())); } - bias_optimizer->add_to_gradient_staging(m_bias_gradient, - m_bias_scaling_factor / effective_mini_batch_size); + bias_optimizer->add_to_gradient(m_bias_gradient, + m_bias_scaling_factor / effective_mini_batch_size, + true); } // Compute kernel gradient @@ -718,8 +719,9 @@ class base_convolution_layer : public learning_layer { } // Add gradient contribution - kernel_optimizer->add_to_gradient_staging(m_kernel_gradient, - one / effective_mini_batch_size); + kernel_optimizer->add_to_gradient(m_kernel_gradient, + one / effective_mini_batch_size, + true); } @@ -910,8 +912,7 @@ class base_convolution_layer : public learning_layer { local_bias_gradient(channel, 0) = m_bias_scaling_factor * sum; } const DataType bias_scale = m_bias_scaling_factor / effective_mini_batch_size; - bias_optimizer->add_to_gradient_staging(m_bias_gradient, - bias_scale); + bias_optimizer->add_to_gradient(m_bias_gradient, bias_scale, true); 
} // Stop early if kernel is not being optimized @@ -970,8 +971,9 @@ class base_convolution_layer : public learning_layer { // Scale and accumulate gradients const DataType kernel_scale = DataType(1) / effective_mini_batch_size; - kernel_optimizer->add_to_gradient_staging(m_kernel_gradient, - kernel_scale); + kernel_optimizer->add_to_gradient(m_kernel_gradient, + kernel_scale, + true); } diff --git a/include/lbann/layers/transform/weights.hpp b/include/lbann/layers/transform/weights.hpp index 1d9b63a7778..c834497e9f9 100644 --- a/include/lbann/layers/transform/weights.hpp +++ b/include/lbann/layers/transform/weights.hpp @@ -172,6 +172,8 @@ class weights_layer : public transform_layer { } void bp_compute() override { + constexpr DataType zero = 1; + constexpr DataType one = 1; // Get optimizer // Note: Nothing needs to be done if there is no optimizer @@ -181,14 +183,14 @@ class weights_layer : public transform_layer { // Matrices const auto& local_gradient_wrt_output = get_local_prev_error_signals(); m_workspace->Resize(local_gradient_wrt_output.Width(), 1); - El::Fill(*m_workspace, DataType(1)); + El::Fill(*m_workspace, one); // Compute gradient contribution and accumulate - const auto& scale = DataType(1) / this->m_model->get_effective_mini_batch_size(); + const auto& scale = one / this->m_model->get_effective_mini_batch_size(); El::Gemv(El::NORMAL, scale, local_gradient_wrt_output, *m_workspace, - DataType(0), m_gradient->Matrix()); - opt->add_to_gradient_staging(*m_gradient); + zero, m_gradient->Matrix()); + opt->add_to_gradient(*m_gradient, one, true); // Clean up m_workspace->Empty(); diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index 8b3da4d1d83..ac687473e3c 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -146,7 +146,7 @@ #include "lbann/callbacks/callback_dump_gradients.hpp" #include "lbann/callbacks/callback_dump_minibatch_sample_indices.hpp" #include "lbann/callbacks/callback_early_stopping.hpp" -#include 
"lbann/callbacks/callback_ltfb.hpp" +// #include "lbann/callbacks/callback_ltfb.hpp" #include "lbann/callbacks/callback_save_images.hpp" #include "lbann/callbacks/callback_save_model.hpp" #include "lbann/callbacks/profiler.hpp" @@ -162,7 +162,7 @@ #include "lbann/callbacks/callback_confusion_matrix.hpp" #include "lbann/callbacks/callback_check_gradients.hpp" #include "lbann/callbacks/callback_check_metric.hpp" -#include "lbann/callbacks/callback_perturb_adam.hpp" +// #include "lbann/callbacks/callback_perturb_adam.hpp" /// Weights and weight initializers #include "lbann/weights/weights.hpp" @@ -170,11 +170,13 @@ #include "lbann/weights/variance_scaling_initializers.hpp" /// Optimizers +#if 0 #include "lbann/optimizers/adagrad.hpp" #include "lbann/optimizers/adam.hpp" #include "lbann/optimizers/hypergradient_adam.hpp" #include "lbann/optimizers/rmsprop.hpp" #include "lbann/optimizers/sgd.hpp" +#endif // 0 /// Objective functions #include "lbann/objective_functions/objective_function.hpp" diff --git a/include/lbann/optimizers/optimizer.hpp b/include/lbann/optimizers/optimizer.hpp index 14edb8fc467..ef9eb7cca79 100644 --- a/include/lbann/optimizers/optimizer.hpp +++ b/include/lbann/optimizers/optimizer.hpp @@ -24,183 +24,237 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_OPTIMIZER_HPP -#define LBANN_OPTIMIZER_HPP +#ifndef LBANN_OPTIMIZERS_OPTIMIZER_HPP_INCLUDED +#define LBANN_OPTIMIZERS_OPTIMIZER_HPP_INCLUDED +#include +#include +#include #include "lbann/utils/compiler_control.hpp" #include "lbann/base.hpp" #include "lbann/comm.hpp" #include "lbann/utils/exception.hpp" #include "lbann/utils/description.hpp" #include "lbann/weights/weights.hpp" -#include -#include - #ifdef LBANN_HAS_GPU #include "lbann/utils/cuda.hpp" #endif // LBANN_HAS_GPU namespace lbann { +/** Status of values in objective function gradient. 
*/ +enum class optimizer_gradient_status { + /** Values can be accessed immediately. */ + ready, + /** @brief Values have been cleared. + * @detailed Buffer must be zeroed out before accessing. + */ + cleared, + /** Allreduce is needed before accessing values. */ + allreduce_needed, + /** @brief Allreduce on values is in progress. + * @detailed Non-blocking allreduce must be synchronized before + * accessing. + */ + allreduce_started +}; + +/** Human-readable string for status of gradient in optimizer. */ +std::string to_string(optimizer_gradient_status status); + // Forward declarations class weights; class persist; -/** Abstract optimizer. */ +/** Abstract base class for gradient-based optimization algorithms. + * + * Uses a variant of stochastic gradient descent to optimize the + * values in a @c weights instance. The weights values are + * iteratively adjusted to minimize an objective function. Each + * optimization step requires the objective function gradient + * w.r.t. the weights. + */ class optimizer { - public: +public: - optimizer(lbann_comm* comm, DataType learning_rate = DataType(0)); + optimizer(lbann_comm* comm, DataType learning_rate = 0); optimizer(const optimizer& other); optimizer& operator=(const optimizer& other); - virtual ~optimizer(); + virtual ~optimizer() = default; + + /** Create a copy of the class instance. + * + * The caller is responsible for deallocating the returned object. + */ virtual optimizer* copy() const = 0; - /** Get the optimizer name. */ + /** Human-readable type name. */ virtual std::string get_type() const = 0; /** Human-readable description. */ virtual description get_description() const; - /** Whether the optimizer has been set up. */ - inline bool is_initialized() const { return m_weights != nullptr; } - - /** Get weights being optimized. */ + /** Weights being optimized. */ weights& get_weights(); - /** Set weights being optimized. */ - void set_weights(weights& w) { m_weights = &w; } - /** Get learning rate. 
*/ - DataType get_learning_rate() const { return m_learning_rate; } - /** Set learning rate. */ - void set_learning_rate(DataType learning_rate) { - m_learning_rate = learning_rate; - }; - - /** Get gradient matrix. */ - const AbsDistMat& get_gradient(); - - /** Clear gradient matrix. */ - void clear_gradient(); - /** Add to the gradient matrix. */ - void add_to_gradient(const AbsDistMat& gradient, - DataType scale = DataType(1)); + /** Weights being optimized. */ + const weights& get_weights() const; + /** Weights being optimized. */ + void set_weights(weights* w) { m_weights = w; } - /** Add to the gradient staging matrix. - * When the gradient is needed, an allreduce is applied over the - * redundant communicator of the staging matrix and the result is - * added to the gradient. - */ - void add_to_gradient_staging(const AbsDistMat& gradient, - DataType scale = DataType(1)); - /** Start allreduce on the gradient staging matrix. - * If an allreduce is not needed or if it has already started, this - * function does nothing. This may call a non-blocking allreduce. + /** Objective function gradient w.r.t. the weights. + * + * An allreduce may be launched and/or synchronized if needed. */ - void start_gradient_staging_allreduce(); + AbsDistMat& get_gradient(); - /** Get number of gradient sources. - * This is the number of objects that contribute to the gradient - * but have not added their contributions yet. - */ - int get_num_gradient_sources() const { return m_gradient_sources.size(); } - /** Add a gradient source. - * Objects that depend on the weights being optimized and which - * contribute to the gradient should add themselves as a gradient - * source. + /** Add to the objective function gradient w.r.t. the weights. */ + void add_to_gradient(const AbsDistMat& gradient, + DataType scale = DataType(1), + bool allreduce_needed = false); + /** Zero out the objective function gradient w.r.t. the weights. 
*/ + void clear_gradient(); + + /** Objects that are expected to contribute to the gradient. */ + El::Int get_num_gradient_sources() const; + /** Register a gradient source. + * + * Any object that uses the weights and influences the objective + * function is expected to contribute to the objective function + * gradient. These objects should register themselves during + * forward prop. */ void add_gradient_source(const void* source); - /** Remove a gradient source. - * Objects that contribute to the gradient should remove themselves - * as gradient sources when they add to the gradient. If there are - * no more gradient sources remaining, an allreduce is started on - * the gradient staging matrix. + /** Unregister a gradient source. + * + * When an object adds its contribution to the objective function + * gradient during back prop, it should unregister itself. If there + * are no more gradient sources remaining, a non-blocking allreduce + * will be launched on the gradient, if needed. */ void remove_gradient_source(const void* source); - /** Setup optimizer. */ - virtual void setup(weights& w); + /** Must be called before training. + * + * @param w Weights being optimized. If null, no change is made to + * the weights. + */ + virtual void setup(weights* w = nullptr); - /** Apply an optimization step. */ + /** Optimization step. */ void step(); - /** Perform the computation in an optimization step. - * It can be assumed that values and gradient are the same size and - * have the same matrix distribution. + + /** LBANN communicator. */ + lbann_comm& get_comm() { return *m_comm; } + /** LBANN communicator. */ + const lbann_comm& get_comm() const { return *m_comm; } + + /** Scaling factor for optimization step sizes. */ + DataType get_learning_rate() const; + /** Scaling factor for optimization step sizes. */ + void set_learning_rate(DataType learning_rate); + + /** Time spent in optimization step. 
*/ + EvalType get_step_time() const { return m_step_time; } + /** Reset stats counters. */ + virtual void reset_counters() { m_step_time = 0; } + +protected: + + /** Computation for an optimization step on CPU. + * + * @c values and @gradient can be assumed to have the same + * distribution. */ virtual void step_compute(AbsDistMat& values, const AbsDistMat& gradient) = 0; -#ifdef LBANN_HAS_GPU - /** Perform the computation in an optimization step on GPU. - * The default implementation is to transfer data to CPU and call - * step_compute. +#ifdef LBANN_HAS_CUDA + /** Computation for an optimization step on GPU. + * + * The default implementation is to throw an exception. @c values + * and @gradient can be assumed to have the same distribution. */ virtual void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient); -#endif // LBANN_HAS_GPU - - /** Get the time spent in step(). */ - double get_step_time() const { return m_step_time; } - /** Reset stats counters. */ - virtual void reset_counters() { - m_step_time = 0.0; - } +#endif // LBANN_HAS_CUDA - protected: +private: /** LBANN communicator. */ - lbann_comm *m_comm; + lbann_comm* m_comm; /** Weights being optimized. */ - weights* m_weights; - - /** Learning rate. */ - DataType m_learning_rate; + weights* m_weights = nullptr; - /** Gradient matrix. */ - AbsDistMat* m_gradient; + /** Objective function gradient w.r.t. weights. */ + std::unique_ptr m_gradient; - private: + /** Workspace matrix. + * + * Helps ensure gradient contributions are in the right + * distribution. Most of the time, this should just be a matrix + * view. + */ + std::unique_ptr m_gradient_v; /** Sources of gradient contributions. - * This set contains pointers to objects (i.e. layers and objective - * function terms) which depend on the weights being optimized and - * which contribute to the gradient. 
Objects should add themselves - * to the set as they request the weights and they should remove - * themselves as they add their gradient contribution. Once this - * set is empty, it is safe to perform an allreduce on the gradient - * staging matrix. + * + * This set contains pointers to objects (e.g. layers and objective + * function terms) that contribute to the objective function + * gradient. Objects should register themselves as they use the + * weights during forward prop and unregister themselves as they + * add their gradient contributions. Once this set is empty, it is + * safe to launch a non-blocking allreduce on the gradient, if + * needed. */ std::unordered_set m_gradient_sources; - /** Gradient staging matrix. - * When the gradient is needed, an allreduce is applied over the - * redundant communicator of the staging matrix and the result is - * added to the gradient matrix. + /** Status of values in objective function gradient. */ + optimizer_gradient_status m_gradient_status = optimizer_gradient_status::cleared; + + /** Communication request object for gradient allreduce. + * + * Used to synchronize non-blocking allreduce. */ - AbsDistMat* m_gradient_staging; + Al::request m_gradient_allreduce_req; - /** Whether the gradient staging matrix requires an allreduce. */ - bool m_gradient_allreduce_needed; - /** Whether an allreduce on the gradient staging matrix has started. */ - bool m_gradient_allreduce_started; - /** Whether an allreduce on the gradient staging matrix has been finished. */ - bool m_gradient_allreduce_finished; + /** Scaling factor for optimization step sizes. + * + * This is not used by the base optimizer class, but is currently + * used by all derived optimizer classes. There are several cases + * where it is convenient to expose this in the base class, + * e.g. for variable learning rate schedules. + * @todo Consider moving this to the derived classes. + */ + DataType m_learning_rate; - /** Running count of the time spent in step(). 
*/ - double m_step_time = 0.0; + /** Time spent in optimization step. */ + EvalType m_step_time = 0; - /** The request for non-blocking allreduces. */ - Al::request m_gradient_allreduce_req; + /** Launch non-blocking allreduce on the gradient, if needed. + * + * Does nothing if an allreduce is not needed or has already been + * started. + */ + void start_gradient_allreduce(); + + /** Synchronize non-blocking allreduce on the gradient, if needed. + * + * Does nothing if an allreduce isn't needed. Throws an exception + * if an allreduce is needed but hasn't been started. + */ + void finish_gradient_allreduce(); -//************************************************************************ -// Checkpointing -//************************************************************************ - public: +public: + + // =========================================== + // Checkpointing + // =========================================== virtual bool save_to_checkpoint_shared(persist& p, std::string m_name); virtual bool load_from_checkpoint_shared(persist& p, std::string m_name); - virtual bool save_to_checkpoint_distributed(persist& p, std::string m_name); virtual bool load_from_checkpoint_distributed(persist& p, std::string m_name); + }; } // namespace lbann -#endif // LBANN_OPTIMIZER_HPP +#endif // LBANN_OPTIMIZERS_OPTIMIZER_HPP_INCLUDED diff --git a/src/callbacks/CMakeLists.txt b/src/callbacks/CMakeLists.txt index 2b29975b44b..a841c3a357e 100644 --- a/src/callbacks/CMakeLists.txt +++ b/src/callbacks/CMakeLists.txt @@ -19,8 +19,8 @@ set_full_path(THIS_DIR_SOURCES callback_imcomm.cpp callback_io.cpp callback_learning_rate.cpp - callback_ltfb.cpp - callback_perturb_adam.cpp +# callback_ltfb.cpp +# callback_perturb_adam.cpp callback_print.cpp callback_save_images.cpp callback_save_model.cpp diff --git a/src/layers/learning/fully_connected.cpp b/src/layers/learning/fully_connected.cpp index e7728bcf478..c522bfae2b6 100644 --- a/src/layers/learning/fully_connected.cpp +++ 
b/src/layers/learning/fully_connected.cpp @@ -126,9 +126,10 @@ void fully_connected_layer::bp_com && bias_optimizer != nullptr) { El::RowSum(local_gradient_wrt_output, m_bias_gradient->Matrix()); - bias_optimizer->add_to_gradient_staging( + bias_optimizer->add_to_gradient( *m_bias_gradient, - m_bias_scaling_factor / mini_batch_size); + m_bias_scaling_factor / mini_batch_size, + true); } // Compute gradient w.r.t. linearity if needed @@ -145,9 +146,10 @@ void fully_connected_layer::bp_com DataType(1), local_gradient_wrt_output, local_input, DataType(0), m_linearity_gradient->Matrix()); } - linearity_optimizer->add_to_gradient_staging( + linearity_optimizer->add_to_gradient( *m_linearity_gradient, - DataType(1) / mini_batch_size); + DataType(1) / mini_batch_size, + true); } else { if (m_transpose) { El::Gemm(El::NORMAL, El::TRANSPOSE, @@ -227,9 +229,10 @@ void fully_connected_layer::bp_comp && bias_optimizer != nullptr) { El::RowSum(local_gradient_wrt_output, m_bias_gradient->Matrix()); - bias_optimizer->add_to_gradient_staging( + bias_optimizer->add_to_gradient( *m_bias_gradient, - m_bias_scaling_factor / mini_batch_size); + m_bias_scaling_factor / mini_batch_size, + true); } // Compute gradient w.r.t. linearity if needed @@ -237,16 +240,17 @@ void fully_connected_layer::bp_comp if (linearity_optimizer != nullptr) { if (m_transpose) { El::Gemm(El::NORMAL, El::TRANSPOSE, - DataType(1), local_input, local_gradient_wrt_output, + DataType(1), local_input, local_gradient_wrt_output, DataType(0), m_linearity_gradient->Matrix()); } else { El::Gemm(El::NORMAL, El::TRANSPOSE, DataType(1), local_gradient_wrt_output, local_input, DataType(0), m_linearity_gradient->Matrix()); } - linearity_optimizer->add_to_gradient_staging( + linearity_optimizer->add_to_gradient( *m_linearity_gradient, - DataType(1) / mini_batch_size); + DataType(1) / mini_batch_size, + true); } // Compute gradient w.r.t. 
input @@ -286,7 +290,7 @@ void fully_connected_layer::fp_comp m_bias_scaling_factor, local_bias, ones, DataType(1), local_output); } - + } /** GPU implementation of backward prop computation. */ @@ -320,9 +324,10 @@ void fully_connected_layer::bp_comp m_bias_scaling_factor, local_gradient_wrt_output, ones, DataType(0), m_bias_gradient->Matrix()); } - bias_optimizer->add_to_gradient_staging( + bias_optimizer->add_to_gradient( *m_bias_gradient, - m_bias_scaling_factor / mini_batch_size); + m_bias_scaling_factor / mini_batch_size, + true); } // Compute gradient w.r.t. linearity if needed @@ -330,16 +335,17 @@ void fully_connected_layer::bp_comp if (linearity_optimizer != nullptr) { if (m_transpose) { El::Gemm(El::NORMAL, El::TRANSPOSE, - DataType(1), local_input, local_gradient_wrt_output, + DataType(1), local_input, local_gradient_wrt_output, DataType(0), m_linearity_gradient->Matrix()); } else { El::Gemm(El::NORMAL, El::TRANSPOSE, DataType(1), local_gradient_wrt_output, local_input, DataType(0), m_linearity_gradient->Matrix()); } - linearity_optimizer->add_to_gradient_staging( + linearity_optimizer->add_to_gradient( *m_linearity_gradient, - DataType(1) / mini_batch_size); + DataType(1) / mini_batch_size, + true); } // Compute gradient w.r.t. input @@ -386,7 +392,7 @@ void fully_connected_layer::fp_com m_bias_scaling_factor, bias.LockedMatrix(), ones, DataType(1), output.Matrix()); } - + } template <> @@ -424,9 +430,10 @@ void fully_connected_layer::bp_com m_bias_scaling_factor, local_gradient_wrt_output, ones, DataType(0), m_bias_gradient->Matrix()); } - bias_optimizer->add_to_gradient_staging( + bias_optimizer->add_to_gradient( *m_bias_gradient, - m_bias_scaling_factor / mini_batch_size); + m_bias_scaling_factor / mini_batch_size, + true); } // Compute gradient w.r.t. 
linearity if needed @@ -443,9 +450,10 @@ void fully_connected_layer::bp_com DataType(1), local_gradient_wrt_output, local_input, DataType(0), m_linearity_gradient->Matrix()); } - linearity_optimizer->add_to_gradient_staging( + linearity_optimizer->add_to_gradient( *m_linearity_gradient, - DataType(1) / mini_batch_size); + DataType(1) / mini_batch_size, + true); } else { if (m_transpose) { El::Gemm(El::NORMAL, El::TRANSPOSE, @@ -475,7 +483,7 @@ void fully_connected_layer::bp_com DataType(1), linearity, gradient_wrt_output, DataType(0), gradient_wrt_input); } - + } #endif // LBANN_HAS_GPU diff --git a/src/layers/regularizers/batch_normalization.cpp b/src/layers/regularizers/batch_normalization.cpp index d98188c7f9f..f08e7d634e2 100644 --- a/src/layers/regularizers/batch_normalization.cpp +++ b/src/layers/regularizers/batch_normalization.cpp @@ -245,13 +245,15 @@ void batch_normalization_layer::bp_ } optimizer* scale_optimizer = m_weights[0]->get_optimizer(); if (scale_optimizer != nullptr) { - scale_optimizer->add_to_gradient_staging(*m_scale_gradient, - one / effective_mini_batch_size); + scale_optimizer->add_to_gradient(*m_scale_gradient, + one / effective_mini_batch_size, + true); } optimizer* bias_optimizer = m_weights[1]->get_optimizer(); if (bias_optimizer != nullptr) { - bias_optimizer->add_to_gradient_staging(*m_bias_gradient, - one / effective_mini_batch_size); + bias_optimizer->add_to_gradient(*m_bias_gradient, + one / effective_mini_batch_size, + true); } // Compute error signal diff --git a/src/layers/regularizers/batch_normalization.cu b/src/layers/regularizers/batch_normalization.cu index 48880160172..b44900e8372 100644 --- a/src/layers/regularizers/batch_normalization.cu +++ b/src/layers/regularizers/batch_normalization.cu @@ -292,7 +292,7 @@ __global__ void backprop2_kernel( } } // namespace - + template <> void batch_normalization_layer::fp_compute() { constexpr DataType one = 1; @@ -301,7 +301,7 @@ void batch_normalization_layer::fp_ // CUDA objects 
CHECK_CUDA(cudaSetDevice(El::GPUManager::Device())); auto&& stream = El::GPUManager::Stream(); - + // Matrices const auto& input = get_prev_activations(); const auto& local_input = input.LockedMatrix(); @@ -401,7 +401,7 @@ void batch_normalization_layer::fp_ local_scale.LockedBuffer(), local_bias.LockedBuffer(), local_output.Buffer(), local_output.LDim()); } - + } template <> @@ -484,13 +484,15 @@ void batch_normalization_layer::bp_ } optimizer* scale_optimizer = m_weights[0]->get_optimizer(); if (scale_optimizer != nullptr) { - scale_optimizer->add_to_gradient_staging(*m_scale_gradient, - one / effective_mini_batch_size); + scale_optimizer->add_to_gradient(*m_scale_gradient, + one / effective_mini_batch_size, + true); } optimizer* bias_optimizer = m_weights[1]->get_optimizer(); if (bias_optimizer != nullptr) { - bias_optimizer->add_to_gradient_staging(*m_bias_gradient, - one / effective_mini_batch_size); + bias_optimizer->add_to_gradient(*m_bias_gradient, + one / effective_mini_batch_size, + true); } // Compute error signal @@ -526,7 +528,7 @@ void batch_normalization_layer::bp_ local_mean_gradient.LockedBuffer(), local_var_gradient.LockedBuffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } - + } - + } // namespace lbann diff --git a/src/optimizers/CMakeLists.txt b/src/optimizers/CMakeLists.txt index bb2d84ada91..61a94de978f 100644 --- a/src/optimizers/CMakeLists.txt +++ b/src/optimizers/CMakeLists.txt @@ -1,20 +1,20 @@ # Add the source files for this directory set_full_path(THIS_DIR_SOURCES - adagrad.cpp - adam.cpp - hypergradient_adam.cpp +# adagrad.cpp +# adam.cpp +# hypergradient_adam.cpp optimizer.cpp - rmsprop.cpp - sgd.cpp +# rmsprop.cpp +# sgd.cpp ) if (LBANN_HAS_CUDA) # Add the CUDA source files for this directory set_full_path(THIS_DIR_CU_SOURCES - adagrad.cu - adam.cu - rmsprop.cu - sgd.cu +# adagrad.cu +# adam.cu +# rmsprop.cu +# sgd.cu ) endif () diff --git a/src/optimizers/optimizer.cpp b/src/optimizers/optimizer.cpp index 
695b775b88b..c855e62c86f 100644 --- a/src/optimizers/optimizer.cpp +++ b/src/optimizers/optimizer.cpp @@ -29,65 +29,59 @@ namespace lbann { -optimizer::optimizer(lbann_comm *comm, DataType learning_rate) - : m_comm(comm), - m_weights(nullptr), - m_learning_rate(learning_rate), - m_gradient(nullptr), - m_gradient_staging(nullptr), - m_gradient_allreduce_needed(false), - m_gradient_allreduce_started(false), - m_gradient_allreduce_finished(false) {} +std::string to_string(optimizer_gradient_status status) { + switch (status) { + case optimizer_gradient_status::ready: + return "ready"; + case optimizer_gradient_status::cleared: + return "cleared"; + case optimizer_gradient_status::allreduce_needed: + return "allreduce needed"; + case optimizer_gradient_status::allreduce_started: + return "allreduce started"; + default: + return "unknown"; + } +} + +optimizer::optimizer(lbann_comm* comm, DataType learning_rate) + : m_comm(comm), m_learning_rate(learning_rate) { + if (m_comm == nullptr) { + LBANN_ERROR("got null pointer for lbann_comm"); + } +} optimizer::optimizer(const optimizer& other) : m_comm(other.m_comm), m_weights(other.m_weights), + m_gradient(other.m_gradient ? other.m_gradient->Copy() : nullptr), + m_gradient_v(other.m_gradient_v ? 
other.m_gradient_v->Copy() : nullptr), + m_gradient_sources(other.m_gradient_sources), + m_gradient_status(other.m_gradient_status), m_learning_rate(other.m_learning_rate), - m_gradient(other.m_gradient), - m_gradient_staging(other.m_gradient_staging), - m_gradient_allreduce_needed(other.m_gradient_allreduce_needed), - m_gradient_allreduce_started(other.m_gradient_allreduce_started), - m_gradient_allreduce_finished(other.m_gradient_allreduce_finished), - m_step_time(other.m_step_time) -{ - if (m_gradient != nullptr) { - m_gradient = m_gradient->Copy(); - } - if (m_gradient_staging != nullptr) { - m_gradient_staging = m_gradient_staging->Copy(); + m_step_time(other.m_step_time) { + if (m_gradient_status == optimizer_gradient_status::allreduce_started) { + LBANN_ERROR("attempted to copy optimizer while a " + "gradient allreduce is in progress"); } } optimizer& optimizer::operator=(const optimizer& other) { m_comm = other.m_comm; m_weights = other.m_weights; + m_gradient.reset(other.m_gradient ? other.m_gradient->Copy() : nullptr); + m_gradient_v.reset(other.m_gradient_v ? 
other.m_gradient_v->Copy() : nullptr); + m_gradient_sources = other.m_gradient_sources; + m_gradient_status = other.m_gradient_status; m_learning_rate = other.m_learning_rate; m_step_time = other.m_step_time; - m_gradient_allreduce_needed = other.m_gradient_allreduce_needed; - m_gradient_allreduce_started = other.m_gradient_allreduce_started; - m_gradient_allreduce_finished = other.m_gradient_allreduce_finished; - m_gradient_allreduce_started = other.m_gradient_allreduce_started; - - // Deep copy matrices - if (m_gradient != nullptr) { delete m_gradient; } - if (m_gradient_staging != nullptr) { delete m_gradient_staging; } - m_gradient = other.m_gradient; - m_gradient_staging = other.m_gradient_staging; - if (m_gradient != nullptr) { - m_gradient = m_gradient->Copy(); - } - if (m_gradient_staging != nullptr) { - m_gradient_staging = m_gradient_staging->Copy(); + if (m_gradient_status == optimizer_gradient_status::allreduce_started) { + LBANN_ERROR("attempted to copy optimizer while a " + "gradient allreduce is in progress"); } - return *this; } -optimizer::~optimizer() { - if (m_gradient != nullptr) { delete m_gradient; } - if (m_gradient_staging != nullptr) { delete m_gradient_staging; } -} - description optimizer::get_description() const { description desc(get_type() + " optimizer"); desc.add("Learning rate", m_learning_rate); @@ -95,120 +89,114 @@ description optimizer::get_description() const { } weights& optimizer::get_weights() { - if (!is_initialized()) { - LBANN_ERROR("attempted to access the weights being optimized before they are set"); + // Item 3, p. 
23 in "Effective C++", 3rd ed., by Scott Meyers + return const_cast(static_cast(*this).get_weights()); +} + +const weights& optimizer::get_weights() const { + if (m_weights == nullptr) { + LBANN_ERROR("attempted to access the weights being optimized " + "before they are set"); } return *m_weights; } -const AbsDistMat& optimizer::get_gradient() { +AbsDistMat& optimizer::get_gradient() { - // Check if gradient is initialized - if (!is_initialized()) { - LBANN_ERROR("attempted to access gradients before they are set up"); + // Make sure gradient matrix has been setup + if (m_gradient == nullptr) { + LBANN_ERROR("attempted to access gradient before it is set up"); } - // Perform allreduce on staging matrix if needed - if (m_gradient_allreduce_needed && !m_gradient_allreduce_started) { - start_gradient_staging_allreduce(); - } - if (m_gradient_allreduce_started && !m_gradient_allreduce_finished) { - m_comm->wait(m_gradient_allreduce_req); - m_gradient_allreduce_finished = true; + // Make sure gradient values are ready + start_gradient_allreduce(); + finish_gradient_allreduce(); + if (m_gradient_status == optimizer_gradient_status::cleared) { + El::Zero(*m_gradient); + m_gradient_status = optimizer_gradient_status::ready; } - if (m_gradient_allreduce_needed) { - add_to_gradient(*m_gradient_staging); + if (m_gradient_status != optimizer_gradient_status::ready) { + LBANN_ERROR("expected gradient to be \"ready\", but its status is " + "\"" + to_string(m_gradient_status) + "\""); } - m_gradient_allreduce_needed = false; - m_gradient_allreduce_started = false; - m_gradient_allreduce_finished = false; + // Return gradient return *m_gradient; } -void optimizer::start_gradient_staging_allreduce() { - if (!m_gradient_allreduce_needed || m_gradient_allreduce_started) { - return; - } - - m_gradient_allreduce_started = true; - m_comm->nb_allreduce(*m_gradient_staging, - m_gradient_staging->RedundantComm(), - m_gradient_allreduce_req, - El::mpi::SUM); - m_gradient_allreduce_finished 
= false; -} - -void optimizer::clear_gradient() { - - // Clear matrices - El::Zero(*m_gradient); - - // Reset gradient allreduce flags - m_gradient_allreduce_needed = false; - m_gradient_allreduce_started = false; - m_gradient_allreduce_finished = false; - -} - void optimizer::add_to_gradient(const AbsDistMat& gradient, - DataType scale) { - if (!is_initialized()) { - LBANN_ERROR("attempted to access gradients before they are set up"); + DataType scale, + bool allreduce_needed) { + + // Check that matrices have been setup + if (m_gradient == nullptr || m_gradient_v == nullptr) { + LBANN_ERROR("attempted to access gradient before it is set up"); } if (scale == DataType(0)) { return; } - // Add to gradient - const auto dist_data = m_gradient->DistData(); - if (gradient.DistData() == dist_data) { - El::Axpy(scale, gradient, *m_gradient); + // Make a view or copy of input matrix in correct distribution + m_gradient_v->Empty(); + m_gradient_v->AlignWith(*m_gradient); + if (m_gradient_v->DistData() == gradient.DistData()) { + El::LockedView(*m_gradient_v, gradient); + } else if (allreduce_needed) { + std::unique_ptr temp(gradient.Copy()); + get_comm().allreduce(*temp, temp->RedundantComm()); + El::Copy(*temp, *m_gradient_v); + allreduce_needed = false; } else { - std::unique_ptr workspace(m_gradient->Construct(*dist_data.grid, - dist_data.root)); -#ifdef HYDROGEN_HAVE_CUB - if (workspace->GetLocalDevice() == El::Device::GPU) { - workspace->Matrix().SetMemoryMode(1); // CUB GPU memory pool - } -#endif // HYDROGEN_HAVE_CUB - El::Copy(gradient, *workspace); - El::Axpy(scale, *workspace, *m_gradient); + El::Copy(gradient, *m_gradient_v); } -} - -void optimizer::add_to_gradient_staging(const AbsDistMat& gradient, - DataType scale) { - if (!is_initialized()) { - LBANN_ERROR("attempted to access gradients before they are set up"); - } - if (m_gradient_allreduce_started) { - LBANN_ERROR("attempted to add to staging matrix after gradient accumulation has started"); + // Add to 
gradient + switch (m_gradient_status) { + case optimizer_gradient_status::ready: + if (allreduce_needed) { + El::Scale(DataType(1) / m_gradient->RedundantSize(), *m_gradient); + m_gradient_status = optimizer_gradient_status::allreduce_needed; + } + El::Axpy(scale, *m_gradient_v, *m_gradient); + break; + case optimizer_gradient_status::cleared: + El::Copy(*m_gradient_v, *m_gradient); + El::Scale(scale, *m_gradient); + m_gradient_status = (allreduce_needed ? + optimizer_gradient_status::allreduce_needed : + optimizer_gradient_status::ready); + break; + case optimizer_gradient_status::allreduce_needed: + { + const auto& scale_ = (allreduce_needed ? + scale : + scale / m_gradient->RedundantSize()); + El::Axpy(scale_, *m_gradient_v, *m_gradient); + } + break; + case optimizer_gradient_status::allreduce_started: + LBANN_ERROR("attempted to add to gradient " + "after a non-blocking allreduce has been launched"); + break; + default: + LBANN_ERROR("unexpected gradient status " + "(" + to_string(m_gradient_status) + ")"); } - if (scale == DataType(0)) { return; } - // Clear staging matrix if needed - if (!m_gradient_allreduce_needed) { - El::Zero(*m_gradient_staging); - } - m_gradient_allreduce_needed = true; + // Clean up + m_gradient_v->Empty(); - // Add to staging matrix - const auto dist_data = m_gradient_staging->DistData(); - if (gradient.DistData() == dist_data) { - El::Axpy(scale, gradient, *m_gradient_staging); - } else { - std::unique_ptr workspace(m_gradient_staging->Construct(*dist_data.grid, - dist_data.root)); -#ifdef HYDROGEN_HAVE_CUB - if (workspace->GetLocalDevice() == El::Device::GPU) { - workspace->Matrix().SetMemoryMode(1); // CUB GPU memory pool - } -#endif // HYDROGEN_HAVE_CUB - El::Copy(gradient, *workspace); - El::Axpy(scale, *workspace, *m_gradient_staging); +} + +void optimizer::clear_gradient() { + if (m_gradient_status == optimizer_gradient_status::allreduce_started) { + finish_gradient_allreduce(); } + m_gradient_status = 
optimizer_gradient_status::cleared; + m_gradient_sources.clear(); +} +El::Int optimizer::get_num_gradient_sources() const { + return m_gradient_sources.size(); } void optimizer::add_gradient_source(const void* source) { @@ -221,49 +209,49 @@ void optimizer::remove_gradient_source(const void* source) { m_gradient_sources.erase(nullptr); m_gradient_sources.erase(source); if (m_gradient_sources.empty()) { - start_gradient_staging_allreduce(); + start_gradient_allreduce(); } } -void optimizer::setup(weights& w) { - if (is_initialized()) { - LBANN_ERROR("attempted to setup an optimizer that is already set up"); +void optimizer::setup(weights* w) { + clear_gradient(); + + // Set weights being optimized + if (w != nullptr) { set_weights(w); } + if (m_weights == nullptr) { + LBANN_ERROR("attempted to setup optimizer without weights"); } - set_weights(w); // Initialize matrices - const int height = m_weights->get_matrix_height(); - const int width = m_weights->get_matrix_width(); + const auto& height = m_weights->get_matrix_height(); + const auto& width = m_weights->get_matrix_width(); const AbsDistMat& values = m_weights->get_values(); - - m_gradient = values.Construct(values.Grid(), values.Root()); - m_gradient_staging = values.Construct(values.Grid(), values.Root()); + m_gradient.reset(values.Construct(values.Grid(), values.Root())); m_gradient->Resize(height, width); - m_gradient_staging->Resize(height, width); - - // Initialize with zero gradient - clear_gradient(); + m_gradient_v.reset(values.Construct(values.Grid(), values.Root())); +#ifdef HYDROGEN_HAVE_CUB + if (m_gradient_v->GetLocalDevice() == El::Device::GPU) { + m_gradient_v->Matrix().SetMemoryMode(1); // CUB GPU memory pool + } +#endif // HYDROGEN_HAVE_CUB } void optimizer::step() { - if (!is_initialized()) { - LBANN_ERROR("optimizer must be set up before performing optimization step"); - } - - double step_start = get_time(); + const auto start_time = get_time(); - // Apply optimization step + // Get matrices 
+ if (m_weights == nullptr) { + LBANN_ERROR("attempted to perform optimization step without weights"); + } auto& values = m_weights->get_values(); const auto& gradient = get_gradient(); + + // Apply optimization step switch (values.GetLocalDevice()) { - case El::Device::CPU: - step_compute(values, gradient); - break; + case El::Device::CPU: step_compute(values, gradient); break; #ifdef LBANN_HAS_GPU - case El::Device::GPU: - step_compute_gpu(values, gradient); - break; + case El::Device::GPU: step_compute_gpu(values, gradient); break; #endif // LBANN_HAS_GPU default: std::stringstream err; @@ -271,20 +259,63 @@ void optimizer::step() { LBANN_ERROR(err.str()); } - m_step_time += get_time() - step_start; - + m_step_time += get_time() - start_time; } #ifdef LBANN_HAS_GPU void optimizer::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { /// @todo Automatically use CPU implementation - LBANN_ERROR("no GPU implementation detected"); + LBANN_ERROR(get_type() + " optimizer has no GPU implementation"); } #endif // LBANN_HAS_GPU -//************************************************************************ +DataType optimizer::get_learning_rate() const { + return m_learning_rate; +} + +void optimizer::set_learning_rate(DataType learning_rate) { + m_learning_rate = learning_rate; +}; + +void optimizer::start_gradient_allreduce() { + switch (m_gradient_status) { + case optimizer_gradient_status::allreduce_needed: + get_comm().nb_allreduce(*m_gradient, + m_gradient->RedundantComm(), + m_gradient_allreduce_req); + m_gradient_status = optimizer_gradient_status::allreduce_started; + break; + case optimizer_gradient_status::ready: + case optimizer_gradient_status::cleared: + case optimizer_gradient_status::allreduce_started: + break; + default: LBANN_ERROR("unexpected gradient status " + "(" + to_string(m_gradient_status) + ")"); + } +} + +void optimizer::finish_gradient_allreduce() { + switch (m_gradient_status) { + case optimizer_gradient_status::allreduce_started: 
+ get_comm().wait(m_gradient_allreduce_req); + m_gradient_status = optimizer_gradient_status::ready; + break; + case optimizer_gradient_status::ready: + case optimizer_gradient_status::cleared: + break; + case optimizer_gradient_status::allreduce_needed: + LBANN_ERROR("attempted to finish gradient allreduce " + "before starting it"); + break; + default: + LBANN_ERROR("unexpected gradient status " + "(" + to_string(m_gradient_status) + ")"); + } +} + +// ============================= // Checkpointing -//************************************************************************ +// ============================= bool optimizer::save_to_checkpoint_shared(persist& p, std::string m_name) { // m_learning_rate; @@ -294,7 +325,7 @@ bool optimizer::save_to_checkpoint_shared(persist& p, std::string m_name) { bool optimizer::load_from_checkpoint_shared(persist& p, std::string m_name) { p.read_datatype(persist_type::train, "learning_rate", &m_learning_rate); - m_comm->trainer_broadcast(0, m_learning_rate); + get_comm().trainer_broadcast(0, m_learning_rate); return true; } @@ -307,4 +338,5 @@ bool optimizer::load_from_checkpoint_distributed(persist& p, std::string m_name) p.read_datatype(persist_type::train, "learning_rate", &m_learning_rate); return true; } -} // namespace lbann + +} // namespace lbann diff --git a/src/optimizers/rmsprop.cpp b/src/optimizers/rmsprop.cpp index 7f3cf2e9c03..14057e0ae42 100644 --- a/src/optimizers/rmsprop.cpp +++ b/src/optimizers/rmsprop.cpp @@ -145,4 +145,4 @@ bool rmsprop::load_from_checkpoint_shared(persist& p, std::string name_prefix) { return true; } -} // namespace lbann +} // namespace lbann diff --git a/src/proto/factories/callback_factory.cpp b/src/proto/factories/callback_factory.cpp index 5e5d0c959de..cdd74128134 100644 --- a/src/proto/factories/callback_factory.cpp +++ b/src/proto/factories/callback_factory.cpp @@ -96,6 +96,7 @@ lbann_callback* construct_callback(lbann_comm* comm, // Inter-model communication 
////////////////////////////////////////////////////////////// +#if 0 if (proto_cb.has_ltfb()) { const auto& params = proto_cb.ltfb(); return new lbann_callback_ltfb(params.batch_interval(), @@ -105,6 +106,7 @@ lbann_callback* construct_callback(lbann_comm* comm, lbann_callback_ltfb::string_to_comm_algo(params.communication_algorithm()), summarizer); } +#endif // 0 /// @todo if (proto_cb.has_imcomm()) { const auto& params = proto_cb.imcomm(); @@ -405,6 +407,7 @@ lbann_callback* construct_callback(lbann_comm* comm, ////////////////////////////////////////////////////////////// // Hyperparameter exploration ////////////////////////////////////////////////////////////// +#if 0 if (proto_cb.has_perturb_adam()) { const auto& params = proto_cb.perturb_adam(); return new lbann_callback_perturb_adam( @@ -416,6 +419,7 @@ lbann_callback* construct_callback(lbann_comm* comm, params.batch_interval(), parse_set(params.weights())); } +#endif // 0 return nullptr; } diff --git a/src/proto/factories/optimizer_factory.cpp b/src/proto/factories/optimizer_factory.cpp index 8ba69185168..479a6f7c053 100644 --- a/src/proto/factories/optimizer_factory.cpp +++ b/src/proto/factories/optimizer_factory.cpp @@ -32,6 +32,7 @@ namespace proto { optimizer* construct_optimizer(lbann_comm* comm, const lbann_data::Optimizer& proto_opt) { +#if 0 // Stochastic gradient descent if (proto_opt.has_sgd()) { const auto& params = proto_opt.sgd(); @@ -76,6 +77,7 @@ optimizer* construct_optimizer(lbann_comm* comm, params.beta2(), params.eps()); } +#endif // Return null pointer if no optimizer is specified return nullptr; diff --git a/src/weights/weights.cpp b/src/weights/weights.cpp index 171c92fcaec..d2414870122 100644 --- a/src/weights/weights.cpp +++ b/src/weights/weights.cpp @@ -95,7 +95,7 @@ weights::weights(const weights& other) m_optimizer.reset(other.m_optimizer ? 
other.m_optimizer->copy() : nullptr); if (m_optimizer != nullptr) { - m_optimizer->set_weights(*this); + m_optimizer->set_weights(this); } } @@ -115,7 +115,7 @@ weights& weights::operator=(const weights& other) { m_optimizer.reset(other.m_optimizer ? other.m_optimizer->copy() : nullptr); if (m_optimizer != nullptr) { - m_optimizer->set_weights(*this); + m_optimizer->set_weights(this); } return *this; @@ -288,7 +288,7 @@ void weights::setup() { // Setup optimizer if (m_optimizer != nullptr) { - m_optimizer->setup(*this); + m_optimizer->setup(this); } } From d299fe1281d9d3c8f1f5a9ba1fcb46f83a282fd9 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 20 Feb 2019 18:07:15 -0800 Subject: [PATCH 089/443] Refactoring Adam optimizer. Adapting to refactored optimizer base class. Optimized GPU kernel for contiguous data. --- include/lbann/lbann.hpp | 12 +-- include/lbann/optimizers/adam.hpp | 28 +++--- include/lbann/optimizers/optimizer.hpp | 11 +-- src/callbacks/CMakeLists.txt | 2 +- src/optimizers/CMakeLists.txt | 4 +- src/optimizers/adam.cpp | 114 ++++++++++++---------- src/optimizers/adam.cu | 113 ++++++++++++--------- src/optimizers/optimizer.cpp | 29 +----- src/proto/factories/callback_factory.cpp | 2 - src/proto/factories/optimizer_factory.cpp | 2 + 10 files changed, 163 insertions(+), 154 deletions(-) diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index ac687473e3c..1b9d2bd3057 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -162,7 +162,7 @@ #include "lbann/callbacks/callback_confusion_matrix.hpp" #include "lbann/callbacks/callback_check_gradients.hpp" #include "lbann/callbacks/callback_check_metric.hpp" -// #include "lbann/callbacks/callback_perturb_adam.hpp" +#include "lbann/callbacks/callback_perturb_adam.hpp" /// Weights and weight initializers #include "lbann/weights/weights.hpp" @@ -170,13 +170,11 @@ #include "lbann/weights/variance_scaling_initializers.hpp" /// Optimizers -#if 0 -#include "lbann/optimizers/adagrad.hpp" +// 
#include "lbann/optimizers/adagrad.hpp" #include "lbann/optimizers/adam.hpp" -#include "lbann/optimizers/hypergradient_adam.hpp" -#include "lbann/optimizers/rmsprop.hpp" -#include "lbann/optimizers/sgd.hpp" -#endif // 0 +// #include "lbann/optimizers/hypergradient_adam.hpp" +// #include "lbann/optimizers/rmsprop.hpp" +// #include "lbann/optimizers/sgd.hpp" /// Objective functions #include "lbann/objective_functions/objective_function.hpp" diff --git a/include/lbann/optimizers/adam.hpp b/include/lbann/optimizers/adam.hpp index e80e399dec6..6e4cfd63595 100644 --- a/include/lbann/optimizers/adam.hpp +++ b/include/lbann/optimizers/adam.hpp @@ -24,8 +24,8 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_OPTIMIZERS_ADAM_HPP -#define LBANN_OPTIMIZERS_ADAM_HPP +#ifndef LBANN_OPTIMIZERS_ADAM_HPP_INCLUDED +#define LBANN_OPTIMIZERS_ADAM_HPP_INCLUDED #include "lbann/optimizers/optimizer.hpp" @@ -41,13 +41,11 @@ namespace lbann { class adam : public optimizer { public: - /** Constructor. */ - adam(lbann_comm *comm, + adam(lbann_comm* comm, DataType learning_rate, DataType beta1 = 0.9, DataType beta2 = 0.99, DataType eps = 1e-8); - adam(const adam& other); adam& operator=(const adam& other); ~adam() = default; @@ -67,16 +65,13 @@ class adam : public optimizer { /** Second moment estimates. */ AbsDistMat& get_moment2(); - void setup(weights& w) override; + void setup(weights* w = nullptr) override; + +protected: - /** Perform the computation in an optimization step. */ + /** Computation for an optimization step. */ void step_compute(AbsDistMat& values, const AbsDistMat& gradient) override; -#ifdef LBANN_HAS_CUDNN - /** Perform the computation in an optimization step on GPU. */ - void step_compute_gpu(AbsDistMat& values, - const AbsDistMat& gradient) override; -#endif // LBANN_HAS_CUDNN private: @@ -98,6 +93,13 @@ class adam : public optimizer { /** Hyperparameter exploration. 
*/ friend class lbann_callback_perturb_adam; + /** CPU optimization step. */ + void step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient); +#ifdef LBANN_HAS_CUDA + /** GPU optimization step. */ + void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient); +#endif // LBANN_HAS_CUDA + // =========================================== // Checkpointing // =========================================== @@ -154,4 +156,4 @@ class adam : public optimizer { } // namespace lbann -#endif // LBANN_OPTIMIZERS_ADAM_HPP +#endif // LBANN_OPTIMIZERS_ADAM_HPP_INCLUDED diff --git a/include/lbann/optimizers/optimizer.hpp b/include/lbann/optimizers/optimizer.hpp index ef9eb7cca79..c131771beb3 100644 --- a/include/lbann/optimizers/optimizer.hpp +++ b/include/lbann/optimizers/optimizer.hpp @@ -159,22 +159,13 @@ class optimizer { protected: - /** Computation for an optimization step on CPU. + /** Computation for an optimization step. * * @c values and @gradient can be assumed to have the same * distribution. */ virtual void step_compute(AbsDistMat& values, const AbsDistMat& gradient) = 0; -#ifdef LBANN_HAS_CUDA - /** Computation for an optimization step on GPU. - * - * The default implementation is to throw an exception. @c values - * and @gradient can be assumed to have the same distribution. 
- */ - virtual void step_compute_gpu(AbsDistMat& values, - const AbsDistMat& gradient); -#endif // LBANN_HAS_CUDA private: diff --git a/src/callbacks/CMakeLists.txt b/src/callbacks/CMakeLists.txt index a841c3a357e..d7fa7c08db9 100644 --- a/src/callbacks/CMakeLists.txt +++ b/src/callbacks/CMakeLists.txt @@ -20,7 +20,7 @@ set_full_path(THIS_DIR_SOURCES callback_io.cpp callback_learning_rate.cpp # callback_ltfb.cpp -# callback_perturb_adam.cpp + callback_perturb_adam.cpp callback_print.cpp callback_save_images.cpp callback_save_model.cpp diff --git a/src/optimizers/CMakeLists.txt b/src/optimizers/CMakeLists.txt index 61a94de978f..cfded8d9c77 100644 --- a/src/optimizers/CMakeLists.txt +++ b/src/optimizers/CMakeLists.txt @@ -1,7 +1,7 @@ # Add the source files for this directory set_full_path(THIS_DIR_SOURCES # adagrad.cpp -# adam.cpp + adam.cpp # hypergradient_adam.cpp optimizer.cpp # rmsprop.cpp @@ -12,7 +12,7 @@ if (LBANN_HAS_CUDA) # Add the CUDA source files for this directory set_full_path(THIS_DIR_CU_SOURCES # adagrad.cu -# adam.cu + adam.cu # rmsprop.cu # sgd.cu ) diff --git a/src/optimizers/adam.cpp b/src/optimizers/adam.cpp index ba137ec9c81..92774b50eca 100644 --- a/src/optimizers/adam.cpp +++ b/src/optimizers/adam.cpp @@ -29,7 +29,7 @@ namespace lbann { -adam::adam(lbann_comm *comm, +adam::adam(lbann_comm* comm, DataType learning_rate, DataType beta1, DataType beta2, @@ -71,7 +71,7 @@ description adam::get_description() const { const AbsDistMat& adam::get_moment1() const { if (m_moment1 == nullptr) { - LBANN_ERROR(get_type() + " optimizer " + LBANN_ERROR(this->get_type() + " optimizer " + "attempted to access moment1 before it was setup"); } return *m_moment1; @@ -82,7 +82,7 @@ AbsDistMat& adam::get_moment1() { } const AbsDistMat& adam::get_moment2() const { if (m_moment2 == nullptr) { - LBANN_ERROR(get_type() + " optimizer " + LBANN_ERROR(this->get_type() + " optimizer " + "attempted to access moment2 before it was setup"); } return *m_moment2; @@ -92,7 +92,7 
@@ AbsDistMat& adam::get_moment2() { return const_cast(static_cast(*this).get_moment2()); } -void adam::setup(weights& w) { +void adam::setup(weights* w) { optimizer::setup(w); const auto& gradient = this->get_gradient(); m_moment1.reset(AbsDistMat::Instantiate(gradient.DistData())); @@ -102,61 +102,75 @@ void adam::setup(weights& w) { } void adam::step_compute(AbsDistMat& values, const AbsDistMat& gradient) { + switch (values.GetLocalDevice()) { + case El::Device::CPU: step_compute_cpu(values, gradient); break; +#ifdef LBANN_HAS_CUDA + case El::Device::GPU: step_compute_gpu(values, gradient); break; +#endif // LBANN_HAS_CUDA + default: + std::ostringstream err; + err << "unsupported device type " + << "(" << static_cast(values.GetLocalDevice()) << ")"; + LBANN_ERROR(err.str()); + } +} + +void adam::step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient) { + constexpr DataType one = 1; // Precompute the bias correction and learning rate. m_current_beta1 *= m_beta1; m_current_beta2 *= m_beta2; - const DataType correction = m_learning_rate * - (std::sqrt(DataType(1) - m_current_beta2) - / (DataType(1) - m_current_beta1)); + const DataType correction = this->get_learning_rate() * + (std::sqrt(one - m_current_beta2) + / (one - m_current_beta1)); // Get local matrix data - const int local_height = values.LocalHeight(); - const int local_width = values.LocalWidth(); - DataType* __restrict__ values_buffer = values.Buffer(); - const int values_ldim = values.LDim(); - const DataType* __restrict__ gradient_buffer = gradient.LockedBuffer(); - const int gradient_ldim = gradient.LDim(); - DataType* __restrict__ moment1_buffer = m_moment1->Buffer(); - const int moment1_ldim = m_moment1->LDim(); - DataType* __restrict__ moment2_buffer = m_moment2->Buffer(); - const int moment2_ldim = m_moment2->LDim(); - - // Check if matrix data is contiguous - if (values_ldim != local_height - || gradient_ldim != local_height - || moment1_ldim != local_height - || moment2_ldim != 
local_height) { + const size_t local_height = values.LocalHeight(); + const size_t local_width = values.LocalWidth(); + auto* __restrict__ values_buffer = values.Buffer(); + const auto* __restrict__ gradient_buffer = gradient.LockedBuffer(); + auto* __restrict__ moment1_buffer = m_moment1->Buffer(); + auto* __restrict__ moment2_buffer = m_moment2->Buffer(); + + if (values.Contiguous() && gradient.Contiguous() + && m_moment1->Contiguous() && m_moment2->Contiguous()) { + + // Update with contiguous data + const size_t local_size = local_height * local_width; + LBANN_OMP_PARALLEL_FOR + for (size_t i = 0; i < local_size; ++i) { + auto& x = values_buffer[i]; + const auto& g = gradient_buffer[i] + m_eps; // Avoid denormalized floats + auto& m1 = moment1_buffer[i]; + auto& m2 = moment2_buffer[i]; + m1 = m_beta1 * m1 + (one - m_beta1) * g; + m2 = m_beta2 * m2 + (one - m_beta2) * g * g; + x -= correction * m1 / (std::sqrt(m2) + m_eps); + } + + } else { + // Update with non-contiguous data + const size_t values_ldim = values.LDim(); + const size_t gradient_ldim = gradient.LDim(); + const size_t moment1_ldim = m_moment1->LDim(); + const size_t moment2_ldim = m_moment2->LDim(); LBANN_OMP_PARALLEL_FOR_COLLAPSE2 - for (int j=0; jam_trainer_master()) { + if (get_comm().am_trainer_master()) { pack_scalars(p); } @@ -183,11 +197,11 @@ bool adam::save_to_checkpoint_shared(persist& p, std::string name_prefix) { bool adam::load_from_checkpoint_shared(persist& p, std::string name_prefix) { optimizer::load_from_checkpoint_shared(p, name_prefix); struct packing_header header; - if (m_comm->am_trainer_master()) { + if (get_comm().am_trainer_master()) { unpack_scalars(p, &header); } - m_comm->trainer_broadcast(0, header); + get_comm().trainer_broadcast(0, header); unpack_header(header); diff --git a/src/optimizers/adam.cu b/src/optimizers/adam.cu index a224ed026bc..a19bbb97385 100644 --- a/src/optimizers/adam.cu +++ b/src/optimizers/adam.cu @@ -30,65 +30,90 @@ namespace lbann { namespace { 
-__global__ void adam_kernel(int height, - int width, - DataType correction, - DataType eps, - DataType beta1, - DataType beta2, - DataType * __restrict__ values, - int values_ldim, - const DataType * __restrict__ gradient, - int gradient_ldim, - DataType * __restrict__ moment1, - int moment1_ldim, - DataType * __restrict__ moment2, - int moment2_ldim) { - const int tid = blockIdx.x * blockDim.x + threadIdx.x; - const int num_threads = gridDim.x * blockDim.x; - for (int pos = tid; pos < height * width; pos += num_threads) { - const auto& i = pos % height; - const auto& j = pos / height; - const auto& g = gradient[i + j * gradient_ldim] + eps; - auto& m1 = moment1[i + j * moment1_ldim]; - auto& m2 = moment2[i + j * moment2_ldim]; - auto& x = values[i + j * values_ldim]; - m1 = beta1 * m1 + (DataType(1) - beta1) * g; - m2 = beta2 * m2 + (DataType(1) - beta2) * g * g; - x -= correction * m1 / (sqrt(m2) + eps); - } +__global__ void noncontiguous_kernel(size_t height, + size_t width, + DataType correction, + DataType eps, + DataType beta1, + DataType beta2, + DataType * __restrict__ values, + size_t values_ldim, + const DataType * __restrict__ gradient, + size_t gradient_ldim, + DataType * __restrict__ moment1, + size_t moment1_ldim, + DataType * __restrict__ moment2, + size_t moment2_ldim) { + const auto& gid = blockIdx.x * blockDim.x + threadIdx.x; + const auto& row = gid % height; + const auto& col = gid / height; + const auto& g = gradient[row + col * gradient_ldim] + eps; + auto& m1 = moment1[row + col * moment1_ldim]; + auto& m2 = moment2[row + col * moment2_ldim]; + auto& x = values[row + col * values_ldim]; + m1 = beta1 * m1 + (DataType(1) - beta1) * g; + m2 = beta2 * m2 + (DataType(1) - beta2) * g * g; + x -= correction * m1 / (cuda::sqrt(m2) + eps); } +__global__ void contiguous_kernel(size_t size, + DataType correction, + DataType eps, + DataType beta1, + DataType beta2, + DataType * __restrict__ values, + const DataType * __restrict__ gradient, + DataType * 
__restrict__ moment1, + DataType * __restrict__ moment2) { + const auto& gid = threadIdx.x + blockIdx.x * blockDim.x; + const auto& g = gradient[gid] + eps; + auto& m1 = moment1[gid]; + auto& m2 = moment2[gid]; + auto& x = values[gid]; + m1 = beta1 * m1 + (DataType(1) - beta1) * g; + m2 = beta2 * m2 + (DataType(1) - beta2) * g * g; + x -= correction * m1 / (cuda::sqrt(m2) + eps); } +} // namespace + void adam::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { + constexpr DataType one = 1; // Precompute the bias correction and learning rate. m_current_beta1 *= m_beta1; m_current_beta2 *= m_beta2; - const DataType correction = m_learning_rate * - (std::sqrt(DataType(1) - m_current_beta2) - / (DataType(1) - m_current_beta1)); + const DataType correction = this->get_learning_rate() * + (std::sqrt(one - m_current_beta2) + / (one - m_current_beta1)); // Get matrix dimensions - const int local_height = values.LocalHeight(); - const int local_width = values.LocalWidth(); - const int size = local_height * local_width; - if (size <= 0) { return; } + const size_t local_height = values.LocalHeight(); + const size_t local_width = values.LocalWidth(); + const size_t local_size = local_height * local_width; + if (local_size <= 0) { return; } - // Launch CUDA kernels - const int block_size = 256; + // Launch CUDA kernel + constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; - grid_dims.x = (size + block_size - 1) / block_size; + grid_dims.x = (local_size + block_size - 1) / block_size; cudaStream_t stream = El::GPUManager::Stream(); - adam_kernel<<>> - (local_height, local_width, correction, m_eps, m_beta1, m_beta2, - values.Buffer(), values.LDim(), - gradient.LockedBuffer(), gradient.LDim(), - m_moment1->Buffer(), m_moment1->LDim(), - m_moment2->Buffer(), m_moment2->LDim()); + if (values.Contiguous() && gradient.Contiguous() + && m_moment1->Contiguous() && m_moment2->Contiguous()) { + contiguous_kernel<<>>( + local_size, 
correction, m_eps, m_beta1, m_beta2, + values.Buffer(), gradient.LockedBuffer(), + m_moment1->Buffer(), m_moment2->Buffer()); + } else { + noncontiguous_kernel<<>>( + local_height, local_width, correction, m_eps, m_beta1, m_beta2, + values.Buffer(), values.LDim(), + gradient.LockedBuffer(), gradient.LDim(), + m_moment1->Buffer(), m_moment1->LDim(), + m_moment2->Buffer(), m_moment2->LDim()); + } } -} // namespace lbann +} // namespace lbann diff --git a/src/optimizers/optimizer.cpp b/src/optimizers/optimizer.cpp index c855e62c86f..6046928c484 100644 --- a/src/optimizers/optimizer.cpp +++ b/src/optimizers/optimizer.cpp @@ -227,8 +227,10 @@ void optimizer::setup(weights* w) { const auto& width = m_weights->get_matrix_width(); const AbsDistMat& values = m_weights->get_values(); m_gradient.reset(values.Construct(values.Grid(), values.Root())); + m_gradient->AlignWith(values); m_gradient->Resize(height, width); m_gradient_v.reset(values.Construct(values.Grid(), values.Root())); + m_gradient_v->AlignWith(values); #ifdef HYDROGEN_HAVE_CUB if (m_gradient_v->GetLocalDevice() == El::Device::GPU) { m_gradient_v->Matrix().SetMemoryMode(1); // CUB GPU memory pool @@ -238,37 +240,14 @@ void optimizer::setup(weights* w) { } void optimizer::step() { - const auto start_time = get_time(); - - // Get matrices if (m_weights == nullptr) { LBANN_ERROR("attempted to perform optimization step without weights"); } - auto& values = m_weights->get_values(); - const auto& gradient = get_gradient(); - - // Apply optimization step - switch (values.GetLocalDevice()) { - case El::Device::CPU: step_compute(values, gradient); break; -#ifdef LBANN_HAS_GPU - case El::Device::GPU: step_compute_gpu(values, gradient); break; -#endif // LBANN_HAS_GPU - default: - std::stringstream err; - err << "invalid device (" << (int) values.GetLocalDevice() << ")"; - LBANN_ERROR(err.str()); - } - + const auto start_time = get_time(); + step_compute(m_weights->get_values(), get_gradient()); m_step_time += get_time() - 
start_time; } -#ifdef LBANN_HAS_GPU -void optimizer::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { - /// @todo Automatically use CPU implementation - LBANN_ERROR(get_type() + " optimizer has no GPU implementation"); -} -#endif // LBANN_HAS_GPU - DataType optimizer::get_learning_rate() const { return m_learning_rate; } diff --git a/src/proto/factories/callback_factory.cpp b/src/proto/factories/callback_factory.cpp index cdd74128134..639c11b1b80 100644 --- a/src/proto/factories/callback_factory.cpp +++ b/src/proto/factories/callback_factory.cpp @@ -407,7 +407,6 @@ lbann_callback* construct_callback(lbann_comm* comm, ////////////////////////////////////////////////////////////// // Hyperparameter exploration ////////////////////////////////////////////////////////////// -#if 0 if (proto_cb.has_perturb_adam()) { const auto& params = proto_cb.perturb_adam(); return new lbann_callback_perturb_adam( @@ -419,7 +418,6 @@ lbann_callback* construct_callback(lbann_comm* comm, params.batch_interval(), parse_set(params.weights())); } -#endif // 0 return nullptr; } diff --git a/src/proto/factories/optimizer_factory.cpp b/src/proto/factories/optimizer_factory.cpp index 479a6f7c053..aa4526371a7 100644 --- a/src/proto/factories/optimizer_factory.cpp +++ b/src/proto/factories/optimizer_factory.cpp @@ -56,6 +56,7 @@ optimizer* construct_optimizer(lbann_comm* comm, params.decay_rate(), params.eps()); } +#endif // Adam if (proto_opt.has_adam()) { @@ -67,6 +68,7 @@ optimizer* construct_optimizer(lbann_comm* comm, params.eps()); } +#if 0 // Hypergradient Adam if (proto_opt.has_hypergradient_adam()) { const auto& params = proto_opt.hypergradient_adam(); From 88dea213d999c93ef68e2536da07f2f4e67b588e Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Thu, 21 Feb 2019 07:42:03 -0800 Subject: [PATCH 090/443] Fixed a bug where removing an entry from the file descriptor map under a bucket iterator caused incorrect behavior. 
--- .../lbann/data_readers/sample_list_jag.hpp | 59 ++++++++++++------- .../data_readers/sample_list_jag_impl.hpp | 14 ++--- 2 files changed, 44 insertions(+), 29 deletions(-) diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index 9719a7a3046..c863391c588 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -165,34 +165,49 @@ class sample_list_jag { void set_samples_hdf5_handle(sample_id_t id, hid_t h) { const std::string& filename = m_sample_id_map[id]; + int bucket_count = m_open_fd_map.bucket_count(); int bucket = m_open_fd_map.bucket(filename); if(m_open_fd_map.bucket_size(bucket) > 0) { - if(m_open_fd_map.bucket_size(bucket) != 1) { - LBANN_ERROR(std::string{} + " :: unexpected number of open file descriptors for bucket " - + std::to_string(bucket)); + // if(m_open_fd_map.bucket_size(bucket) != 1) { + // LBANN_ERROR(std::string{} + " :: unexpected number of open file descriptors for bucket " + // + std::to_string(bucket)); + // } + auto local_it = m_open_fd_map.begin(bucket); + if(local_it == m_open_fd_map.end(bucket)) { + LBANN_ERROR(std::string{} + " :: bucket '" + std::to_string(bucket) + + "' has an empty iterator"); } - // std::cout << "I am adding a file handle for " << filename << " at bucket " << std::to_string(m_open_fd_map.bucket(filename)) << " and there are " << std::to_string(m_open_fd_map.bucket_size(m_open_fd_map.bucket(filename))) << " entries in the bucket." 
<< std::endl; - // std::cout << "Inside of the bucket I have "; - for ( auto local_it = m_open_fd_map.begin(bucket); local_it!= m_open_fd_map.end(bucket); ++local_it ) { - // std::cout << " " << local_it->first << ":" << local_it->second; - const std::string& old_filename = local_it->first; - hid_t old_h = local_it->second; - if (old_h <= static_cast(0)) { - LBANN_ERROR(std::string{} + " :: data file '" + old_filename - + "' has a corrupt file descriptor = " + std::to_string(old_h)); - } - conduit::relay::io::hdf5_close_file(old_h); - int num_erased = m_open_fd_map.erase(old_filename); - if(num_erased != 1) { - LBANN_ERROR(std::string{} + " :: erasing file descriptor for '" + old_filename - + "' that had a file descriptor = " + std::to_string(old_h)); - } + const std::string& old_filename = local_it->first; + hid_t old_h = local_it->second; + if (old_h <= static_cast(0)) { + LBANN_ERROR(std::string{} + " :: data file '" + old_filename + + "' has a corrupt file descriptor = " + std::to_string(old_h)); + } + + conduit::relay::io::hdf5_close_file(old_h); + int num_erased = m_open_fd_map.erase(old_filename); + if(num_erased != 1) { + LBANN_ERROR(std::string{} + " :: erasing file descriptor for '" + old_filename + + "' that had a file descriptor = " + std::to_string(old_h)); } - // std::cout << std::endl; } - m_open_fd_map.emplace(filename, h); + auto result = m_open_fd_map.emplace(filename, h); + int bucket2 = m_open_fd_map.bucket(filename); + int bucket_count2 = m_open_fd_map.bucket_count(); + if(!result.second) { + LBANN_WARNING(std::string{} + " :: The key for " + filename + " already existed"); + } + if(bucket2 != bucket) { + LBANN_ERROR(std::string{} + " :: the buckets don't match original bucket " + + std::to_string(bucket) + " with a count of " + std::to_string(bucket_count) + " and new bucket " + std::to_string(bucket2) + " and a new count of " + std::to_string(bucket_count2)); + } + if(m_open_fd_map.bucket_size(bucket) != 1) { + LBANN_WARNING(std::string{} + " 
:: there should be one entry with an open file descriptors for bucket " + + std::to_string(bucket) + " not " + + std::to_string(m_open_fd_map.bucket_size(bucket)) + " entries"); + } } void all_gather_archive(const std::string &archive, std::vector& gathered_archive, lbann_comm& comm); @@ -225,7 +240,7 @@ class sample_list_jag { /// Add the header info to the given string void write_header(std::string& sstr, size_t num_files) const; - protected: + private: /// The number of partitions to divide samples into size_t m_num_partitions; diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index 923c649c60b..642e4a65ccc 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -20,7 +20,7 @@ #include #include -#define LBANN_MAX_OPEN_DATA_FILES 100 +#define LBANN_MAX_OPEN_DATA_FILES 768 namespace lbann { @@ -78,6 +78,12 @@ inline size_t sample_list_indexer::get_partition_offset() const { inline sample_list_jag::sample_list_jag() : m_num_partitions(1u) { + /// Create an unordered map that will not rehash and has a fixed + /// number of buckets. This allows the sample list to easily select + /// a file descriptor for closing + m_open_fd_map.reserve(LBANN_MAX_OPEN_DATA_FILES); + m_open_fd_map.rehash(LBANN_MAX_OPEN_DATA_FILES); + m_open_fd_map.max_load_factor(std::numeric_limits::max()); } inline void sample_list_jag::set_num_partitions(size_t n) { @@ -534,12 +540,6 @@ inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { m_sample_list.reserve(num_samples); m_sample_id_map.reserve(num_ids); m_file_map.reserve(num_files); - /// Create an unordered map that will not rehash and has a fixed - /// number of buckets. 
This allows the sample list to easily select - /// a file descriptor for closing - m_open_fd_map.reserve(num_files); - m_open_fd_map.rehash(LBANN_MAX_OPEN_DATA_FILES); - m_open_fd_map.max_load_factor(std::numeric_limits::max()); for(int r = 0; r < num_ranks; r++) { const samples_t& sample_list = per_rank_samples[r]; From 0fb2ccdb0e51a593d94dca772cbf1dbb5cce1810 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Thu, 21 Feb 2019 10:10:52 -0800 Subject: [PATCH 091/443] Revert the changes around merge_skip_overlapped --- src/proto/lbann.proto | 1 - src/proto/proto_common.cpp | 7 ------- 2 files changed, 8 deletions(-) diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index eea4ce07e80..1ceba293ef6 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -56,7 +56,6 @@ message Reader { int64 max_neighborhood = 113; // pilot2_molecular_reader int32 num_image_srcs = 114; // data_reader_multi_images float scaling_factor_int16 = 116; // for numpy_npz_reader with int16 data - bool merge_skip_overlapped = 117; // for data_reader_merge_samples int32 max_files_to_load = 1000; diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 501d2d065a2..d6ee41f077e 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -204,16 +204,9 @@ void init_data_readers(lbann_comm *comm, const lbann_data::LbannPB& p, std::map< filedir = filedir + "/"; } auto paths = glob(filedir + readme.data_file_pattern()); - if(readme.merge_skip_overlapped()) { - assert((paths.size()%comm->get_num_trainers()) == 0); - } std::vector npy_readers; for(auto i = paths.begin(); i != paths.end(); i++) { const auto path = *i; - if(readme.merge_skip_overlapped() - && (std::distance(paths.begin(), i)%comm->get_procs_per_trainer()) != comm->get_rank_in_trainer()) { - continue; - } if(master) { std::cout << "Loading file: " << path << std::endl; } if (readme.format() == "numpy") { auto *reader_numpy = new 
numpy_reader(false); From 437be863bb96d09b62842c9fcf3ae3285cc0367a Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 21 Feb 2019 14:53:33 -0800 Subject: [PATCH 092/443] Tweak so optimizer can accept L2 regularization gradient. L2 regularization submits gradient at end of backprop. --- include/lbann/layers/transform/weights.hpp | 2 +- include/lbann/optimizers/adam.hpp | 4 ++-- src/optimizers/adam.cu | 8 +++----- src/optimizers/optimizer.cpp | 13 ++++++++----- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/include/lbann/layers/transform/weights.hpp b/include/lbann/layers/transform/weights.hpp index c834497e9f9..8ffb89dd892 100644 --- a/include/lbann/layers/transform/weights.hpp +++ b/include/lbann/layers/transform/weights.hpp @@ -172,7 +172,7 @@ class weights_layer : public transform_layer { } void bp_compute() override { - constexpr DataType zero = 1; + constexpr DataType zero = 0; constexpr DataType one = 1; // Get optimizer diff --git a/include/lbann/optimizers/adam.hpp b/include/lbann/optimizers/adam.hpp index 6e4cfd63595..da35786a927 100644 --- a/include/lbann/optimizers/adam.hpp +++ b/include/lbann/optimizers/adam.hpp @@ -93,10 +93,10 @@ class adam : public optimizer { /** Hyperparameter exploration. */ friend class lbann_callback_perturb_adam; - /** CPU optimization step. */ + /** CPU implementation of optimization step. */ void step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient); #ifdef LBANN_HAS_CUDA - /** GPU optimization step. */ + /** GPU implementation of optimization step. 
*/ void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient); #endif // LBANN_HAS_CUDA diff --git a/src/optimizers/adam.cu b/src/optimizers/adam.cu index a19bbb97385..034c74d2e69 100644 --- a/src/optimizers/adam.cu +++ b/src/optimizers/adam.cu @@ -95,18 +95,16 @@ void adam::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { // Launch CUDA kernel constexpr size_t block_size = 256; - dim3 block_dims, grid_dims; - block_dims.x = block_size; - grid_dims.x = (local_size + block_size - 1) / block_size; + const size_t grid_size = (local_size + block_size - 1) / block_size; cudaStream_t stream = El::GPUManager::Stream(); if (values.Contiguous() && gradient.Contiguous() && m_moment1->Contiguous() && m_moment2->Contiguous()) { - contiguous_kernel<<>>( + contiguous_kernel<<>>( local_size, correction, m_eps, m_beta1, m_beta2, values.Buffer(), gradient.LockedBuffer(), m_moment1->Buffer(), m_moment2->Buffer()); } else { - noncontiguous_kernel<<>>( + noncontiguous_kernel<<>>( local_height, local_width, correction, m_eps, m_beta1, m_beta2, values.Buffer(), values.LDim(), gradient.LockedBuffer(), gradient.LDim(), diff --git a/src/optimizers/optimizer.cpp b/src/optimizers/optimizer.cpp index 6046928c484..e0eb8cff479 100644 --- a/src/optimizers/optimizer.cpp +++ b/src/optimizers/optimizer.cpp @@ -116,8 +116,11 @@ AbsDistMat& optimizer::get_gradient() { m_gradient_status = optimizer_gradient_status::ready; } if (m_gradient_status != optimizer_gradient_status::ready) { - LBANN_ERROR("expected gradient to be \"ready\", but its status is " - "\"" + to_string(m_gradient_status) + "\""); + std::ostringstream err; + err << "unexpected gradient status (expected " + << "\"" << to_string(optimizer_gradient_status::ready) << "\", " + << "but found \"" << to_string(m_gradient_status) << "\")"; + LBANN_ERROR(err.str()); } // Return gradient @@ -150,6 +153,9 @@ void optimizer::add_to_gradient(const AbsDistMat& gradient, } // Add to gradient + if (m_gradient_status == 
optimizer_gradient_status::allreduce_started) { + finish_gradient_allreduce(); + } switch (m_gradient_status) { case optimizer_gradient_status::ready: if (allreduce_needed) { @@ -174,9 +180,6 @@ void optimizer::add_to_gradient(const AbsDistMat& gradient, } break; case optimizer_gradient_status::allreduce_started: - LBANN_ERROR("attempted to add to gradient " - "after a non-blocking allreduce has been launched"); - break; default: LBANN_ERROR("unexpected gradient status " "(" + to_string(m_gradient_status) + ")"); From f27c48e172c1f28fdc659ae3ae6f3eab88560a3c Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Thu, 21 Feb 2019 14:55:38 -0800 Subject: [PATCH 093/443] Fixed a bug where the mapping of samples to ranks in the data store did not properly take into account the bracketing by the mini-batch size. When computing the original owners it is necessary to have the original mini-batch size. To compute the owner index first find its position inside of the mini-batch (mod mini-batch size) and then find how it is striped across the ranks in the trainer. For the exchange data routines, the sample target or owner is calculated modulo the active mini-batch size. 
--- include/lbann/data_readers/data_reader.hpp | 2 +- .../data_readers/data_reader_jag_conduit.hpp | 2 +- include/lbann/data_store/data_store_jag.hpp | 4 ++-- include/lbann/data_store/generic_data_store.hpp | 2 +- src/data_readers/data_reader.cpp | 2 +- src/data_readers/data_reader_jag_conduit.cpp | 4 ++-- src/data_store/data_store_jag.cpp | 17 ++++++++++------- src/data_store/generic_data_store.cpp | 2 +- src/utils/lbann_library.cpp | 2 +- 9 files changed, 20 insertions(+), 17 deletions(-) diff --git a/include/lbann/data_readers/data_reader.hpp b/include/lbann/data_readers/data_reader.hpp index 44e6f662cc7..1d93e4a219b 100644 --- a/include/lbann/data_readers/data_reader.hpp +++ b/include/lbann/data_readers/data_reader.hpp @@ -691,7 +691,7 @@ class generic_data_reader : public lbann_image_preprocessor { } /// sets up a data_store. - virtual void setup_data_store(model *m); + virtual void setup_data_store(model *m, int mini_batch_size); void set_gan_labelling(bool has_gan_labelling) { m_gan_labelling = has_gan_labelling; diff --git a/include/lbann/data_readers/data_reader_jag_conduit.hpp b/include/lbann/data_readers/data_reader_jag_conduit.hpp index ea6219435f5..0b61880347f 100644 --- a/include/lbann/data_readers/data_reader_jag_conduit.hpp +++ b/include/lbann/data_readers/data_reader_jag_conduit.hpp @@ -237,7 +237,7 @@ class data_reader_jag_conduit : public generic_data_reader { #ifndef _JAG_OFFLINE_TOOL_MODE_ /// sets up a data_store. - void setup_data_store(model *m) override; + void setup_data_store(model *m, int mini_batch_size) override; #endif // _JAG_OFFLINE_TOOL_MODE_ /// A untiliy function to convert the pointer to image data into an opencv image diff --git a/include/lbann/data_store/data_store_jag.hpp b/include/lbann/data_store/data_store_jag.hpp index be1f98ee97b..70ba226d90b 100644 --- a/include/lbann/data_store/data_store_jag.hpp +++ b/include/lbann/data_store/data_store_jag.hpp @@ -57,7 +57,7 @@ class data_store_jag : public generic_data_store { //! 
dtor ~data_store_jag() override; - void setup() override; + void setup(int mini_batch_size) override; /// returns the conduit node const conduit::Node & get_conduit_node(int data_id) const; @@ -125,7 +125,7 @@ protected : void build_node_for_sending(const conduit::Node &node_in, conduit::Node &node_out); /// fills in m_owner, which maps index -> owning processor - void build_owner_map(); + void build_owner_map(int mini_batch_size); /// maps processor id -> set of indices (whose associated samples) /// this proc needs to send. (formerly called "proc_to_indices) diff --git a/include/lbann/data_store/generic_data_store.hpp b/include/lbann/data_store/generic_data_store.hpp index 920bcf81f95..64b1874759e 100644 --- a/include/lbann/data_store/generic_data_store.hpp +++ b/include/lbann/data_store/generic_data_store.hpp @@ -63,7 +63,7 @@ class generic_data_store { virtual generic_data_store * copy() const = 0; /// called by generic_data_reader::setup_data_store - virtual void setup(); + virtual void setup(int mini_batch_size); /// called by generic_data_reader::update; /// this method calls exchange_data if m_epoch > 1 diff --git a/src/data_readers/data_reader.cpp b/src/data_readers/data_reader.cpp index 7edd9242db8..3dc2f6db45f 100644 --- a/src/data_readers/data_reader.cpp +++ b/src/data_readers/data_reader.cpp @@ -704,7 +704,7 @@ double generic_data_reader::get_use_percent() const { return m_use_percent; } -void generic_data_reader::setup_data_store(model *m) { +void generic_data_reader::setup_data_store(model *m, int mini_batch_size) { m_data_store = nullptr; } diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index acc55ec2717..6e944a97905 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -1427,13 +1427,13 @@ bool data_reader_jag_conduit::fetch_label(CPUMat& Y, int data_id, int mb_idx) { return true; } -void 
data_reader_jag_conduit::setup_data_store(model *m) { +void data_reader_jag_conduit::setup_data_store(model *m, int mini_batch_size) { if (m_data_store != nullptr) { delete m_data_store; } m_jag_store = new data_store_jag(this, m); // *data_store_jag m_data_store = m_jag_store; // *generic_data_store - m_data_store->setup(); + m_data_store->setup(mini_batch_size); } void data_reader_jag_conduit::save_image(Mat& pixels, const std::string filename, bool do_scale) { diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index 3c25c557804..e25fad71937 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -49,7 +49,7 @@ data_store_jag::data_store_jag( data_store_jag::~data_store_jag() {} -void data_store_jag::setup() { +void data_store_jag::setup(int mini_batch_size) { double tm1 = get_time(); std::stringstream err; @@ -62,8 +62,8 @@ void data_store_jag::setup() { LBANN_ERROR("out-of-memory mode for data_store_jag has not been implemented"); } - generic_data_store::setup(); - build_owner_map(); + generic_data_store::setup(mini_batch_size); + build_owner_map(mini_batch_size); m_super_node = options::get()->get_bool("super_node"); if (m_master) { @@ -380,7 +380,7 @@ int data_store_jag::build_indices_i_will_recv(int current_pos, int mb_size) { int k = 0; for (int i=current_pos; i< current_pos + mb_size; ++i) { auto index = (*m_shuffled_indices)[i]; - if (i % m_np == m_rank) { + if ((i % mb_size) % m_np == m_rank) { int owner = m_owner[index]; m_indices_to_recv[owner].insert(index); k++; @@ -397,7 +397,7 @@ int data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { auto index = (*m_shuffled_indices)[i]; /// If this rank owns the index send it to the (i%m_np)'th rank if (m_data.find(index) != m_data.end()) { - m_indices_to_send[i % m_np].insert(index); + m_indices_to_send[(i % mb_size) % m_np].insert(index); // Sanity check if (m_owner[index] != m_rank) { @@ -411,11 +411,14 @@ int 
data_store_jag::build_indices_i_will_send(int current_pos, int mb_size) { return k; } -void data_store_jag::build_owner_map() { +void data_store_jag::build_owner_map(int mini_batch_size) { m_owner.clear(); for (size_t i = 0; i < m_shuffled_indices->size(); i++) { auto index = (*m_shuffled_indices)[i]; - m_owner[index] = i % m_np; + /// To compute the owner index first find its position inside of + /// the mini-batch (mod mini-batch size) and then find how it is + /// striped across the ranks in the trainer + m_owner[index] = (i % mini_batch_size) % m_np; } } diff --git a/src/data_store/generic_data_store.cpp b/src/data_store/generic_data_store.cpp index 6bed2560593..117096408b8 100644 --- a/src/data_store/generic_data_store.cpp +++ b/src/data_store/generic_data_store.cpp @@ -130,7 +130,7 @@ void generic_data_store::get_my_datastore_indices() { } } -void generic_data_store::setup() { +void generic_data_store::setup(int mini_batch_size) { set_shuffled_indices( &(m_reader->get_shuffled_indices()) ); set_num_global_indices(); m_num_readers = m_reader->get_num_parallel_readers(); diff --git a/src/utils/lbann_library.cpp b/src/utils/lbann_library.cpp index fe7d2f5bfc1..7f6a73fd496 100644 --- a/src/utils/lbann_library.cpp +++ b/src/utils/lbann_library.cpp @@ -192,7 +192,7 @@ model *build_model_from_prototext(int argc, char **argv, } for (auto r : data_readers) { if (!r.second) continue; - r.second->setup_data_store(model); + r.second->setup_data_store(model, pb_model->mini_batch_size()); } } From 09b8bc2aac6c7fa79b5dbbf5303343a515e5c46c Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Thu, 21 Feb 2019 15:01:04 -0800 Subject: [PATCH 094/443] Removed the header file for the deprecated data_reader_jag_conduit_hdf5 reader. 
--- .../data_reader_jag_conduit_hdf5.hpp | 214 ------------------ include/lbann/lbann.hpp | 1 - 2 files changed, 215 deletions(-) delete mode 100644 include/lbann/data_readers/data_reader_jag_conduit_hdf5.hpp diff --git a/include/lbann/data_readers/data_reader_jag_conduit_hdf5.hpp b/include/lbann/data_readers/data_reader_jag_conduit_hdf5.hpp deleted file mode 100644 index af27eda86d8..00000000000 --- a/include/lbann/data_readers/data_reader_jag_conduit_hdf5.hpp +++ /dev/null @@ -1,214 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. -// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. 
-//////////////////////////////////////////////////////////////////////////////// - -#ifndef _DATA_READER_JAG_CONDUIT_HDF5_HPP_ -#define _DATA_READER_JAG_CONDUIT_HDF5_HPP_ - -#include "lbann_config.hpp" // may define LBANN_HAS_CONDUIT - -#ifdef LBANN_HAS_CONDUIT -#include "lbann/data_readers/opencv.hpp" -#include "data_reader.hpp" -#include "conduit/conduit.hpp" -#include "conduit/conduit_relay.hpp" -#include "lbann/data_readers/cv_process.hpp" -#include -#include -#include - -namespace lbann { - -class jag_store; - -/** - * Loads the pairs of JAG simulation inputs and results from a conduit-wrapped hdf5 file - */ -class data_reader_jag_conduit_hdf5 : public generic_data_reader { - public: - using ch_t = float; ///< jag output image channel type - using scalar_t = double; ///< jag scalar output type - using input_t = double; ///< jag input parameter type - - /** - * Dependent/indepdendent variable types - * - JAG_Image: simulation output images - * - JAG_Scalar: simulation output scalars - * - JAG_Input: simulation input parameters - * - Undefined: the default - */ - enum variable_t {Undefined=0, JAG_Image, JAG_Scalar, JAG_Input}; - using TypeID = conduit::DataType::TypeID; - - data_reader_jag_conduit_hdf5(bool shuffle = true) = delete; - data_reader_jag_conduit_hdf5(const std::shared_ptr& pp, bool shuffle = true); - data_reader_jag_conduit_hdf5(const data_reader_jag_conduit_hdf5&); - data_reader_jag_conduit_hdf5& operator=(const data_reader_jag_conduit_hdf5&); - ~data_reader_jag_conduit_hdf5() override; - data_reader_jag_conduit_hdf5* copy() const override { return new data_reader_jag_conduit_hdf5(*this); } - - std::string get_type() const override { - return "data_reader_jag_conduit_hdf5"; - } - - /// Load data and do data reader's chores. 
- void load() override; - - void setup(int num_io_threads, std::shared_ptr io_thread_pool) override; - - /// Return the number of samples - size_t get_num_samples() const; - - /// Return the number of measurement views - unsigned int get_num_img_srcs() const; - // Return the number of channels in an image - unsigned int get_num_channels() const; - /// Return the linearized size of an image; - size_t get_linearized_image_size() const; - /// Return the linearized size of one channel in the image - size_t get_linearized_channel_size() const; - /// Return the linearized size of scalar outputs - size_t get_linearized_scalar_size() const; - /// Return the linearized size of inputs - size_t get_linearized_input_size() const; - - /// Return the total linearized size of data - int get_linearized_data_size() const override; - /// Return the total linearized size of response - int get_linearized_response_size() const override; - /// Return the per-source linearized sizes of composite data - std::vector get_linearized_data_sizes() const; - /// Return the per-source linearized sizes of composite response - std::vector get_linearized_response_sizes() const; - - /// Return the dimension of data - const std::vector get_data_dims() const override; - - int get_num_labels() const override; - int get_linearized_label_size() const override; - - /// Show the description - std::string get_description() const; - - /// Return the image simulation output of the i-th sample - std::vector get_cv_images(const size_t i, int tid) const; - - template - static size_t add_val(const std::string key, const conduit::Node& n, std::vector& vals); - - /// sets up a data_store. 
- void setup_data_store(model *m) override; - - /// A untiliy function to convert the pointer to image data into an opencv image - static cv::Mat cast_to_cvMat(const std::pair img, const int height); - - void set_image_dims(const int width, const int height, const int ch=1); - - void set_scalar_keys(const std::string &keys) { m_scalar_keys = keys; } - void set_input_keys(const std::string &keys) { m_input_keys = keys; } - void set_image_views(const std::string &views) { m_image_views = views; } - void set_image_channels(const std::string &channels) { m_image_channels = channels; } - - void post_update() override; - - protected: - - friend jag_store; - - virtual void set_defaults(); - virtual bool replicate_processor(const cv_process& pp, const int nthreads); - virtual void copy_members(const data_reader_jag_conduit_hdf5& rhs); - - bool fetch_datum(CPUMat& X, int data_id, int mb_idx); - - virtual std::vector - create_datum_views(CPUMat& X, const std::vector& sizes, const int mb_idx) const; - - bool fetch_label(CPUMat& X, int data_id, int mb_idx) override; - - bool fetch_response(CPUMat& X, int data_id, int mb_idx) override; - - /// Check if the given sample id is valid - bool check_sample_id(const size_t i) const; - - /// Choose the image closest to the bang time among those associated with the i-th sample - std::vector choose_image_near_bang_time(const size_t i) const; - - jag_store * get_jag_store() const { return m_jag_store; } - - int m_image_width; ///< image width - int m_image_height; ///< image height - int m_image_num_channels; ///< number of image channels - - /// Whether data have been loaded - bool m_is_data_loaded; - - int m_num_labels; ///< number of labels - - /// preprocessor duplicated for each omp thread - std::vector > m_pps; - std::unique_ptr m_master_pps; - - /// jag_store; replaces m_data - jag_store *m_jag_store; - - bool m_owns_jag_store; - - /** - * Set of keys that are associated with non_numerical values. 
- * Such a variable requires a specific method for mapping to a numeric value. - * When a key is found in the set, the variable is ignored. Therefore, - * when a conversion is defined for such a key, remove it from the set. - */ - static const std::set non_numeric_vars; - - /** - * indicate if all the input variables are of the input_t type, in which case - * we can rely on a data extraction method with lower overhead. - */ - bool m_uniform_input_type; - - /** - * maps integers to sample IDs. In the future the sample IDs may - * not be integers; also, this map only includes sample IDs that - * have /performance/success = 1 - */ - std::unordered_map m_success_map; - - std::set m_emi_selectors; - - std::string m_scalar_keys; - std::string m_input_keys; - std::string m_image_views; - std::string m_image_channels; - - data_reader_jag_conduit_hdf5* m_primary_reader; -}; - - - -} // end of namespace lbann -#endif // LBANN_HAS_CONDUIT -#endif // _DATA_READER_JAG_CONDUIT_HDF5_HPP_ diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index 8b3da4d1d83..396d67287df 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -113,7 +113,6 @@ #include "lbann/data_readers/data_reader_synthetic.hpp" #include "lbann/data_readers/data_reader_jag.hpp" #include "lbann/data_readers/data_reader_jag_conduit.hpp" -#include "lbann/data_readers/data_reader_jag_conduit_hdf5.hpp" #include "lbann/data_readers/data_reader_nci.hpp" #include "lbann/data_readers/data_reader_numpy.hpp" #include "lbann/data_readers/data_reader_csv.hpp" From 9e33ffe5145555f02dd287c51c128995f927b212 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 21 Feb 2019 15:49:43 -0800 Subject: [PATCH 095/443] Refactoring SGD optimizer. Adapting to refactored optimizer base class. 
--- include/lbann/lbann.hpp | 4 +- include/lbann/optimizers/optimizer.hpp | 13 ++- include/lbann/optimizers/sgd.hpp | 24 +++-- src/callbacks/CMakeLists.txt | 2 +- src/optimizers/CMakeLists.txt | 4 +- src/optimizers/adam.cu | 2 +- src/optimizers/optimizer.cpp | 4 +- src/optimizers/sgd.cpp | 108 +++++++++++++--------- src/optimizers/sgd.cu | 100 ++++++++++---------- src/proto/factories/callback_factory.cpp | 2 - src/proto/factories/optimizer_factory.cpp | 2 +- 11 files changed, 147 insertions(+), 118 deletions(-) diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index 1b9d2bd3057..84fd441990b 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -146,7 +146,7 @@ #include "lbann/callbacks/callback_dump_gradients.hpp" #include "lbann/callbacks/callback_dump_minibatch_sample_indices.hpp" #include "lbann/callbacks/callback_early_stopping.hpp" -// #include "lbann/callbacks/callback_ltfb.hpp" +#include "lbann/callbacks/callback_ltfb.hpp" #include "lbann/callbacks/callback_save_images.hpp" #include "lbann/callbacks/callback_save_model.hpp" #include "lbann/callbacks/profiler.hpp" @@ -174,7 +174,7 @@ #include "lbann/optimizers/adam.hpp" // #include "lbann/optimizers/hypergradient_adam.hpp" // #include "lbann/optimizers/rmsprop.hpp" -// #include "lbann/optimizers/sgd.hpp" +#include "lbann/optimizers/sgd.hpp" /// Objective functions #include "lbann/objective_functions/objective_function.hpp" diff --git a/include/lbann/optimizers/optimizer.hpp b/include/lbann/optimizers/optimizer.hpp index c131771beb3..7801a6d9e65 100644 --- a/include/lbann/optimizers/optimizer.hpp +++ b/include/lbann/optimizers/optimizer.hpp @@ -106,7 +106,18 @@ class optimizer { */ AbsDistMat& get_gradient(); - /** Add to the objective function gradient w.r.t. the weights. */ + /** Add to the objective function gradient w.r.t. the weights. + * @param gradient Contribution to gradient. + * @param scale Scaling factor for gradient + * contribution. 
+ * @param allreduce_needed Whether the gradient contribution + * requires an allreduce over its redundant + * communicator. If false, duplicated data + * (over the redundant communicator) is + * assumed to be identical. If true, an + * allreduce is performed lazily when the + * gradient is accessed. + */ void add_to_gradient(const AbsDistMat& gradient, DataType scale = DataType(1), bool allreduce_needed = false); diff --git a/include/lbann/optimizers/sgd.hpp b/include/lbann/optimizers/sgd.hpp index 6a1aa27b589..bf5c122018f 100644 --- a/include/lbann/optimizers/sgd.hpp +++ b/include/lbann/optimizers/sgd.hpp @@ -24,8 +24,8 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_OPTIMIZERS_SGD_HPP -#define LBANN_OPTIMIZERS_SGD_HPP +#ifndef LBANN_OPTIMIZERS_SGD_HPP_INCLUDED +#define LBANN_OPTIMIZERS_SGD_HPP_INCLUDED #include "lbann/optimizers/optimizer.hpp" @@ -34,6 +34,7 @@ namespace lbann { /** Stochastic gradient descent optimizer. * * Supports momentum and Nesterov acceleration. + * @todo Dedicated optimizers for momentum or Nesterov SGD. */ class sgd : public optimizer { @@ -59,14 +60,12 @@ class sgd : public optimizer { /** Velocity for momentum optimizer. */ AbsDistMat& get_velocity(); - void setup(weights& w) override; + void setup(weights* w = nullptr) override; - /** Perform the computation in an optimization step. */ +protected: + + /** Computation for an optimization step. */ void step_compute(AbsDistMat& values, const AbsDistMat& gradient) override; -#ifdef LBANN_HAS_CUDNN - /** Perform the computation in an optimization step on GPU. */ - void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) override; -#endif // LBANN_HAS_CUDNN private: @@ -77,6 +76,13 @@ class sgd : public optimizer { /** Velocity for momentum optimizer. */ std::unique_ptr m_velocity; + /** CPU implementation of momentum or Nesterov step. 
*/ + void momentum_step_cpu(AbsDistMat& values, const AbsDistMat& gradient); +#ifdef LBANN_HAS_CUDA + /** GPU implementation of momentum or Nesterov step. */ + void momentum_step_gpu(AbsDistMat& values, const AbsDistMat& gradient); +#endif // LBANN_HAS_CUDA + //************************************************************************ // Checkpointing //************************************************************************ @@ -114,4 +120,4 @@ class sgd : public optimizer { } // namespace lbann -#endif // LBANN_OPTIMIZERS_SGD_HPP +#endif // LBANN_OPTIMIZERS_SGD_HPP_INCLUDED diff --git a/src/callbacks/CMakeLists.txt b/src/callbacks/CMakeLists.txt index d7fa7c08db9..2b29975b44b 100644 --- a/src/callbacks/CMakeLists.txt +++ b/src/callbacks/CMakeLists.txt @@ -19,7 +19,7 @@ set_full_path(THIS_DIR_SOURCES callback_imcomm.cpp callback_io.cpp callback_learning_rate.cpp -# callback_ltfb.cpp + callback_ltfb.cpp callback_perturb_adam.cpp callback_print.cpp callback_save_images.cpp diff --git a/src/optimizers/CMakeLists.txt b/src/optimizers/CMakeLists.txt index cfded8d9c77..37bca5d7dd4 100644 --- a/src/optimizers/CMakeLists.txt +++ b/src/optimizers/CMakeLists.txt @@ -5,7 +5,7 @@ set_full_path(THIS_DIR_SOURCES # hypergradient_adam.cpp optimizer.cpp # rmsprop.cpp -# sgd.cpp + sgd.cpp ) if (LBANN_HAS_CUDA) @@ -14,7 +14,7 @@ if (LBANN_HAS_CUDA) # adagrad.cu adam.cu # rmsprop.cu -# sgd.cu + sgd.cu ) endif () diff --git a/src/optimizers/adam.cu b/src/optimizers/adam.cu index 034c74d2e69..ca9ec7cc465 100644 --- a/src/optimizers/adam.cu +++ b/src/optimizers/adam.cu @@ -44,7 +44,7 @@ __global__ void noncontiguous_kernel(size_t height, size_t moment1_ldim, DataType * __restrict__ moment2, size_t moment2_ldim) { - const auto& gid = blockIdx.x * blockDim.x + threadIdx.x; + const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const auto& row = gid % height; const auto& col = gid / height; const auto& g = gradient[row + col * gradient_ldim] + eps; diff --git 
a/src/optimizers/optimizer.cpp b/src/optimizers/optimizer.cpp index e0eb8cff479..3d5afeac622 100644 --- a/src/optimizers/optimizer.cpp +++ b/src/optimizers/optimizer.cpp @@ -229,10 +229,10 @@ void optimizer::setup(weights* w) { const auto& height = m_weights->get_matrix_height(); const auto& width = m_weights->get_matrix_width(); const AbsDistMat& values = m_weights->get_values(); - m_gradient.reset(values.Construct(values.Grid(), values.Root())); + m_gradient.reset(AbsDistMat::Instantiate(values.DistData())); m_gradient->AlignWith(values); m_gradient->Resize(height, width); - m_gradient_v.reset(values.Construct(values.Grid(), values.Root())); + m_gradient_v.reset(AbsDistMat::Instantiate(values.DistData())); m_gradient_v->AlignWith(values); #ifdef HYDROGEN_HAVE_CUB if (m_gradient_v->GetLocalDevice() == El::Device::GPU) { diff --git a/src/optimizers/sgd.cpp b/src/optimizers/sgd.cpp index d243ce820b6..cae1e6e68df 100644 --- a/src/optimizers/sgd.cpp +++ b/src/optimizers/sgd.cpp @@ -29,7 +29,7 @@ namespace lbann { -sgd::sgd(lbann_comm *comm, +sgd::sgd(lbann_comm* comm, DataType learning_rate, DataType momentum, bool nesterov) @@ -71,7 +71,7 @@ AbsDistMat& sgd::get_velocity() { return const_cast(static_cast(*this).get_velocity()); } -void sgd::setup(weights& w) { +void sgd::setup(weights* w) { optimizer::setup(w); const auto& gradient = this->get_gradient(); m_velocity.reset(AbsDistMat::Instantiate(gradient.DistData())); @@ -79,62 +79,82 @@ void sgd::setup(weights& w) { } void sgd::step_compute(AbsDistMat& values, const AbsDistMat& gradient) { - - // SGD without momentum is just an Axpy if (m_momentum == DataType(0)) { - El::Axpy(-m_learning_rate, gradient, values); - return; + // Vanilla SGD + El::Axpy(-this->get_learning_rate(), gradient, values); + } else { + // Momentum or Nesterov SGD + switch (values.GetLocalDevice()) { + case El::Device::CPU: momentum_step_cpu(values, gradient); break; +#ifdef LBANN_HAS_CUDA + case El::Device::GPU: momentum_step_gpu(values, 
gradient); break; +#endif // LBANN_HAS_CUDA + default: + std::ostringstream err; + err << "unsupported device type " + << "(" << static_cast(values.GetLocalDevice()) << ")"; + LBANN_ERROR(err.str()); + } } +} + +void sgd::momentum_step_cpu(AbsDistMat& values, const AbsDistMat& gradient) { // Get local matrix data - const int local_height = values.LocalHeight(); - const int local_width = values.LocalWidth(); + const auto& learning_rate = this->get_learning_rate(); + const size_t local_height = values.LocalHeight(); + const size_t local_width = values.LocalWidth(); DataType* __restrict__ values_buffer = values.Buffer(); - const int values_ldim = values.LDim(); const DataType* __restrict__ gradient_buffer = gradient.LockedBuffer(); - const int gradient_ldim = gradient.LDim(); DataType* __restrict__ velocity_buffer = m_velocity->Buffer(); - const int velocity_ldim = m_velocity->LDim(); - // Check if matrix data is contiguous - if (values_ldim != local_height - || gradient_ldim != local_height - || velocity_ldim != local_height) { - // (Nesterov) momentum SGD for non-contiguous data - LBANN_OMP_PARALLEL_FOR_COLLAPSE2 - for (int j=0; jContiguous()) { + const size_t local_size = local_height * local_width; if (m_nesterov) { - // Nesterov's accelerated gradient descent for contiguous data + + // Nesterov SGD for contiguous data LBANN_OMP_PARALLEL_FOR - for (int i=0; iLDim(); + LBANN_OMP_PARALLEL_FOR_COLLAPSE2 + for (size_t col = 0; col < local_width; ++col) { + for (size_t row=0; row < local_height; ++row) { + const auto& g = gradient_buffer[row+col*gradient_ldim]; + auto& v = velocity_buffer[row+col*velocity_ldim]; + auto& x = values_buffer[row+col*values_ldim]; + v = m_momentum * v + g; + x -= (m_nesterov ? 
+ learning_rate * (m_momentum * v + g) : + learning_rate * v); + } + } + } } @@ -146,7 +166,7 @@ void sgd::step_compute(AbsDistMat& values, const AbsDistMat& gradient) { bool sgd::save_to_checkpoint_shared(persist& p, std::string name_prefix) { optimizer::save_to_checkpoint_shared(p, name_prefix); - if (m_comm->am_trainer_master()) { + if (get_comm().am_trainer_master()) { pack_scalars(p); } @@ -160,11 +180,11 @@ bool sgd::save_to_checkpoint_shared(persist& p, std::string name_prefix) { bool sgd::load_from_checkpoint_shared(persist& p, std::string name_prefix) { optimizer::load_from_checkpoint_shared(p, name_prefix); struct packing_header header; - if (m_comm->am_trainer_master()) { + if (get_comm().am_trainer_master()) { unpack_scalars(p, &header); } - m_comm->trainer_broadcast(0, header); + get_comm().trainer_broadcast(0, header); unpack_header(header); char l_name[512]; diff --git a/src/optimizers/sgd.cu b/src/optimizers/sgd.cu index 5ccc1b3093d..30ac6a3a0cc 100644 --- a/src/optimizers/sgd.cu +++ b/src/optimizers/sgd.cu @@ -30,88 +30,82 @@ namespace lbann { namespace { -__global__ void momentum_kernel(int height, - int width, +__global__ void momentum_kernel(size_t height, + size_t width, DataType learning_rate, DataType momentum, DataType * __restrict__ values, - int values_ldim, + size_t values_ldim, const DataType * __restrict__ gradient, - int gradient_ldim, + size_t gradient_ldim, DataType * __restrict__ velocity, - int velocity_ldim) { - const int tid = blockIdx.x * blockDim.x + threadIdx.x; - const int num_threads = gridDim.x * blockDim.x; - for (int pos = tid; pos < height * width; pos += num_threads) { - const auto& i = pos % height; - const auto& j = pos / height; - const auto& g = gradient[i + j * gradient_ldim]; - auto& v = velocity[i + j * velocity_ldim]; - auto& x = values[i + j * values_ldim]; + size_t velocity_ldim) { + const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; + const size_t nthreads = gridDim.x * blockDim.x; + for (size_t pos = 
gid; pos < height * width; pos += nthreads) { + const auto& row = pos % height; + const auto& col = pos / height; + const auto& g = gradient[row + col * gradient_ldim]; + auto& v = velocity[row + col * velocity_ldim]; + auto& x = values[row + col * values_ldim]; v = momentum * v + g; x -= learning_rate * v; } } -__global__ void nesterov_kernel(int height, - int width, +__global__ void nesterov_kernel(size_t height, + size_t width, DataType learning_rate, DataType momentum, DataType * __restrict__ values, - int values_ldim, + size_t values_ldim, const DataType * __restrict__ gradient, - int gradient_ldim, + size_t gradient_ldim, DataType * __restrict__ velocity, - int velocity_ldim) { - const int tid = blockIdx.x * blockDim.x + threadIdx.x; - const int num_threads = gridDim.x * blockDim.x; - for (int pos = tid; pos < height * width; pos += num_threads) { - const auto& i = pos % height; - const auto& j = pos / height; - const auto& g = gradient[i + j * gradient_ldim]; - auto& v = velocity[i + j * velocity_ldim]; - auto& x = values[i + j * values_ldim]; + size_t velocity_ldim) { + const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; + const size_t nthreads = gridDim.x * blockDim.x; + for (size_t pos = gid; pos < height * width; pos += nthreads) { + const auto& row = pos % height; + const auto& col = pos / height; + const auto& g = gradient[row + col * gradient_ldim]; + auto& v = velocity[row + col * velocity_ldim]; + auto& x = values[row + col * values_ldim]; v = momentum * v + g; x -= learning_rate * (momentum * v + g); } } -} +} // namespace -void sgd::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { +void sgd::momentum_step_gpu(AbsDistMat& values, const AbsDistMat& gradient) { // Get matrix dimensions - const int local_height = values.LocalHeight(); - const int local_width = values.LocalWidth(); - const int size = local_height * local_width; - if (size <= 0) { return; } - - // SGD without momentum - if (m_momentum == DataType(0)) { - 
El::Axpy(-m_learning_rate, gradient, values); - return; - } + const size_t local_height = values.LocalHeight(); + const size_t local_width = values.LocalWidth(); + const size_t local_size = local_height * local_width; + if (local_size <= 0) { return; } // Launch CUDA kernels for momentum SGD or NAG - const int block_size = 256; - dim3 block_dims, grid_dims; - block_dims.x = block_size; - grid_dims.x = (size + block_size - 1) / block_size; + constexpr size_t block_size = 256; + const size_t grid_size = (local_size + block_size - 1) / block_size; cudaStream_t stream = El::GPUManager::Stream(); if (m_nesterov) { - nesterov_kernel<<>> - (local_height, local_width, m_learning_rate, m_momentum, - values.Buffer(), values.LDim(), - gradient.LockedBuffer(), gradient.LDim(), - m_velocity->Buffer(), m_velocity->LDim()); + nesterov_kernel<<>>( + local_height, local_width, + this->get_learning_rate(), m_momentum, + values.Buffer(), values.LDim(), + gradient.LockedBuffer(), gradient.LDim(), + m_velocity->Buffer(), m_velocity->LDim()); } else { - momentum_kernel<<>> - (local_height, local_width, m_learning_rate, m_momentum, - values.Buffer(), values.LDim(), - gradient.LockedBuffer(), gradient.LDim(), - m_velocity->Buffer(), m_velocity->LDim()); + momentum_kernel<<>>( + local_height, local_width, + this->get_learning_rate(), m_momentum, + values.Buffer(), values.LDim(), + gradient.LockedBuffer(), gradient.LDim(), + m_velocity->Buffer(), m_velocity->LDim()); } } -} // namespace lbann +} // namespace lbann diff --git a/src/proto/factories/callback_factory.cpp b/src/proto/factories/callback_factory.cpp index 3105ef8814a..bedb7c8c515 100644 --- a/src/proto/factories/callback_factory.cpp +++ b/src/proto/factories/callback_factory.cpp @@ -96,7 +96,6 @@ lbann_callback* construct_callback(lbann_comm* comm, // Inter-model communication ////////////////////////////////////////////////////////////// -#if 0 if (proto_cb.has_ltfb()) { const auto& params = proto_cb.ltfb(); return new 
lbann_callback_ltfb(params.batch_interval(), @@ -106,7 +105,6 @@ lbann_callback* construct_callback(lbann_comm* comm, lbann_callback_ltfb::string_to_comm_algo(params.communication_algorithm()), summarizer); } -#endif // 0 /// @todo if (proto_cb.has_imcomm()) { const auto& params = proto_cb.imcomm(); diff --git a/src/proto/factories/optimizer_factory.cpp b/src/proto/factories/optimizer_factory.cpp index aa4526371a7..167bd7be729 100644 --- a/src/proto/factories/optimizer_factory.cpp +++ b/src/proto/factories/optimizer_factory.cpp @@ -32,7 +32,6 @@ namespace proto { optimizer* construct_optimizer(lbann_comm* comm, const lbann_data::Optimizer& proto_opt) { -#if 0 // Stochastic gradient descent if (proto_opt.has_sgd()) { const auto& params = proto_opt.sgd(); @@ -42,6 +41,7 @@ optimizer* construct_optimizer(lbann_comm* comm, params.nesterov()); } +#if 0 // AdaGrad if (proto_opt.has_adagrad()) { const auto& params = proto_opt.adagrad(); From c9aca694b693af6e2544d5af4253ecee4a607e20 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 21 Feb 2019 16:14:00 -0800 Subject: [PATCH 096/443] Bugfix for GPU Adam. 
--- src/optimizers/adam.cu | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/src/optimizers/adam.cu b/src/optimizers/adam.cu index ca9ec7cc465..f84b6096f06 100644 --- a/src/optimizers/adam.cu +++ b/src/optimizers/adam.cu @@ -45,15 +45,17 @@ __global__ void noncontiguous_kernel(size_t height, DataType * __restrict__ moment2, size_t moment2_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; - const auto& row = gid % height; - const auto& col = gid / height; - const auto& g = gradient[row + col * gradient_ldim] + eps; - auto& m1 = moment1[row + col * moment1_ldim]; - auto& m2 = moment2[row + col * moment2_ldim]; - auto& x = values[row + col * values_ldim]; - m1 = beta1 * m1 + (DataType(1) - beta1) * g; - m2 = beta2 * m2 + (DataType(1) - beta2) * g * g; - x -= correction * m1 / (cuda::sqrt(m2) + eps); + if (gid < height * width) { + const auto& row = gid % height; + const auto& col = gid / height; + const auto& g = gradient[row + col * gradient_ldim] + eps; + auto& m1 = moment1[row + col * moment1_ldim]; + auto& m2 = moment2[row + col * moment2_ldim]; + auto& x = values[row + col * values_ldim]; + m1 = beta1 * m1 + (DataType(1) - beta1) * g; + m2 = beta2 * m2 + (DataType(1) - beta2) * g * g; + x -= correction * m1 / (cuda::sqrt(m2) + eps); + } } __global__ void contiguous_kernel(size_t size, @@ -65,14 +67,16 @@ __global__ void contiguous_kernel(size_t size, const DataType * __restrict__ gradient, DataType * __restrict__ moment1, DataType * __restrict__ moment2) { - const auto& gid = threadIdx.x + blockIdx.x * blockDim.x; - const auto& g = gradient[gid] + eps; - auto& m1 = moment1[gid]; - auto& m2 = moment2[gid]; - auto& x = values[gid]; - m1 = beta1 * m1 + (DataType(1) - beta1) * g; - m2 = beta2 * m2 + (DataType(1) - beta2) * g * g; - x -= correction * m1 / (cuda::sqrt(m2) + eps); + const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; + if (gid < size) { + const auto& g = gradient[gid] + eps; 
+ auto& m1 = moment1[gid]; + auto& m2 = moment2[gid]; + auto& x = values[gid]; + m1 = beta1 * m1 + (DataType(1) - beta1) * g; + m2 = beta2 * m2 + (DataType(1) - beta2) * g * g; + x -= correction * m1 / (cuda::sqrt(m2) + eps); + } } } // namespace From 6e699e90fa913028207dce7bb7da67f7d46af4bf Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 21 Feb 2019 17:12:28 -0800 Subject: [PATCH 097/443] Refactoring AdaGrad optimizer. Adapting to refactored optimizer base class. --- include/lbann/lbann.hpp | 2 +- include/lbann/optimizers/adagrad.hpp | 64 +++++++------- include/lbann/optimizers/sgd.hpp | 7 +- src/optimizers/CMakeLists.txt | 4 +- src/optimizers/adagrad.cpp | 102 ++++++++++------------ src/optimizers/adagrad.cu | 56 ++++++------ src/optimizers/adam.cpp | 4 +- src/optimizers/adam.cu | 2 +- src/optimizers/sgd.cpp | 4 +- src/optimizers/sgd.cu | 2 +- src/proto/factories/optimizer_factory.cpp | 2 +- 11 files changed, 119 insertions(+), 130 deletions(-) diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index 84fd441990b..f99450e40eb 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -170,7 +170,7 @@ #include "lbann/weights/variance_scaling_initializers.hpp" /// Optimizers -// #include "lbann/optimizers/adagrad.hpp" +#include "lbann/optimizers/adagrad.hpp" #include "lbann/optimizers/adam.hpp" // #include "lbann/optimizers/hypergradient_adam.hpp" // #include "lbann/optimizers/rmsprop.hpp" diff --git a/include/lbann/optimizers/adagrad.hpp b/include/lbann/optimizers/adagrad.hpp index 1acc258a04f..6f827ecbe9c 100644 --- a/include/lbann/optimizers/adagrad.hpp +++ b/include/lbann/optimizers/adagrad.hpp @@ -24,67 +24,69 @@ // permissions and limitations under the license. 
//////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_OPTIMIZER_ADAGRAD_HPP -#define LBANN_OPTIMIZER_ADAGRAD_HPP +#ifndef LBANN_OPTIMIZERS_ADAGRAD_HPP_INCLUDED +#define LBANN_OPTIMIZERS_ADAGRAD_HPP_INCLUDED #include "lbann/optimizers/optimizer.hpp" namespace lbann { -/** AdaGrad optimizer. */ +/** AdaGrad optimizer. + * + * Reference: + * + * John Duchi, Elad Hazan, and Yoram Singer. "Adaptive subgradient + * methods for online learning and stochastic optimization." Journal + * of Machine Learning Research 12, no. Jul (2011): 2121-2159. + */ class adagrad : public optimizer { - public: +public: - /** Constructor. */ adagrad(lbann_comm *comm, DataType learning_rate, DataType eps = DataType(1e-8)); - - /** Copy constructor. */ adagrad(const adagrad& other); - /** Copy assignment operator. */ adagrad& operator=(const adagrad& other); - /** Destructor. */ - ~adagrad() override; - /** Create a copy. */ + ~adagrad() override = default; adagrad* copy() const override { return new adagrad(*this); } - /** Get the optimizer name. */ + /** Human-readable type name. */ std::string get_type() const override { return "AdaGrad"; } /** Human-readable description. */ description get_description() const override; - /** Setup optimizer. */ - void setup(weights& w) override; + void setup(weights* w = nullptr) override; + +protected: - /** Perform the computation in an optimization step. */ + /** Computation for an optimization step. */ void step_compute(AbsDistMat& values, const AbsDistMat& gradient) override; + +private: + + /** Small factor to avoid division by zero. */ + DataType m_eps; + /** AdaGrad cache. */ + std::unique_ptr m_cache; + + /** CPU implementation of optimization step. */ + void step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient); #ifdef LBANN_HAS_CUDNN - /** Perform the computation in an optimization step on GPU. 
*/ - void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) override; + /** GPU implementation of optimization step. */ + void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient); #endif // LBANN_HAS_CUDNN - /// Set parameters to optimize and initialize optimizer - void setup(AbsDistMat *parameters) ; - /// Update parameters using objective function gradient - void update(const AbsDistMat *gradient) ; - std::string name() const { return "adagrad"; } - - private: + // =========================================== + // Checkpointing + // =========================================== bool save_to_checkpoint_shared(persist& p, std::string m_name) override; bool load_from_checkpoint_shared(persist& p, std::string m_name) override; - bool save_to_checkpoint_distributed(persist& p, std::string m_name) override; bool load_from_checkpoint_distributed(persist& p, std::string m_name) override; - /** Small factor to avoid division by zero. */ - DataType m_eps; - /** AdaGrad cache. 
*/ - AbsDistMat *m_cache; - }; } // namespace lbann -#endif // LBANN_OPTIMIZER_ADAGRAD_HPP +#endif // LBANN_OPTIMIZERS_ADAGRAD_HPP_INCLUDED diff --git a/include/lbann/optimizers/sgd.hpp b/include/lbann/optimizers/sgd.hpp index bf5c122018f..7ee3068b8fa 100644 --- a/include/lbann/optimizers/sgd.hpp +++ b/include/lbann/optimizers/sgd.hpp @@ -83,9 +83,9 @@ class sgd : public optimizer { void momentum_step_gpu(AbsDistMat& values, const AbsDistMat& gradient); #endif // LBANN_HAS_CUDA -//************************************************************************ -// Checkpointing -//************************************************************************ + // =========================================== + // Checkpointing + // =========================================== struct packing_header { DataType momentum; @@ -112,7 +112,6 @@ class sgd : public optimizer { bool save_to_checkpoint_shared(persist& p, std::string m_name) override; bool load_from_checkpoint_shared(persist& p, std::string m_name) override; - bool save_to_checkpoint_distributed(persist& p, std::string m_name) override; bool load_from_checkpoint_distributed(persist& p, std::string m_name) override; diff --git a/src/optimizers/CMakeLists.txt b/src/optimizers/CMakeLists.txt index 37bca5d7dd4..e4a6d20a3e7 100644 --- a/src/optimizers/CMakeLists.txt +++ b/src/optimizers/CMakeLists.txt @@ -1,6 +1,6 @@ # Add the source files for this directory set_full_path(THIS_DIR_SOURCES -# adagrad.cpp + adagrad.cpp adam.cpp # hypergradient_adam.cpp optimizer.cpp @@ -11,7 +11,7 @@ set_full_path(THIS_DIR_SOURCES if (LBANN_HAS_CUDA) # Add the CUDA source files for this directory set_full_path(THIS_DIR_CU_SOURCES -# adagrad.cu + adagrad.cu adam.cu # rmsprop.cu sgd.cu diff --git a/src/optimizers/adagrad.cpp b/src/optimizers/adagrad.cpp index 0948da5518a..21d7d8c654e 100644 --- a/src/optimizers/adagrad.cpp +++ b/src/optimizers/adagrad.cpp @@ -30,98 +30,84 @@ namespace lbann { adagrad::adagrad(lbann_comm *comm, DataType learning_rate, 
DataType eps) - : optimizer(comm, learning_rate), m_eps(eps), m_cache(nullptr) {} + : optimizer(comm, learning_rate), m_eps(eps) {} adagrad::adagrad(const adagrad& other) - : optimizer(other), m_eps(other.m_eps), m_cache(other.m_cache) { - if (m_cache != nullptr) { m_cache = m_cache->Copy(); } -} + : optimizer(other), + m_eps(other.m_eps), + m_cache(other.m_cache ? other.m_cache->Copy() : nullptr) {} adagrad& adagrad::operator=(const adagrad& other) { optimizer::operator=(other); m_eps = other.m_eps; - - // Copy cache matrix - if (m_cache != nullptr && other.m_cache != nullptr - && m_cache->DistData() == other.m_cache->DistData()) { - El::Copy(*other.m_cache, *m_cache); - } - else { - if (m_cache != nullptr) { delete m_cache; } - m_cache = other.m_cache; - if (m_cache != nullptr) { m_cache = m_cache->Copy(); } - } - + m_cache.reset(other.m_cache ? other.m_cache->Copy() : nullptr); return *this; } -adagrad::~adagrad() { - if (m_cache != nullptr) { delete m_cache; } -} - description adagrad::get_description() const { auto&& desc = optimizer::get_description(); desc.add("eps", m_eps); return desc; } -void adagrad::setup(weights& w) { +void adagrad::setup(weights* w) { optimizer::setup(w); - m_cache = m_gradient->Construct(m_gradient->Grid(), - m_gradient->Root()); - El::Zeros(*m_cache, m_gradient->Height(), m_gradient->Width()); + const auto& gradient = this->get_gradient(); + m_cache.reset(AbsDistMat::Instantiate(gradient.DistData())); + El::Zeros(*m_cache, gradient.Height(), gradient.Width()); } void adagrad::step_compute(AbsDistMat& values, const AbsDistMat& gradient) { + switch (values.GetLocalDevice()) { + case El::Device::CPU: step_compute_cpu(values, gradient); break; +#ifdef LBANN_HAS_CUDA + case El::Device::GPU: step_compute_gpu(values, gradient); break; +#endif // LBANN_HAS_CUDA + default: + std::ostringstream err; + err << "unsupported device type " + << "(" << static_cast(values.GetLocalDevice()) << ")"; + LBANN_ERROR(err.str()); + } +} + +void 
adagrad::step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient) { // Get local matrix data - const int local_height = values.LocalHeight(); - const int local_width = values.LocalWidth(); + const size_t local_height = values.LocalHeight(); + const size_t local_width = values.LocalWidth(); DataType* __restrict__ values_buffer = values.Buffer(); - const int values_ldim = values.LDim(); + const size_t values_ldim = values.LDim(); const DataType* __restrict__ gradient_buffer = gradient.LockedBuffer(); - const int gradient_ldim = gradient.LDim(); + const size_t gradient_ldim = gradient.LDim(); DataType* __restrict__ cache_buffer = m_cache->Buffer(); - const int cache_ldim = m_cache->LDim(); - - // Check if matrix data is contiguous - if (values_ldim != local_height - || gradient_ldim != local_height - || cache_ldim != local_height) { - // Update with non-contiguous data - LBANN_OMP_PARALLEL_FOR_COLLAPSE2 - for (int j=0; jLDim(); + + // Apply AdaGrad step + const auto& learning_rate = get_learning_rate(); + LBANN_OMP_PARALLEL_FOR_COLLAPSE2 + for (size_t col = 0; col < local_width; ++col) { + for (size_t row = 0; row < local_height; ++row) { + auto& x = values_buffer[row+col*values_ldim]; + const auto& g = gradient_buffer[row+col*gradient_ldim]; + auto& c = cache_buffer[row+col*cache_ldim]; c += g * g; - x -= m_learning_rate * g / (std::sqrt(c) + m_eps); + x -= learning_rate * g / (std::sqrt(c) + m_eps); } } + } -//////////////////////////////////////////////////////////// +// ============================================= // Checkpointing -//////////////////////////////////////////////////////////// +// ============================================= bool adagrad::save_to_checkpoint_shared(persist& p, std::string name_prefix) { optimizer::save_to_checkpoint_shared(p, name_prefix); char l_name[512]; sprintf(l_name, "%s_optimizer_cache_%lldx%lld", name_prefix.c_str(), m_cache->Height(), m_cache->Width()); - p.write_distmat(persist_type::train, l_name, m_cache); + 
p.write_distmat(persist_type::train, l_name, m_cache.get()); return true; } @@ -131,7 +117,7 @@ bool adagrad::load_from_checkpoint_shared(persist& p, std::string name_prefix) { char l_name[512]; sprintf(l_name, "%s_optimizer_cache_%lldx%lld.bin", name_prefix.c_str(), m_cache->Height(), m_cache->Width()); - p.read_distmat(persist_type::train, l_name, m_cache); + p.read_distmat(persist_type::train, l_name, m_cache.get()); return true; } @@ -156,4 +142,4 @@ bool adagrad::load_from_checkpoint_distributed(persist& p, std::string name_pref return true; } -} // namespace lbann +} // namespace lbann diff --git a/src/optimizers/adagrad.cu b/src/optimizers/adagrad.cu index 65abf7dbdc3..6df85552495 100644 --- a/src/optimizers/adagrad.cu +++ b/src/optimizers/adagrad.cu @@ -30,40 +30,42 @@ namespace lbann { namespace { -__global__ void adagrad_kernel(int height, - int width, - DataType learning_rate, - DataType eps, - DataType * __restrict__ values, - int values_ldim, - const DataType * __restrict__ gradient, - int gradient_ldim, - DataType * __restrict__ cache, - int cache_ldim) { - const int gid = blockIdx.x * blockDim.x + threadIdx.x; - const int num_threads = gridDim.x * blockDim.x; - for (int pos = gid; pos < height * width; pos += num_threads) { - const auto& i = pos % height; - const auto& j = pos / height; - auto& x = values[i + j * values_ldim]; - const auto& g = gradient[i + j * gradient_ldim]; - auto& c = cache[i + j * cache_ldim]; +__global__ void kernel(size_t height, + size_t width, + DataType learning_rate, + DataType eps, + DataType * __restrict__ values, + size_t values_ldim, + const DataType * __restrict__ gradient, + size_t gradient_ldim, + DataType * __restrict__ cache, + size_t cache_ldim) { + const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; + const size_t nthreads = blockDim.x * gridDim.x; + for (size_t pos = gid; pos < height * width; pos += nthreads) { + const auto& row = pos % height; + const auto& col = pos / height; + auto& x = values[row + 
col * values_ldim]; + const auto& g = gradient[row + col * gradient_ldim]; + auto& c = cache[row + col * cache_ldim]; c += g * g; x -= learning_rate * g / (cuda::sqrt(c) + eps); } } -} +} // namespace void adagrad::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { - const int local_height = values.LocalHeight(); - const int local_width = values.LocalWidth(); - const int size = local_height * local_width; - const int block_dim = 256; - const int grid_dim = (size + block_dim - 1) / block_dim; - if (grid_dim > 0) { - adagrad_kernel<<>>( - local_height, local_width, m_learning_rate, m_eps, + const size_t local_height = values.LocalHeight(); + const size_t local_width = values.LocalWidth(); + const size_t local_size = local_height * local_width; + if (local_size > 0) { + constexpr size_t block_size = 256; + const size_t grid_size = (local_size + block_size - 1) / block_size; + auto&& stream = El::GPUManager::Stream(); + kernel<<>>( + local_height, local_width, + this->get_learning_rate(), m_eps, values.Buffer(), values.LDim(), gradient.LockedBuffer(), gradient.LDim(), m_cache->Buffer(), m_cache->LDim()); diff --git a/src/optimizers/adam.cpp b/src/optimizers/adam.cpp index 92774b50eca..d0b3444f22e 100644 --- a/src/optimizers/adam.cpp +++ b/src/optimizers/adam.cpp @@ -173,9 +173,9 @@ void adam::step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient) { } -//////////////////////////////////////////////////////////// +// ============================================= // Checkpointing -//////////////////////////////////////////////////////////// +// ============================================= bool adam::save_to_checkpoint_shared(persist& p, std::string name_prefix) { optimizer::save_to_checkpoint_shared(p, name_prefix); diff --git a/src/optimizers/adam.cu b/src/optimizers/adam.cu index f84b6096f06..3312737e83c 100644 --- a/src/optimizers/adam.cu +++ b/src/optimizers/adam.cu @@ -100,7 +100,7 @@ void adam::step_compute_gpu(AbsDistMat& values, const 
AbsDistMat& gradient) { // Launch CUDA kernel constexpr size_t block_size = 256; const size_t grid_size = (local_size + block_size - 1) / block_size; - cudaStream_t stream = El::GPUManager::Stream(); + auto&& stream = El::GPUManager::Stream(); if (values.Contiguous() && gradient.Contiguous() && m_moment1->Contiguous() && m_moment2->Contiguous()) { contiguous_kernel<<>>( diff --git a/src/optimizers/sgd.cpp b/src/optimizers/sgd.cpp index cae1e6e68df..f28bc219d3b 100644 --- a/src/optimizers/sgd.cpp +++ b/src/optimizers/sgd.cpp @@ -159,9 +159,9 @@ void sgd::momentum_step_cpu(AbsDistMat& values, const AbsDistMat& gradient) { } -//////////////////////////////////////////////////////////// +// ============================================= // Checkpointing -//////////////////////////////////////////////////////////// +// ============================================= bool sgd::save_to_checkpoint_shared(persist& p, std::string name_prefix) { optimizer::save_to_checkpoint_shared(p, name_prefix); diff --git a/src/optimizers/sgd.cu b/src/optimizers/sgd.cu index 30ac6a3a0cc..6917dd684ab 100644 --- a/src/optimizers/sgd.cu +++ b/src/optimizers/sgd.cu @@ -89,7 +89,7 @@ void sgd::momentum_step_gpu(AbsDistMat& values, const AbsDistMat& gradient) { // Launch CUDA kernels for momentum SGD or NAG constexpr size_t block_size = 256; const size_t grid_size = (local_size + block_size - 1) / block_size; - cudaStream_t stream = El::GPUManager::Stream(); + auto&& stream = El::GPUManager::Stream(); if (m_nesterov) { nesterov_kernel<<>>( local_height, local_width, diff --git a/src/proto/factories/optimizer_factory.cpp b/src/proto/factories/optimizer_factory.cpp index 167bd7be729..d4af25c890d 100644 --- a/src/proto/factories/optimizer_factory.cpp +++ b/src/proto/factories/optimizer_factory.cpp @@ -41,13 +41,13 @@ optimizer* construct_optimizer(lbann_comm* comm, params.nesterov()); } -#if 0 // AdaGrad if (proto_opt.has_adagrad()) { const auto& params = proto_opt.adagrad(); return new adagrad(comm, 
params.learn_rate(), params.eps()); } +#if 0 // RMSProp if (proto_opt.has_rmsprop()) { const auto& params = proto_opt.rmsprop(); From 59813bbfa86cc0eb10087fabfa6d924044f9c417 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 21 Feb 2019 18:02:44 -0800 Subject: [PATCH 098/443] Refactoring RMSprop optimizer. Adapting to refactored optimizer base class. --- include/lbann/lbann.hpp | 2 +- include/lbann/optimizers/adagrad.hpp | 2 +- include/lbann/optimizers/rmsprop.hpp | 35 +++++++---- src/optimizers/CMakeLists.txt | 4 +- src/optimizers/adagrad.cpp | 6 +- src/optimizers/rmsprop.cpp | 77 ++++++++++++----------- src/optimizers/rmsprop.cu | 46 +++++++------- src/optimizers/sgd.cpp | 6 +- src/proto/factories/optimizer_factory.cpp | 2 - 9 files changed, 97 insertions(+), 83 deletions(-) diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index f99450e40eb..3bacd228b46 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -173,7 +173,7 @@ #include "lbann/optimizers/adagrad.hpp" #include "lbann/optimizers/adam.hpp" // #include "lbann/optimizers/hypergradient_adam.hpp" -// #include "lbann/optimizers/rmsprop.hpp" +#include "lbann/optimizers/rmsprop.hpp" #include "lbann/optimizers/sgd.hpp" /// Objective functions diff --git a/include/lbann/optimizers/adagrad.hpp b/include/lbann/optimizers/adagrad.hpp index 6f827ecbe9c..95473fde883 100644 --- a/include/lbann/optimizers/adagrad.hpp +++ b/include/lbann/optimizers/adagrad.hpp @@ -42,7 +42,7 @@ namespace lbann { class adagrad : public optimizer { public: - adagrad(lbann_comm *comm, + adagrad(lbann_comm* comm, DataType learning_rate, DataType eps = DataType(1e-8)); adagrad(const adagrad& other); diff --git a/include/lbann/optimizers/rmsprop.hpp b/include/lbann/optimizers/rmsprop.hpp index e34b21de13d..d1a4bb35731 100644 --- a/include/lbann/optimizers/rmsprop.hpp +++ b/include/lbann/optimizers/rmsprop.hpp @@ -32,11 +32,15 @@ namespace lbann { -/** RMSprop optimizer. */ +/** RMSprop optimizer. 
+ * + * See + * https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf. + */ class rmsprop : public optimizer { public: - rmsprop(lbann_comm *comm, + rmsprop(lbann_comm* comm, DataType learning_rate, DataType decay_rate, DataType eps = DataType(1e-8)); @@ -50,15 +54,13 @@ class rmsprop : public optimizer { /** Human-readable description. */ description get_description() const override; - /** Setup optimizer. */ - void setup(weights& w) override; + void setup(weights* w = nullptr) override; - /** Perform the computation in an optimization step. */ - void step_compute(AbsDistMat& values, const AbsDistMat& gradient) override; -#ifdef LBANN_HAS_CUDA - /** Perform the computation in an optimization step on GPU. */ - void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) override; -#endif // LBANN_HAS_CUDA +protected: + + /** Computation for an optimization step. */ + void step_compute(AbsDistMat& values, + const AbsDistMat& gradient) override; private: @@ -69,9 +71,16 @@ class rmsprop : public optimizer { /** RMSprop cache. */ std::unique_ptr m_cache; -//************************************************************************ -// Checkpointing -//************************************************************************ + /** CPU implementation of optimization step. */ + void step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient); +#ifdef LBANN_HAS_CUDA + /** GPU implementation of optimization step. 
*/ + void step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient); +#endif // LBANN_HAS_CUDA + + // =========================================== + // Checkpointing + // =========================================== struct packing_header { DataType decay_rate; diff --git a/src/optimizers/CMakeLists.txt b/src/optimizers/CMakeLists.txt index e4a6d20a3e7..ff3c4d2d2b1 100644 --- a/src/optimizers/CMakeLists.txt +++ b/src/optimizers/CMakeLists.txt @@ -4,7 +4,7 @@ set_full_path(THIS_DIR_SOURCES adam.cpp # hypergradient_adam.cpp optimizer.cpp -# rmsprop.cpp + rmsprop.cpp sgd.cpp ) @@ -13,7 +13,7 @@ if (LBANN_HAS_CUDA) set_full_path(THIS_DIR_CU_SOURCES adagrad.cu adam.cu -# rmsprop.cu + rmsprop.cu sgd.cu ) endif () diff --git a/src/optimizers/adagrad.cpp b/src/optimizers/adagrad.cpp index 21d7d8c654e..b42ed735aa2 100644 --- a/src/optimizers/adagrad.cpp +++ b/src/optimizers/adagrad.cpp @@ -76,11 +76,11 @@ void adagrad::step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient) { // Get local matrix data const size_t local_height = values.LocalHeight(); const size_t local_width = values.LocalWidth(); - DataType* __restrict__ values_buffer = values.Buffer(); + auto* __restrict__ values_buffer = values.Buffer(); const size_t values_ldim = values.LDim(); - const DataType* __restrict__ gradient_buffer = gradient.LockedBuffer(); + const auto* __restrict__ gradient_buffer = gradient.LockedBuffer(); const size_t gradient_ldim = gradient.LDim(); - DataType* __restrict__ cache_buffer = m_cache->Buffer(); + auto* __restrict__ cache_buffer = m_cache->Buffer(); const size_t cache_ldim = m_cache->LDim(); // Apply AdaGrad step diff --git a/src/optimizers/rmsprop.cpp b/src/optimizers/rmsprop.cpp index 14057e0ae42..e28dc17a481 100644 --- a/src/optimizers/rmsprop.cpp +++ b/src/optimizers/rmsprop.cpp @@ -58,53 +58,58 @@ description rmsprop::get_description() const { return desc; } -void rmsprop::setup(weights& w) { +void rmsprop::setup(weights* w) { optimizer::setup(w); - 
m_cache.reset(m_gradient->Construct(m_gradient->Grid(), - m_gradient->Root())); - El::Zeros(*m_cache, m_gradient->Height(), m_gradient->Width()); + const auto& gradient = this->get_gradient(); + m_cache.reset(AbsDistMat::Instantiate(gradient.DistData())); + El::Zeros(*m_cache, gradient.Height(), gradient.Width()); } void rmsprop::step_compute(AbsDistMat& values, const AbsDistMat& gradient) { + switch (values.GetLocalDevice()) { + case El::Device::CPU: step_compute_cpu(values, gradient); break; +#ifdef LBANN_HAS_CUDA + case El::Device::GPU: step_compute_gpu(values, gradient); break; +#endif // LBANN_HAS_CUDA + default: + std::ostringstream err; + err << "unsupported device type " + << "(" << static_cast(values.GetLocalDevice()) << ")"; + LBANN_ERROR(err.str()); + } +} + +void rmsprop::step_compute_cpu(AbsDistMat& values, const AbsDistMat& gradient) { // Get local matrix data - const int local_height = values.LocalHeight(); - const int local_width = values.LocalWidth(); - DataType* __restrict__ values_buffer = values.Buffer(); - const int values_ldim = values.LDim(); - const DataType* __restrict__ gradient_buffer = gradient.LockedBuffer(); - const int gradient_ldim = gradient.LDim(); - DataType* __restrict__ cache_buffer = m_cache->Buffer(); - const int cache_ldim = m_cache->LDim(); - - // Check if matrix data is contiguous - if (values_ldim != local_height - || gradient_ldim != local_height - || cache_ldim != local_height) { - // Update with non-contiguous data - LBANN_OMP_PARALLEL_FOR_COLLAPSE2 - for (int j=0; jBuffer(); + const size_t cache_ldim = m_cache->LDim(); + + // Apply RMSprop step + const auto& learning_rate = get_learning_rate(); + LBANN_OMP_PARALLEL_FOR_COLLAPSE2 + for (size_t col = 0; col < local_width; ++col) { + for (size_t row = 0; row < local_height; ++row) { + auto& x = values_buffer[row+col*values_ldim]; + const auto& g = gradient_buffer[row+col*gradient_ldim]; + auto& c = cache_buffer[row+col*cache_ldim]; c = m_decay_rate * c + (DataType(1) - 
m_decay_rate) * g * g; - x -= m_learning_rate * g / (std::sqrt(c) + m_eps); + x -= learning_rate * g / (std::sqrt(c) + m_eps); } } + } +// ============================================= +// Checkpointing +// ============================================= + bool rmsprop::save_to_checkpoint_shared(persist& p, std::string name_prefix) { optimizer::save_to_checkpoint_shared(p, name_prefix); diff --git a/src/optimizers/rmsprop.cu b/src/optimizers/rmsprop.cu index d65eaeb49b2..76cfa338f75 100644 --- a/src/optimizers/rmsprop.cu +++ b/src/optimizers/rmsprop.cu @@ -31,20 +31,20 @@ namespace lbann { namespace { -__global__ void rmsprop_kernel(El::Int height, - El::Int width, - DataType learning_rate, - DataType decay_rate, - DataType eps, - DataType * __restrict__ values, - El::Int values_ldim, - const DataType * __restrict__ gradient, - El::Int gradient_ldim, - DataType * __restrict__ cache, - El::Int cache_ldim) { - const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; - const El::Int nthreads = gridDim.x * blockDim.x; - for (El::Int pos = gid; pos < height * width; pos += nthreads) { +__global__ void kernel(size_t height, + size_t width, + DataType learning_rate, + DataType decay_rate, + DataType eps, + DataType * __restrict__ values, + size_t values_ldim, + const DataType * __restrict__ gradient, + size_t gradient_ldim, + DataType * __restrict__ cache, + size_t cache_ldim) { + const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; + const size_t nthreads = gridDim.x * blockDim.x; + for (size_t pos = gid; pos < height * width; pos += nthreads) { const auto& row = pos % height; const auto& col = pos / height; const auto& g = gradient[row + col * gradient_ldim]; @@ -58,14 +58,16 @@ __global__ void rmsprop_kernel(El::Int height, } // namespace void rmsprop::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { - const El::Int local_height = values.LocalHeight(); - const El::Int local_width = values.LocalWidth(); - const El::Int size = local_height * 
local_width; - constexpr El::Int block_dim = 256; - const El::Int grid_dim = (size + block_dim - 1) / block_dim; - if (grid_dim > 0) { - rmsprop_kernel<<>>( - local_height, local_width, m_learning_rate, m_decay_rate, m_eps, + const size_t local_height = values.LocalHeight(); + const size_t local_width = values.LocalWidth(); + const size_t local_size = local_height * local_width; + if (local_size > 0) { + constexpr size_t block_size = 256; + const size_t grid_size = (local_size + block_size - 1) / block_size; + auto&& stream = El::GPUManager::Stream(); + kernel<<>>( + local_height, local_width, + this->get_learning_rate(), m_decay_rate, m_eps, values.Buffer(), values.LDim(), gradient.LockedBuffer(), gradient.LDim(), m_cache->Buffer(), m_cache->LDim()); diff --git a/src/optimizers/sgd.cpp b/src/optimizers/sgd.cpp index f28bc219d3b..b1ebf3b744b 100644 --- a/src/optimizers/sgd.cpp +++ b/src/optimizers/sgd.cpp @@ -104,9 +104,9 @@ void sgd::momentum_step_cpu(AbsDistMat& values, const AbsDistMat& gradient) { const auto& learning_rate = this->get_learning_rate(); const size_t local_height = values.LocalHeight(); const size_t local_width = values.LocalWidth(); - DataType* __restrict__ values_buffer = values.Buffer(); - const DataType* __restrict__ gradient_buffer = gradient.LockedBuffer(); - DataType* __restrict__ velocity_buffer = m_velocity->Buffer(); + auto* __restrict__ values_buffer = values.Buffer(); + const auto* __restrict__ gradient_buffer = gradient.LockedBuffer(); + auto* __restrict__ velocity_buffer = m_velocity->Buffer(); if (values.Contiguous() && gradient.Contiguous() && m_velocity->Contiguous()) { diff --git a/src/proto/factories/optimizer_factory.cpp b/src/proto/factories/optimizer_factory.cpp index d4af25c890d..8f7b3d21a40 100644 --- a/src/proto/factories/optimizer_factory.cpp +++ b/src/proto/factories/optimizer_factory.cpp @@ -47,7 +47,6 @@ optimizer* construct_optimizer(lbann_comm* comm, return new adagrad(comm, params.learn_rate(), params.eps()); } -#if 
0 // RMSProp if (proto_opt.has_rmsprop()) { const auto& params = proto_opt.rmsprop(); @@ -56,7 +55,6 @@ optimizer* construct_optimizer(lbann_comm* comm, params.decay_rate(), params.eps()); } -#endif // Adam if (proto_opt.has_adam()) { From 6769749ab193ae88bfdfe7fdc2fbad053f2681f3 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 21 Feb 2019 18:21:01 -0800 Subject: [PATCH 099/443] Refactoring hypergradient Adam optimizer. Adapting to refactored optimizer base class. --- include/lbann/lbann.hpp | 2 +- include/lbann/optimizers/adagrad.hpp | 4 +- .../lbann/optimizers/hypergradient_adam.hpp | 69 +++---- include/lbann/optimizers/rmsprop.hpp | 2 +- src/optimizers/CMakeLists.txt | 2 +- src/optimizers/hypergradient_adam.cpp | 169 ++++++------------ src/proto/factories/optimizer_factory.cpp | 2 - 7 files changed, 95 insertions(+), 155 deletions(-) diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index 3bacd228b46..8b3da4d1d83 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -172,7 +172,7 @@ /// Optimizers #include "lbann/optimizers/adagrad.hpp" #include "lbann/optimizers/adam.hpp" -// #include "lbann/optimizers/hypergradient_adam.hpp" +#include "lbann/optimizers/hypergradient_adam.hpp" #include "lbann/optimizers/rmsprop.hpp" #include "lbann/optimizers/sgd.hpp" diff --git a/include/lbann/optimizers/adagrad.hpp b/include/lbann/optimizers/adagrad.hpp index 95473fde883..05a06949246 100644 --- a/include/lbann/optimizers/adagrad.hpp +++ b/include/lbann/optimizers/adagrad.hpp @@ -42,9 +42,7 @@ namespace lbann { class adagrad : public optimizer { public: - adagrad(lbann_comm* comm, - DataType learning_rate, - DataType eps = DataType(1e-8)); + adagrad(lbann_comm* comm, DataType learning_rate, DataType eps = 1e-8); adagrad(const adagrad& other); adagrad& operator=(const adagrad& other); ~adagrad() override = default; diff --git a/include/lbann/optimizers/hypergradient_adam.hpp b/include/lbann/optimizers/hypergradient_adam.hpp index 
dbf6dd1c27a..18084423486 100644 --- a/include/lbann/optimizers/hypergradient_adam.hpp +++ b/include/lbann/optimizers/hypergradient_adam.hpp @@ -24,55 +24,57 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_OPTIMIZER_HYPERGRADIENT_ADAM_HPP -#define LBANN_OPTIMIZER_HYPERGRADIENT_ADAM_HPP +#ifndef LBANN_OPTIMIZERS_HYPERGRADIENT_ADAM_HPP_INCLUDED +#define LBANN_OPTIMIZERS_HYPERGRADIENT_ADAM_HPP_INCLUDED #include "lbann/optimizers/optimizer.hpp" namespace lbann { /** Hypergradient Adam optimizer. + * Reference: - * Baydin et al. "Online Learning Rate Adaptation with Hypergradient Descent", 2017. + + * Baydin et al. "Online Learning Rate Adaptation with Hypergradient + * Descent", 2017. */ class hypergradient_adam : public optimizer { - public: - - /** Constructor - * @param init_learning_rate Initial Adam learning rate (0.001 reasonable). - * @param hyper_learning_rate Hypergradient learning rate. - * @param beta1 Decay rate for the first moment moving average. - * @param beta2 Decay rate for the second moment moving average. - * @param eps A small value. +public: + + /** @param init_learning_rate Initial Adam learning rate (0.001 is + * reasonable). + * @param hyper_learning_rate Hypergradient learning rate. + * @param beta1 Decay rate for the first moment + * moving average. + * @param beta2 Decay rate for the second moment + * moving average. + * @param eps Small factor to avoid division by + * zero. */ hypergradient_adam(lbann_comm *comm, - DataType init_learning_rate, - DataType hyper_learning_rate = DataType(1e-7), - DataType beta1 = DataType(0.9), - DataType beta2 = DataType(0.99), - DataType eps = DataType(1e-8)); - - /** Copy constructor. 
*/ + DataType init_learning_rate = 1e-3, + DataType hyper_learning_rate = 1e-7, + DataType beta1 = 0.9, + DataType beta2 = 0.99, + DataType eps = 1e-8); hypergradient_adam(const hypergradient_adam& other); - /** Copy assignment operator. */ hypergradient_adam& operator=(const hypergradient_adam& other); - /** Destructor. */ - ~hypergradient_adam() override; - /** Create a copy. */ + ~hypergradient_adam() override = default; hypergradient_adam* copy() const override { return new hypergradient_adam(*this); } - /** Returns the optimizer name. */ + /** Human-readable type name. */ std::string get_type() const override { return "hypergradient Adam"; } /** Human-readable description. */ description get_description() const override; - /** Setup optimizer. */ - void setup(weights& w) override; + void setup(weights* w = nullptr) override; - /** Perform the computation in an optimization step. */ +protected: + + /** Computation for an optimization step. */ void step_compute(AbsDistMat& values, const AbsDistMat& gradient) override; - private: +private: /** Hypergradient learning rate. */ DataType m_hyper_learning_rate; @@ -87,15 +89,16 @@ class hypergradient_adam : public optimizer { /** beta2 ^ iteration. */ DataType m_current_beta2; /** First moment estimates. */ - AbsDistMat *m_moment1; + std::unique_ptr m_moment1; /** Second moment estimates. */ - AbsDistMat *m_moment2; + std::unique_ptr m_moment2; /** Gradient estimate from the prior step (for hypergradient). 
*/ - AbsDistMat *m_old_gradient; + std::unique_ptr m_old_gradient; - //************************************************************************ + // =========================================== // Checkpointing - //************************************************************************ + // =========================================== + /* struct used to serialize mode fields in file and MPI transfer */ struct packing_header { DataType hyper_learning_rate; @@ -154,4 +157,4 @@ class hypergradient_adam : public optimizer { } // namespace lbann -#endif // LBANN_OPTIMIZER_HYPERGRADIENT_ADAM_HPP +#endif // LBANN_OPTIMIZER_HYPERGRADIENT_ADAM_HPP_INCLUDED diff --git a/include/lbann/optimizers/rmsprop.hpp b/include/lbann/optimizers/rmsprop.hpp index d1a4bb35731..8b9a0f33594 100644 --- a/include/lbann/optimizers/rmsprop.hpp +++ b/include/lbann/optimizers/rmsprop.hpp @@ -43,7 +43,7 @@ class rmsprop : public optimizer { rmsprop(lbann_comm* comm, DataType learning_rate, DataType decay_rate, - DataType eps = DataType(1e-8)); + DataType eps = 1e-8); rmsprop(const rmsprop& other); rmsprop& operator=(const rmsprop& other); ~rmsprop() override = default; diff --git a/src/optimizers/CMakeLists.txt b/src/optimizers/CMakeLists.txt index ff3c4d2d2b1..bb2d84ada91 100644 --- a/src/optimizers/CMakeLists.txt +++ b/src/optimizers/CMakeLists.txt @@ -2,7 +2,7 @@ set_full_path(THIS_DIR_SOURCES adagrad.cpp adam.cpp -# hypergradient_adam.cpp + hypergradient_adam.cpp optimizer.cpp rmsprop.cpp sgd.cpp diff --git a/src/optimizers/hypergradient_adam.cpp b/src/optimizers/hypergradient_adam.cpp index 1c10649e13c..db3d9762e96 100644 --- a/src/optimizers/hypergradient_adam.cpp +++ b/src/optimizers/hypergradient_adam.cpp @@ -41,10 +41,7 @@ hypergradient_adam::hypergradient_adam(lbann_comm *comm, m_beta2(beta2), m_eps(eps), m_current_beta1(1), - m_current_beta2(1), - m_moment1(nullptr), - m_moment2(nullptr), - m_old_gradient(nullptr) {} + m_current_beta2(1) {} hypergradient_adam::hypergradient_adam(const 
hypergradient_adam& other) : optimizer(other), @@ -54,13 +51,10 @@ hypergradient_adam::hypergradient_adam(const hypergradient_adam& other) m_eps(other.m_eps), m_current_beta1(other.m_current_beta1), m_current_beta2(other.m_current_beta2), - m_moment1(other.m_moment1), - m_moment2(other.m_moment2), - m_old_gradient(other.m_old_gradient) { - if (m_moment1 != nullptr) { m_moment1 = m_moment1->Copy(); } - if (m_moment2 != nullptr) { m_moment2 = m_moment2->Copy(); } - if (m_old_gradient != nullptr) { m_old_gradient = m_old_gradient->Copy(); } -} + m_moment1(other.m_moment1 ? other.m_moment1->Copy() : nullptr), + m_moment2(other.m_moment2 ? other.m_moment2->Copy() : nullptr), + m_old_gradient(other.m_old_gradient ? + other.m_old_gradient->Copy() : nullptr) {} hypergradient_adam& hypergradient_adam::operator=(const hypergradient_adam& other) { optimizer::operator=(other); @@ -70,45 +64,13 @@ hypergradient_adam& hypergradient_adam::operator=(const hypergradient_adam& othe m_eps = other.m_eps; m_current_beta1 = other.m_current_beta1; m_current_beta2 = other.m_current_beta2; - - // Copy matrices - if (m_moment1 != nullptr && other.m_moment1 != nullptr - && m_moment1->DistData() == other.m_moment1->DistData()) { - El::Copy(*other.m_moment1, *m_moment1); - } - else { - if (m_moment1 != nullptr) { delete m_moment1; } - m_moment1 = other.m_moment1; - if (m_moment1 != nullptr) { m_moment1 = m_moment1->Copy(); } - } - if (m_moment2 != nullptr && other.m_moment2 != nullptr - && m_moment2->DistData() == other.m_moment2->DistData()) { - El::Copy(*other.m_moment2, *m_moment2); - } - else { - if (m_moment2 != nullptr) { delete m_moment2; } - m_moment2 = other.m_moment2; - if (m_moment2 != nullptr) { m_moment2 = m_moment2->Copy(); } - } - if (m_old_gradient != nullptr && other.m_old_gradient != nullptr - && m_old_gradient->DistData() == other.m_old_gradient->DistData()) { - El::Copy(*other.m_old_gradient, *m_old_gradient); - } - else { - if (m_old_gradient != nullptr) { delete 
m_old_gradient; } - m_old_gradient = other.m_old_gradient; - if (m_old_gradient != nullptr) { m_old_gradient = m_old_gradient->Copy(); } - } - + m_moment1.reset(other.m_moment1 ? other.m_moment1->Copy() : nullptr); + m_moment2.reset(other.m_moment2 ? other.m_moment2->Copy() : nullptr); + m_old_gradient.reset(other.m_old_gradient ? + other.m_old_gradient->Copy() : nullptr); return *this; } -hypergradient_adam::~hypergradient_adam() { - if(m_moment1 != nullptr) { delete m_moment1; } - if(m_moment2 != nullptr) { delete m_moment2; } - if(m_old_gradient != nullptr) { delete m_old_gradient; } -} - description hypergradient_adam::get_description() const { auto&& desc = optimizer::get_description(); desc.add("Hypergradient learning rate", m_hyper_learning_rate); @@ -118,21 +80,22 @@ description hypergradient_adam::get_description() const { return desc; } -void hypergradient_adam::setup(weights& w) { +void hypergradient_adam::setup(weights* w) { optimizer::setup(w); - m_moment1 = m_gradient->Construct(m_gradient->Grid(), - m_gradient->Root()); - m_moment2 = m_gradient->Construct(m_gradient->Grid(), - m_gradient->Root()); - m_old_gradient = m_gradient->Construct(m_gradient->Grid(), - m_gradient->Root()); - El::Zeros(*m_moment1, m_gradient->Height(), m_gradient->Width()); - El::Zeros(*m_moment2, m_gradient->Height(), m_gradient->Width()); - El::Zeros(*m_old_gradient, m_gradient->Height(), m_gradient->Width()); + const auto& gradient = this->get_gradient(); + m_moment1.reset(AbsDistMat::Instantiate(gradient.DistData())); + m_moment2.reset(AbsDistMat::Instantiate(gradient.DistData())); + m_old_gradient.reset(AbsDistMat::Instantiate(gradient.DistData())); + El::Zeros(*m_moment1, gradient.Height(), gradient.Width()); + El::Zeros(*m_moment2, gradient.Height(), gradient.Width()); + El::Zeros(*m_old_gradient, gradient.Height(), gradient.Width()); } void hypergradient_adam::step_compute(AbsDistMat& values, const AbsDistMat& gradient) { + if (values.GetLocalDevice() != 
El::Device::CPU) { + LBANN_ERROR("hypergradient Adam is only supported on CPU"); + } // Precompute the bias correction. m_current_beta1 *= m_beta1; @@ -141,81 +104,59 @@ void hypergradient_adam::step_compute(AbsDistMat& values, (DataType(1) - m_current_beta1); // Get local matrix data - const int local_height = values.LocalHeight(); - const int local_width = values.LocalWidth(); - DataType* __restrict__ values_buffer = values.Buffer(); - const int values_ldim = values.LDim(); + const size_t local_height = values.LocalHeight(); + const size_t local_width = values.LocalWidth(); + auto* __restrict__ values_buffer = values.Buffer(); + const size_t values_ldim = values.LDim(); const DataType* __restrict__ gradient_buffer = gradient.LockedBuffer(); - const int gradient_ldim = gradient.LDim(); - DataType* __restrict__ moment1_buffer = m_moment1->Buffer(); - const int moment1_ldim = m_moment1->LDim(); - DataType* __restrict__ moment2_buffer = m_moment2->Buffer(); - const int moment2_ldim = m_moment2->LDim(); - DataType* __restrict__ old_gradient_buffer = m_old_gradient->Buffer(); - const int old_gradient_ldim = m_old_gradient->LDim(); + const size_t gradient_ldim = gradient.LDim(); + auto* __restrict__ moment1_buffer = m_moment1->Buffer(); + const size_t moment1_ldim = m_moment1->LDim(); + auto* __restrict__ moment2_buffer = m_moment2->Buffer(); + const size_t moment2_ldim = m_moment2->LDim(); + auto* __restrict__ old_gradient_buffer = m_old_gradient->Buffer(); + const size_t old_gradient_ldim = m_old_gradient->LDim(); // Compute the learning rate update. DataType lr_update = El::Dot(gradient, *m_old_gradient); - m_learning_rate += m_hyper_learning_rate * lr_update; - - // Check if matrix data is contiguous. - if (values_ldim != local_height - || gradient_ldim != local_height - || moment1_ldim != local_height - || moment2_ldim != local_height - || old_gradient_ldim != local_height) { - // Non-contiguous data. 
- LBANN_OMP_PARALLEL_FOR_COLLAPSE2 - for (int j = 0; j < local_width; ++j) { - for (int i = 0; i < local_height; ++i) { - DataType& x = values_buffer[i+j*values_ldim]; - const DataType g = gradient_buffer[i+j*gradient_ldim] + m_eps; - DataType& m1 = moment1_buffer[i+j*moment1_ldim]; - DataType& m2 = moment2_buffer[i+j*moment2_ldim]; - DataType& old_c = old_gradient_buffer[i+j*old_gradient_ldim]; - m1 = m_beta1 * m1 + (DataType(1) - m_beta1) * g; - m2 = m_beta2 * m2 + (DataType(1) - m_beta2) * g * g; - old_c = correction * m1 / (std::sqrt(m2) + m_eps); - x -= m_learning_rate * old_c; - } - } - } else { - // Contiguous data. - LBANN_OMP_PARALLEL_FOR - for (int i = 0; i < local_height * local_width; ++i) { - DataType& x = values_buffer[i]; - // Add eps here to avoid denormalized floats. - const DataType g = gradient_buffer[i] + m_eps; - DataType& m1 = moment1_buffer[i]; - DataType& m2 = moment2_buffer[i]; - DataType& old_c = old_gradient_buffer[i]; - // Update the first/second moment estimates. + auto learning_rate = this->get_learning_rate(); + learning_rate += m_hyper_learning_rate * lr_update; + this->set_learning_rate(learning_rate); + + // Hypergradient Adam step + LBANN_OMP_PARALLEL_FOR_COLLAPSE2 + for (size_t col = 0; col < local_width; ++col) { + for (size_t row = 0; row < local_height; ++row) { + auto& x = values_buffer[row+col*values_ldim]; + const auto g = gradient_buffer[row+col*gradient_ldim] + m_eps; + auto& m1 = moment1_buffer[row+col*moment1_ldim]; + auto& m2 = moment2_buffer[row+col*moment2_ldim]; + auto& old_c = old_gradient_buffer[row+col*old_gradient_ldim]; m1 = m_beta1 * m1 + (DataType(1) - m_beta1) * g; m2 = m_beta2 * m2 + (DataType(1) - m_beta2) * g * g; - // Compute the unbiased gradient estimate. old_c = correction * m1 / (std::sqrt(m2) + m_eps); - // Parameter update. 
- x -= m_learning_rate * old_c; + x -= learning_rate * old_c; } } + } bool hypergradient_adam::save_to_checkpoint_shared(persist& p, std::string name_prefix) { if(p.get_cb_type() == callback_type::batch) optimizer::save_to_checkpoint_shared(p,name_prefix); - if (m_comm->am_trainer_master()) { + if (get_comm().am_trainer_master()) { pack_scalars(p); } char l_name[512]; sprintf(l_name, "%s_optimizer_adam_moment1_%lldx%lld", name_prefix.c_str(), m_moment1->Height(), m_moment2->Width()); - p.write_distmat(persist_type::train, l_name, m_moment1); + p.write_distmat(persist_type::train, l_name, m_moment1.get()); sprintf(l_name, "%s_optimizer_adam_moment2_%lldx%lld", name_prefix.c_str(), m_moment2->Height(), m_moment2->Width()); - p.write_distmat(persist_type::train, l_name, m_moment2); + p.write_distmat(persist_type::train, l_name, m_moment2.get()); sprintf(l_name, "%s_optimizer_adam_old_gradient_%lldx%lld", name_prefix.c_str(), m_old_gradient->Height(), m_old_gradient->Width()); - p.write_distmat(persist_type::train, l_name, m_old_gradient); + p.write_distmat(persist_type::train, l_name, m_old_gradient.get()); return true; } @@ -224,23 +165,23 @@ bool hypergradient_adam::load_from_checkpoint_shared(persist& p, std::string nam if(p.get_cb_type() == callback_type::batch) optimizer::load_from_checkpoint_shared(p,name_prefix); struct packing_header header; - if (m_comm->am_trainer_master()) { + if (get_comm().am_trainer_master()) { unpack_scalars(p, &header); } - m_comm->trainer_broadcast(0, header); + get_comm().trainer_broadcast(0, header); unpack_header(header); char l_name[512]; sprintf(l_name, "%s_optimizer_adam_moment1_%lldx%lld.bin", name_prefix.c_str(), m_moment1->Height(), m_moment2->Width()); - p.read_distmat(persist_type::train, l_name, m_moment1); + p.read_distmat(persist_type::train, l_name, m_moment1.get()); sprintf(l_name, "%s_optimizer_adam_moment2_%lldx%lld.bin", name_prefix.c_str(), m_moment2->Height(), m_moment2->Width()); - 
p.read_distmat(persist_type::train, l_name, m_moment2); + p.read_distmat(persist_type::train, l_name, m_moment2.get()); sprintf(l_name, "%s_optimizer_adam_old_gradient_%lldx%lld.bin", name_prefix.c_str(), m_old_gradient->Height(), m_old_gradient->Width()); - p.read_distmat(persist_type::train, l_name, m_old_gradient); + p.read_distmat(persist_type::train, l_name, m_old_gradient.get()); return true; } diff --git a/src/proto/factories/optimizer_factory.cpp b/src/proto/factories/optimizer_factory.cpp index 8f7b3d21a40..8ba69185168 100644 --- a/src/proto/factories/optimizer_factory.cpp +++ b/src/proto/factories/optimizer_factory.cpp @@ -66,7 +66,6 @@ optimizer* construct_optimizer(lbann_comm* comm, params.eps()); } -#if 0 // Hypergradient Adam if (proto_opt.has_hypergradient_adam()) { const auto& params = proto_opt.hypergradient_adam(); @@ -77,7 +76,6 @@ optimizer* construct_optimizer(lbann_comm* comm, params.beta2(), params.eps()); } -#endif // Return null pointer if no optimizer is specified return nullptr; From 4defdf75113b4e28158bd963485ecb13d5015115 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Fri, 22 Feb 2019 08:44:08 -0800 Subject: [PATCH 100/443] Added a path delimiter to the modify_file_name function so that prepended .s work. --- src/utils/file_utils.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/utils/file_utils.cpp b/src/utils/file_utils.cpp index a12ddbd2d66..76472794ebd 100644 --- a/src/utils/file_utils.cpp +++ b/src/utils/file_utils.cpp @@ -147,6 +147,7 @@ std::string modify_file_name(const std::string file_name, const std::string tag, name = name + '_' + tag; } + dir = add_delimiter(dir); if(!ext.empty()) { return (dir + name + '.' + ext); }else { From 1c2066f96967a756a36b59b28b55d0084c74cb60 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Fri, 22 Feb 2019 09:37:01 -0800 Subject: [PATCH 101/443] Created an explicit function for opening and caching the file handle. 
This allows many of the JAG conduit data reader's functions to be made const again. Temporarily disabled the closing of file handles when there is a hash collision. This removes the enforcement of the file descriptor limit. --- .../data_readers/data_reader_jag_conduit.hpp | 14 +++--- .../lbann/data_readers/sample_list_jag.hpp | 49 ++++++++++++++----- .../data_readers/sample_list_jag_impl.hpp | 8 +-- src/data_readers/data_reader_jag_conduit.cpp | 40 ++++++--------- 4 files changed, 62 insertions(+), 49 deletions(-) diff --git a/include/lbann/data_readers/data_reader_jag_conduit.hpp b/include/lbann/data_readers/data_reader_jag_conduit.hpp index 4684cb1721c..fb97a47ee2b 100644 --- a/include/lbann/data_readers/data_reader_jag_conduit.hpp +++ b/include/lbann/data_readers/data_reader_jag_conduit.hpp @@ -216,19 +216,19 @@ class data_reader_jag_conduit : public generic_data_reader { std::string get_description() const; /// Return the image simulation output of the i-th sample - std::vector get_cv_images(const size_t i, conduit::Node& sample); + std::vector get_cv_images(const size_t i, conduit::Node& sample) const; /** * Return the images of the i-th sample as an 1-D vector of lbann::DataType * There is one image per view, each of which is taken at closest to the bang time. 
*/ - std::vector get_images(const size_t i, conduit::Node& sample); + std::vector get_images(const size_t i, conduit::Node& sample) const; /// Return the scalar simulation output data of the i-th sample - std::vector get_scalars(const size_t i, conduit::Node& sample); + std::vector get_scalars(const size_t i, conduit::Node& sample) const; /// Return the simulation input parameters of the i-th sample - std::vector get_inputs(const size_t i, conduit::Node& sample); + std::vector get_inputs(const size_t i, conduit::Node& sample) const; template static size_t add_val(const std::string key, const conduit::Node& n, std::vector& vals); @@ -247,7 +247,7 @@ class data_reader_jag_conduit : public generic_data_reader { static std::string to_string(const variable_t t); /// print the schema of the specific sample identified by a given id - void print_schema(const size_t i); + void print_schema(const size_t i) const; void clear_image_normalization_params(); void clear_scalar_normalization_params(); @@ -353,13 +353,13 @@ class data_reader_jag_conduit : public generic_data_reader { /** Load the conduit node with the data of the sample i identified by key * from the file that contains the sample. 
*/ - bool load_conduit_node(const size_t i, const std::string& key, conduit::Node& node); + bool load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) const; /// Check if a key exist for sample i bool has_conduit_path(const size_t i, const std::string& key) const; void close_conduit_node(const size_t i); /// Obtain image data - std::vector< std::vector > get_image_data(const size_t i, conduit::Node& sample); + std::vector< std::vector > get_image_data(const size_t i, conduit::Node& sample) const; bool data_store_active() const { bool flag = generic_data_reader::data_store_active(); diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index c863391c588..22901b01236 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -162,9 +162,7 @@ class sample_list_jag { m_sample_id_map[id] = filename; } - void set_samples_hdf5_handle(sample_id_t id, hid_t h) { - const std::string& filename = m_sample_id_map[id]; - + void set_files_hdf5_handle(const std::string& filename, hid_t h) { int bucket_count = m_open_fd_map.bucket_count(); int bucket = m_open_fd_map.bucket(filename); if(m_open_fd_map.bucket_size(bucket) > 0) { @@ -184,12 +182,12 @@ class sample_list_jag { + "' has a corrupt file descriptor = " + std::to_string(old_h)); } - conduit::relay::io::hdf5_close_file(old_h); - int num_erased = m_open_fd_map.erase(old_filename); - if(num_erased != 1) { - LBANN_ERROR(std::string{} + " :: erasing file descriptor for '" + old_filename - + "' that had a file descriptor = " + std::to_string(old_h)); - } + // conduit::relay::io::hdf5_close_file(old_h); + // int num_erased = m_open_fd_map.erase(old_filename); + // if(num_erased != 1) { + // LBANN_ERROR(std::string{} + " :: erasing file descriptor for '" + old_filename + // + "' that had a file descriptor = " + std::to_string(old_h)); + // } } @@ -203,11 +201,36 @@ class sample_list_jag { 
LBANN_ERROR(std::string{} + " :: the buckets don't match original bucket " + std::to_string(bucket) + " with a count of " + std::to_string(bucket_count) + " and new bucket " + std::to_string(bucket2) + " and a new count of " + std::to_string(bucket_count2)); } - if(m_open_fd_map.bucket_size(bucket) != 1) { - LBANN_WARNING(std::string{} + " :: there should be one entry with an open file descriptors for bucket " - + std::to_string(bucket) + " not " - + std::to_string(m_open_fd_map.bucket_size(bucket)) + " entries"); + // if(m_open_fd_map.bucket_size(bucket) != 1) { + // LBANN_WARNING(std::string{} + " :: there should be one entry with an open file descriptors for bucket " + // + std::to_string(bucket) + " not " + // + std::to_string(m_open_fd_map.bucket_size(bucket)) + " entries"); + // } + } + + void set_samples_hdf5_handle(sample_id_t id, hid_t h) { + const std::string& filename = m_sample_id_map[id]; + set_files_hdf5_handle(filename, h); + } + + hid_t open_samples_hdf5_handle(const size_t i) { + const sample_t& s = m_sample_list[i]; + sample_id_t id = s.first; + hid_t h = get_samples_hdf5_handle(id); + if (h <= static_cast(0)) { + const std::string& file_name = get_samples_filename(id); + const std::string conduit_file_path = add_delimiter(get_samples_dirname()) + file_name; + if (file_name.empty() || !check_if_file_exists(conduit_file_path)) { + LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' does not exist."); + } + h = conduit::relay::io::hdf5_open_file_for_read( conduit_file_path ); + if (h <= static_cast(0)) { + LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' could not be opened."); + } + set_samples_hdf5_handle(id, h); } + + return h; } void all_gather_archive(const std::string &archive, std::vector& gathered_archive, lbann_comm& comm); diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index 642e4a65ccc..ea3d47190af 100644 --- 
a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -276,6 +276,8 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str continue; // skipping the file } + set_files_hdf5_handle(filename, hdf5_file_hnd); + if(m_file_map.count(filename) > 0) { if(sample_names.size() != m_file_map[filename]) { LBANN_ERROR(std::string("The same file ") @@ -308,8 +310,6 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str + std::string(" samples, but found ") + std::to_string(valid_sample_count)); } - - conduit::relay::io::hdf5_close_file(hdf5_file_hnd); } if (m_header.get_num_files() != cnt_files) { @@ -363,6 +363,8 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str continue; // skipping the file } + set_files_hdf5_handle(filename, hdf5_file_hnd); + if(m_file_map.count(filename) > 0) { if(sample_names.size() != m_file_map[filename]) { LBANN_ERROR(std::string("The same file ") @@ -398,8 +400,6 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str + std::string(" samples, but found ") + std::to_string(valid_sample_count)); } - - conduit::relay::io::hdf5_close_file(hdf5_file_hnd); } if (m_header.get_num_files() != cnt_files) { diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index c5602347934..dd985ed6dd9 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -308,28 +308,16 @@ const conduit::Node& data_reader_jag_conduit::get_conduit_node(const conduit::No return n_base[key]; } -bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) { +bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) const { const sample_t& s = m_sample_list[i]; const std::string& sample_name = s.second; const 
std::string path = sample_name + key; sample_id_t id = s.first; hid_t h = m_sample_list.get_samples_hdf5_handle(id); - const std::string& file_name = m_sample_list.get_samples_filename(id); - if (h <= static_cast(0)) { - const std::string conduit_file_path = add_delimiter(m_sample_list.get_samples_dirname()) + file_name; - if (file_name.empty() || !check_if_file_exists(conduit_file_path)) { - LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' does not exist."); - } - h = conduit::relay::io::hdf5_open_file_for_read( conduit_file_path ); - if (h <= static_cast(0)) { - LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' could not be opened."); - } - m_sample_list.set_samples_hdf5_handle(id, h); - } - - if (!conduit::relay::io::hdf5_has_path(h, path)) { - LBANN_ERROR(get_type() + ":: Cannot open HDF5 path in file " + file_name + \ + if (h <= static_cast(0) || !conduit::relay::io::hdf5_has_path(h, path)) { + const std::string& file_name = m_sample_list.get_samples_filename(id); + LBANN_ERROR(get_type() + ":: Cannot open file " + file_name + \ " for sample "+ sample_name); return false; } @@ -556,7 +544,7 @@ void data_reader_jag_conduit::check_image_data() { return; } - size_t first_idx = m_sample_list.get_indexer().get_partition_offset(); + size_t first_idx = (m_sample_list[0]).first; if (!has_conduit_path(first_idx, "")) { _THROW_LBANN_EXCEPTION_(_CN_, "check_image_data() : no sample by " + m_sample_list[first_idx].second); return; @@ -637,7 +625,7 @@ void data_reader_jag_conduit::check_scalar_keys() { std::set keys_conduit; conduit::Node n_scalar; - size_t first_idx = m_sample_list.get_indexer().get_partition_offset(); + size_t first_idx = (m_sample_list[0]).first; load_conduit_node(first_idx, m_output_scalar_prefix, n_scalar); const std::vector& child_names = n_scalar.child_names(); for (const auto& key: child_names) { @@ -703,7 +691,7 @@ void data_reader_jag_conduit::check_input_keys() { std::map keys_conduit; conduit::Node 
n_input; - size_t first_idx = m_sample_list.get_indexer().get_partition_offset(); + size_t first_idx = (m_sample_list[0]).first; load_conduit_node(first_idx, "/inputs", n_input); conduit::NodeConstIterator itr = n_input.children(); @@ -1100,7 +1088,7 @@ bool data_reader_jag_conduit::check_non_numeric(const std::string key) { std::vector< std::vector > -data_reader_jag_conduit::get_image_data(const size_t sample_id, conduit::Node& sample) { +data_reader_jag_conduit::get_image_data(const size_t sample_id, conduit::Node& sample) const { std::vector< std::vector > image_ptrs; image_ptrs.reserve(m_emi_image_keys.size()); @@ -1145,7 +1133,7 @@ void data_reader_jag_conduit::image_normalization(cv::Mat& img, size_t i, size_t img.convertTo(img, -1, tr.first, tr.second); } -std::vector data_reader_jag_conduit::get_cv_images(const size_t sample_id, conduit::Node& sample) { +std::vector data_reader_jag_conduit::get_cv_images(const size_t sample_id, conduit::Node& sample) const { const std::vector< std::vector > img_data(get_image_data(sample_id, sample)); std::vector images; @@ -1183,7 +1171,7 @@ std::vector data_reader_jag_conduit::get_cv_images(const size_t sample_ return images; } -std::vector data_reader_jag_conduit::get_images(const size_t sample_id, conduit::Node& sample) { +std::vector data_reader_jag_conduit::get_images(const size_t sample_id, conduit::Node& sample) const { std::vector< std::vector > img_data(get_image_data(sample_id, sample)); std::vector images; @@ -1221,7 +1209,7 @@ std::vector data_reader_jag_conduit::get_images(c return images; } -std::vector data_reader_jag_conduit::get_scalars(const size_t sample_id, conduit::Node& sample) { +std::vector data_reader_jag_conduit::get_scalars(const size_t sample_id, conduit::Node& sample) const { std::vector scalars; scalars.reserve(m_scalar_keys.size()); @@ -1247,7 +1235,7 @@ std::vector data_reader_jag_conduit::get_scal return scalars; } -std::vector data_reader_jag_conduit::get_inputs(const size_t sample_id, 
conduit::Node& sample) { +std::vector data_reader_jag_conduit::get_inputs(const size_t sample_id, conduit::Node& sample) const { std::vector inputs; inputs.reserve(m_input_keys.size()); @@ -1407,6 +1395,8 @@ bool data_reader_jag_conduit::fetch_datum(CPUMat& X, int data_id, int mb_idx) { if (data_store_active()) { const conduit::Node& ds_node = m_jag_store->get_conduit_node(data_id); node.set_external(ds_node); + }else { + m_sample_list.open_samples_hdf5_handle(data_id); } for(size_t i = 0u; ok && (i < X_v.size()); ++i) { @@ -1469,7 +1459,7 @@ void data_reader_jag_conduit::save_image(Mat& pixels, const std::string filename internal_save_image(pixels, filename, m_image_height, m_image_width, 1, do_scale); } -void data_reader_jag_conduit::print_schema(const size_t sample_id) { +void data_reader_jag_conduit::print_schema(const size_t sample_id) const { //@TODO revisit later -- don't know how to handle this yet if (m_data_store != nullptr) { return; From 88e11b8336760c09ce944d61e9f785682c496296 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Fri, 22 Feb 2019 17:43:12 -0800 Subject: [PATCH 102/443] Layers to provide mini-batch size and index. 
--- include/lbann/layers/misc/CMakeLists.txt | 2 + .../lbann/layers/misc/mini_batch_index.hpp | 92 +++++++++++++++++++ include/lbann/layers/misc/mini_batch_size.hpp | 77 ++++++++++++++++ include/lbann/lbann.hpp | 2 + src/proto/factories/layer_factory.cpp | 2 + src/proto/lbann.proto | 4 + 6 files changed, 179 insertions(+) create mode 100644 include/lbann/layers/misc/mini_batch_index.hpp create mode 100644 include/lbann/layers/misc/mini_batch_size.hpp diff --git a/include/lbann/layers/misc/CMakeLists.txt b/include/lbann/layers/misc/CMakeLists.txt index 65f75921f2b..2b5808fdfa7 100644 --- a/include/lbann/layers/misc/CMakeLists.txt +++ b/include/lbann/layers/misc/CMakeLists.txt @@ -3,6 +3,8 @@ set_full_path(THIS_DIR_HEADERS covariance.hpp variance.hpp channelwise_mean.hpp + mini_batch_index.hpp + mini_batch_size.hpp ) # Propagate the files up the tree diff --git a/include/lbann/layers/misc/mini_batch_index.hpp b/include/lbann/layers/misc/mini_batch_index.hpp new file mode 100644 index 00000000000..e7b24675d18 --- /dev/null +++ b/include/lbann/layers/misc/mini_batch_index.hpp @@ -0,0 +1,92 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. 
You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +//////////////////////////////////////////////////////////////////////////////// + +#ifndef LBANN_LAYERS_MISC_MINI_BATCH_INDEX_HPP_INCLUDED +#define LBANN_LAYERS_MISC_MINI_BATCH_INDEX_HPP_INCLUDED + +#include "lbann/layers/layer.hpp" + +namespace lbann { + +/** @brief Mini-batch index. + * + * Output tensor is a 1D tensor with a single entry containing the + * mini-batch sample. Each sample in a model's mini-batch has a + * unique index in [0, mini_batch_size). + */ +template +class mini_batch_index_layer : public Layer { +public: + + mini_batch_index_layer(lbann_comm* comm) : Layer(comm) { + this->m_expected_num_parent_layers = 0; + } + + mini_batch_index_layer* copy() const override { return new mini_batch_index_layer(*this); } + std::string get_type() const override { return "mini-batch index"; } + data_layout get_data_layout() const override { return Layout; } + El::Device get_device_allocation() const override { return Device; } + +protected: + + void setup_dims() override { + Layer::setup_dims(); + set_output_dims({1}); + } + + void fp_compute() override { + + // Get output matrix + auto& output = get_activations(); + auto& local_output = output.Matrix(); + const auto& local_width = local_output.Width(); + + // Create temporary matrix if output matrix is not on CPU + CPUMat local_output_v; + if (local_output.GetDevice() == El::Device::CPU) { + El::View(local_output_v, local_output); + } else { + local_output_v.Resize(1, local_width); + } + + // Populate matrix on CPU + LBANN_OMP_PARALLEL_FOR + for (El::Int col = 0; col < local_width; ++col) 
{ + local_output_v(0, col) = DataType(output.GlobalCol(col)); + } + + // Copy result from CPU if needed + if (!local_output_v.Viewing()) { + El::Copy(local_output_v, local_output); + } + + } + +}; + +} // namespace lbann + +#endif // LBANN_LAYERS_MISC_MINI_BATCH_INDEX_HPP_INCLUDED diff --git a/include/lbann/layers/misc/mini_batch_size.hpp b/include/lbann/layers/misc/mini_batch_size.hpp new file mode 100644 index 00000000000..a827e15a104 --- /dev/null +++ b/include/lbann/layers/misc/mini_batch_size.hpp @@ -0,0 +1,77 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +//////////////////////////////////////////////////////////////////////////////// + +#ifndef LBANN_LAYERS_MISC_MINI_BATCH_SIZE_HPP_INCLUDED +#define LBANN_LAYERS_MISC_MINI_BATCH_SIZE_HPP_INCLUDED + +#include "lbann/layers/layer.hpp" + +namespace lbann { + +/** @brief Mini-batch size. 
+ * + * Output tensor is a 1D tensor with a single entry containing the + * model's current mini-batch size. + */ +template +class mini_batch_size_layer : public Layer { +public: + + mini_batch_size_layer(lbann_comm* comm) : Layer(comm) { + this->m_expected_num_parent_layers = 0; + } + + mini_batch_size_layer* copy() const override { return new mini_batch_size_layer(*this); } + std::string get_type() const override { return "mini-batch size"; } + data_layout get_data_layout() const override { return Layout; } + El::Device get_device_allocation() const override { return Device; } + +protected: + + void setup_dims() override { + Layer::setup_dims(); + set_output_dims({1}); + } + + void fp_setup_outputs(El::Int mini_batch_size) override { + Layer::fp_setup_outputs(mini_batch_size); + m_mini_batch_size = mini_batch_size; + } + + void fp_compute() override { + El::Fill(get_activations(), DataType(m_mini_batch_size)); + } + +private: + + /** Mini-batch size. */ + El::Int m_mini_batch_size = 0; + +}; + +} // namespace lbann + +#endif // LBANN_LAYERS_MISC_MINI_BATCH_SIZE_HPP_INCLUDED diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index 396d67287df..6e62bd04a2e 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -101,6 +101,8 @@ #include "lbann/layers/misc/covariance.hpp" #include "lbann/layers/misc/variance.hpp" #include "lbann/layers/misc/channelwise_mean.hpp" +#include "lbann/layers/misc/mini_batch_index.hpp" +#include "lbann/layers/misc/mini_batch_size.hpp" /// Data readers #include "lbann/data_readers/data_reader_imagenet.hpp" diff --git a/src/proto/factories/layer_factory.cpp b/src/proto/factories/layer_factory.cpp index 4c8f698d02f..264235a3dc4 100644 --- a/src/proto/factories/layer_factory.cpp +++ b/src/proto/factories/layer_factory.cpp @@ -608,6 +608,8 @@ std::unique_ptr construct_layer( "a data-parallel layout"); } } + CONSTRUCT_LAYER(mini_batch_index); + CONSTRUCT_LAYER(mini_batch_size); // Throw exception if layer has not been 
constructed err << "could not construct layer " << proto_layer.name(); diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 4d0a4ea1c8d..43e7e2c3bbd 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -883,6 +883,8 @@ message Layer { Covariance covariance = 600; Variance variance = 601; ChannelwiseMean channelwise_mean = 602; + MiniBatchIndex mini_batch_index = 603; + MiniBatchSize mini_batch_size = 604; } /////////////////////// @@ -1240,3 +1242,5 @@ message Variance { bool biased = 1; //Whether to use a biased variance estimate } message ChannelwiseMean {} +message MiniBatchIndex {} +message MiniBatchSize {} \ No newline at end of file From 849ce2956cc259f07459679602e317fbd0f8773c Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Fri, 22 Feb 2019 17:52:15 -0800 Subject: [PATCH 103/443] Removing "zero" layer. The zero layer was unintuitive (it doesn't output a zero tensor) and not general. --- include/lbann/layers/transform/zero.hpp | 114 ------------------------ include/lbann/lbann.hpp | 1 - src/proto/factories/layer_factory.cpp | 5 -- src/proto/lbann.proto | 7 -- 4 files changed, 127 deletions(-) delete mode 100644 include/lbann/layers/transform/zero.hpp diff --git a/include/lbann/layers/transform/zero.hpp b/include/lbann/layers/transform/zero.hpp deleted file mode 100644 index 57ec9d4d5d9..00000000000 --- a/include/lbann/layers/transform/zero.hpp +++ /dev/null @@ -1,114 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. -// Produced at the Lawrence Livermore National Laboratory. -// Written by the LBANN Research Team (B. Van Essen, et al.) listed in -// the CONTRIBUTORS file. -// -// LLNL-CODE-697807. -// All rights reserved. -// -// This file is part of LBANN: Livermore Big Artificial Neural Network -// Toolkit. For details, see http://software.llnl.gov/LBANN or -// https://github.com/LLNL/LBANN. 
-// -// Licensed under the Apache License, Version 2.0 (the "Licensee"); you -// may not use this file except in compliance with the License. You may -// obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the license. -//////////////////////////////////////////////////////////////////////////////// - -#ifndef LBANN_LAYER_ZERO_HPP_INCLUDED -#define LBANN_LAYER_ZERO_HPP_INCLUDED - -#include "lbann/layers/transform/transform.hpp" - -namespace lbann { - -/** @brief - * - * Layer outputs (transform previous activations to) zeros. - * use case: transforms part or all samples in a minibatch to zero - * - * @param first_half output zeros for the first half of minibatch samples if true - * @param second_half output zeros for second half of minibatch samples if true - * @todo Change the name. "zero_layer" is extremely misleading. - * @todo Replace with more general functionality. 
- */ -template -class zero_layer : public transform_layer { - private: - bool m_first_half; - bool m_second_half; - - public: - zero_layer(lbann_comm *comm, - bool first_half=true, - bool second_half=true) - : transform_layer(comm), - m_first_half(first_half), - m_second_half(second_half) { - - } - zero_layer* copy() const override { return new zero_layer(*this); } - std::string get_type() const override { return "zero"; } - data_layout get_data_layout() const override { return T_layout; } - El::Device get_device_allocation() const override { return Dev; } - - description get_description() const override { - auto&& desc = transform_layer::get_description(); - desc.add("First half", m_first_half); - desc.add("Second half", m_second_half); - return desc; - } - -protected: - - void fp_compute() override { - const auto& input = get_prev_activations(); - const auto& local_input = input.LockedMatrix(); - auto& local_output = get_local_activations(); - const int local_height = local_input.Height(); - const int local_width = local_input.Width(); - for (int col = 0; col < local_width; ++col) { - for (int row = 0; row < local_height; ++row) { - const DataType x = local_input(row, col); - DataType& y = local_output(row, col); - if(m_first_half) - y = input.GlobalCol(col) < local_width/2 ? DataType(0) : x; - if(m_second_half) - y = input.GlobalCol(col) >= local_width/2 ? DataType(0) : x; - } - } - } - - void bp_compute() override { - const auto& input = get_prev_error_signals(); - const auto& local_gradient_wrt_output = get_local_prev_error_signals(); - auto& local_gradient_wrt_input = get_local_error_signals(); - const int local_height = input.LocalHeight(); - const int local_width = input.LocalWidth(); - for (int col = 0; col < local_width; ++col) { - for (int row = 0; row < local_height; ++row) { - const DataType dy = local_gradient_wrt_output(row, col); - DataType& dx = local_gradient_wrt_input(row, col); - if(m_first_half) - dx = input.GlobalCol(col) < local_width/2 ? 
DataType(0) : dy; - if(m_second_half) - dx = input.GlobalCol(col) >= local_width/2 ? DataType(0) : dy; - } - } - } - - -}; - -} // namespace lbann - -#endif // LBANN_LAYER_ZERO_HPP_INCLUDED diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index 6e62bd04a2e..85acf5add94 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -73,7 +73,6 @@ #include "lbann/layers/transform/constant.hpp" #include "lbann/layers/transform/dummy.hpp" #include "lbann/layers/transform/hadamard.hpp" -#include "lbann/layers/transform/zero.hpp" #include "lbann/layers/transform/reduction.hpp" #include "lbann/layers/transform/evaluation.hpp" #include "lbann/layers/transform/gaussian.hpp" diff --git a/src/proto/factories/layer_factory.cpp b/src/proto/factories/layer_factory.cpp index 264235a3dc4..80b5be52918 100644 --- a/src/proto/factories/layer_factory.cpp +++ b/src/proto/factories/layer_factory.cpp @@ -298,11 +298,6 @@ std::unique_ptr construct_layer( comm, dims, params.min(), params.max()); } } - if (proto_layer.has_zero()) { - const auto& params = proto_layer.zero(); - return lbann::make_unique>( - comm, params.first_half(), params.second_half()); - } if (proto_layer.has_pooling()) { const auto& params = proto_layer.pooling(); const auto& mode_str = params.pool_mode(); diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 43e7e2c3bbd..7f51c759e37 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -772,7 +772,6 @@ message Layer { Unpooling unpooling = 304; Hadamard hadamard = 308; Constant constant = 309; - Zero zero = 315; Reduction reduction = 310; Evaluation evaluation = 311; Gaussian gaussian = 312; @@ -1088,12 +1087,6 @@ message Constant { string num_neurons=2; } - -message Zero { - bool first_half=1; //default: true - bool second_half=2; //default: true -} - message Reduction { string mode=1; //"sum" or "average" } From 7aea4afe5c7d2dbb2c2b0d74d6d9eae759763dc1 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Fri, 22 Feb 2019 18:08:36 
-0800 Subject: [PATCH 104/443] Removing "zero" layer from model zoo. --- .../gan/mnist/adversarial_model.prototext | 52 +++++++++++++++---- .../gan/mnist/discriminator_model.prototext | 52 +++++++++++++++---- 2 files changed, 84 insertions(+), 20 deletions(-) diff --git a/model_zoo/models/gan/mnist/adversarial_model.prototext b/model_zoo/models/gan/mnist/adversarial_model.prototext index c95dba69ad3..644df4a90f3 100644 --- a/model_zoo/models/gan/mnist/adversarial_model.prototext +++ b/model_zoo/models/gan/mnist/adversarial_model.prototext @@ -83,15 +83,50 @@ model { split {} } + # Divide mini-batch samples into two halves + layer { + name: "mb_index" + mini_batch_index {} + } + layer { + name: "mb_size" + mini_batch_size {} + } + layer { + parents: "mb_index mb_size" + name: "mb_factor" + divide {} + } + layer { + parents: "mb_factor" + name: "in_second_half_scalar" + round {} + } + layer { + parents: "in_second_half_scalar" + name: "in_second_half_scalar3d" + reshape { + dims: "1 1 1" + } + } + layer { + parents: "in_second_half_scalar3d" + name: "in_second_half" + hint_layer: "data" + tessellate {} + } + layer { + parents: "in_second_half" + name: "in_first_half" + not {} + } + #ZERO layer { - parents: "data" + parents: "data in_second_half" name: "zero_data" data_layout: "data_parallel" - zero { - first_half: false - second_half: true - } + multiply {} } @@ -246,13 +281,10 @@ model { #ZERO layer { - parents: "reshape1" + parents: "reshape1 in_first_half" name: "zero_fake" data_layout: "data_parallel" - zero { - first_half:true - second_half:false - } + multiply {} } #SUM diff --git a/model_zoo/models/gan/mnist/discriminator_model.prototext b/model_zoo/models/gan/mnist/discriminator_model.prototext index 98e17c9b396..063390d93dd 100644 --- a/model_zoo/models/gan/mnist/discriminator_model.prototext +++ b/model_zoo/models/gan/mnist/discriminator_model.prototext @@ -76,15 +76,50 @@ model { split {} } + # Divide mini-batch samples into two halves + layer { + name: 
"mb_index" + mini_batch_index {} + } + layer { + name: "mb_size" + mini_batch_size {} + } + layer { + parents: "mb_index mb_size" + name: "mb_factor" + divide {} + } + layer { + parents: "mb_factor" + name: "in_second_half_scalar" + round {} + } + layer { + parents: "in_second_half_scalar" + name: "in_second_half_scalar3d" + reshape { + dims: "1 1 1" + } + } + layer { + parents: "in_second_half_scalar3d" + name: "in_second_half" + hint_layer: "data" + tessellate {} + } + layer { + parents: "in_second_half" + name: "in_first_half" + not {} + } + #ZERO layer { - parents: "data" + parents: "data in_second_half" name: "zero_data" data_layout: "data_parallel" - zero { - first_half: false - second_half: true - } + multiply {} } @@ -256,13 +291,10 @@ model { #ZERO layer { - parents: "reshape1" + parents: "reshape1 in_first_half" name: "zero_fake" data_layout: "data_parallel" - zero { - first_half: true - second_half: false - } + multiply {} } #SUM From ce46259cae1950dc362cc5efe57d18277789487b Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Mon, 25 Feb 2019 09:38:16 -0800 Subject: [PATCH 105/443] fix errors introduced by merge --- src/utils/lbann_library.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/lbann_library.cpp b/src/utils/lbann_library.cpp index 9145829f75c..31dc46dbf54 100644 --- a/src/utils/lbann_library.cpp +++ b/src/utils/lbann_library.cpp @@ -197,7 +197,7 @@ std::unique_ptr build_model_from_prototext( } for (auto&& r : data_readers) { if (!r.second) continue; - r.second->setup_data_store(model, pb_model->mini_batch_size()); + r.second->setup_data_store(ret_model.get(), pb_model->mini_batch_size()); } } From 45e7e03deaa8c8d628d761dd9c74f824d83926e8 Mon Sep 17 00:00:00 2001 From: Nikoli Dryden Date: Mon, 25 Feb 2019 14:17:05 -0600 Subject: [PATCH 106/443] Bump CORAL NCCL version in build_lbann_lc.sh. 
--- scripts/build_lbann_lc.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build_lbann_lc.sh b/scripts/build_lbann_lc.sh index ba5a18c06d7..e1cf89cfbc4 100755 --- a/scripts/build_lbann_lc.sh +++ b/scripts/build_lbann_lc.sh @@ -583,7 +583,7 @@ if [ "${CLUSTER}" == "surface" -o "${CORAL}" -eq 1 -o "${CLUSTER}" == "pascal" ] WITH_ALUMINUM=${WITH_ALUMINUM:-ON} ALUMINUM_WITH_NCCL=${ALUMINUM_WITH_NCCL:-ON} if [[ ${CORAL} -eq 1 ]]; then - export NCCL_DIR=/usr/workspace/wsb/brain/nccl2/nccl_2.3.7-1+cuda9.2_ppc64le + export NCCL_DIR=/usr/workspace/wsb/brain/nccl2/nccl_2.4.2-1+cuda9.2_ppc64le module del cuda CUDA_TOOLKIT_MODULE=${CUDA_TOOLKIT_MODULE:-cuda/9.2.148} else From cf2f3c5353f22350619d41cf1e91aeec7ac20605 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Mon, 25 Feb 2019 13:35:49 -0800 Subject: [PATCH 107/443] Review suggestions from @ndryden. More descriptive CUDA kernel names. Expanding documentation in `optimizer::add_to_gradient` function. --- src/optimizers/adagrad.cu | 22 ++++++++-------- src/optimizers/adam.cu | 50 ++++++++++++++++++------------------ src/optimizers/optimizer.cpp | 5 +++- src/optimizers/rmsprop.cu | 24 ++++++++--------- 4 files changed, 52 insertions(+), 49 deletions(-) diff --git a/src/optimizers/adagrad.cu b/src/optimizers/adagrad.cu index 6df85552495..568a28a0080 100644 --- a/src/optimizers/adagrad.cu +++ b/src/optimizers/adagrad.cu @@ -30,16 +30,16 @@ namespace lbann { namespace { -__global__ void kernel(size_t height, - size_t width, - DataType learning_rate, - DataType eps, - DataType * __restrict__ values, - size_t values_ldim, - const DataType * __restrict__ gradient, - size_t gradient_ldim, - DataType * __restrict__ cache, - size_t cache_ldim) { +__global__ void adagrad_kernel(size_t height, + size_t width, + DataType learning_rate, + DataType eps, + DataType * __restrict__ values, + size_t values_ldim, + const DataType * __restrict__ gradient, + size_t gradient_ldim, + DataType * __restrict__ cache, + size_t 
cache_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t pos = gid; pos < height * width; pos += nthreads) { @@ -63,7 +63,7 @@ void adagrad::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { constexpr size_t block_size = 256; const size_t grid_size = (local_size + block_size - 1) / block_size; auto&& stream = El::GPUManager::Stream(); - kernel<<>>( + adagrad_kernel<<>>( local_height, local_width, this->get_learning_rate(), m_eps, values.Buffer(), values.LDim(), diff --git a/src/optimizers/adam.cu b/src/optimizers/adam.cu index 3312737e83c..4d4cfd9aec5 100644 --- a/src/optimizers/adam.cu +++ b/src/optimizers/adam.cu @@ -30,20 +30,20 @@ namespace lbann { namespace { -__global__ void noncontiguous_kernel(size_t height, - size_t width, - DataType correction, - DataType eps, - DataType beta1, - DataType beta2, - DataType * __restrict__ values, - size_t values_ldim, - const DataType * __restrict__ gradient, - size_t gradient_ldim, - DataType * __restrict__ moment1, - size_t moment1_ldim, - DataType * __restrict__ moment2, - size_t moment2_ldim) { +__global__ void adam_noncontiguous_kernel(size_t height, + size_t width, + DataType correction, + DataType eps, + DataType beta1, + DataType beta2, + DataType * __restrict__ values, + size_t values_ldim, + const DataType * __restrict__ gradient, + size_t gradient_ldim, + DataType * __restrict__ moment1, + size_t moment1_ldim, + DataType * __restrict__ moment2, + size_t moment2_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; if (gid < height * width) { const auto& row = gid % height; @@ -58,15 +58,15 @@ __global__ void noncontiguous_kernel(size_t height, } } -__global__ void contiguous_kernel(size_t size, - DataType correction, - DataType eps, - DataType beta1, - DataType beta2, - DataType * __restrict__ values, - const DataType * __restrict__ gradient, - DataType * __restrict__ moment1, - DataType * __restrict__ 
moment2) { +__global__ void adam_contiguous_kernel(size_t size, + DataType correction, + DataType eps, + DataType beta1, + DataType beta2, + DataType * __restrict__ values, + const DataType * __restrict__ gradient, + DataType * __restrict__ moment1, + DataType * __restrict__ moment2) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; if (gid < size) { const auto& g = gradient[gid] + eps; @@ -103,12 +103,12 @@ void adam::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { auto&& stream = El::GPUManager::Stream(); if (values.Contiguous() && gradient.Contiguous() && m_moment1->Contiguous() && m_moment2->Contiguous()) { - contiguous_kernel<<>>( + adam_contiguous_kernel<<>>( local_size, correction, m_eps, m_beta1, m_beta2, values.Buffer(), gradient.LockedBuffer(), m_moment1->Buffer(), m_moment2->Buffer()); } else { - noncontiguous_kernel<<>>( + adam_noncontiguous_kernel<<>>( local_height, local_width, correction, m_eps, m_beta1, m_beta2, values.Buffer(), values.LDim(), gradient.LockedBuffer(), gradient.LDim(), diff --git a/src/optimizers/optimizer.cpp b/src/optimizers/optimizer.cpp index 3d5afeac622..3a0819c9d5a 100644 --- a/src/optimizers/optimizer.cpp +++ b/src/optimizers/optimizer.cpp @@ -138,7 +138,10 @@ void optimizer::add_to_gradient(const AbsDistMat& gradient, } if (scale == DataType(0)) { return; } - // Make a view or copy of input matrix in correct distribution + // Make sure input matrix is in correct distribution + // Note: If input matrix is already in correct distribution, just + // make a matrix view. Otherwise redistribute and possibly allreduce + // the matrix. 
m_gradient_v->Empty(); m_gradient_v->AlignWith(*m_gradient); if (m_gradient_v->DistData() == gradient.DistData()) { diff --git a/src/optimizers/rmsprop.cu b/src/optimizers/rmsprop.cu index 76cfa338f75..4d53511d20b 100644 --- a/src/optimizers/rmsprop.cu +++ b/src/optimizers/rmsprop.cu @@ -31,17 +31,17 @@ namespace lbann { namespace { -__global__ void kernel(size_t height, - size_t width, - DataType learning_rate, - DataType decay_rate, - DataType eps, - DataType * __restrict__ values, - size_t values_ldim, - const DataType * __restrict__ gradient, - size_t gradient_ldim, - DataType * __restrict__ cache, - size_t cache_ldim) { +__global__ void rmsprop_kernel(size_t height, + size_t width, + DataType learning_rate, + DataType decay_rate, + DataType eps, + DataType * __restrict__ values, + size_t values_ldim, + const DataType * __restrict__ gradient, + size_t gradient_ldim, + DataType * __restrict__ cache, + size_t cache_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = gridDim.x * blockDim.x; for (size_t pos = gid; pos < height * width; pos += nthreads) { @@ -65,7 +65,7 @@ void rmsprop::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) { constexpr size_t block_size = 256; const size_t grid_size = (local_size + block_size - 1) / block_size; auto&& stream = El::GPUManager::Stream(); - kernel<<>>( + rmsprop_kernel<<>>( local_height, local_width, this->get_learning_rate(), m_decay_rate, m_eps, values.Buffer(), values.LDim(), From b06b6ca64c7ae1d05bf54a2b34ab8f4f2a7d6afd Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Mon, 25 Feb 2019 14:44:37 -0800 Subject: [PATCH 108/443] Optimizing CUDA kernels for momentum optimizer. 
--- include/lbann/optimizers/sgd.hpp | 16 +++++---- src/optimizers/sgd.cu | 62 +++++++++++++++++++++----------- 2 files changed, 52 insertions(+), 26 deletions(-) diff --git a/include/lbann/optimizers/sgd.hpp b/include/lbann/optimizers/sgd.hpp index 7ee3068b8fa..664682c7478 100644 --- a/include/lbann/optimizers/sgd.hpp +++ b/include/lbann/optimizers/sgd.hpp @@ -38,7 +38,7 @@ namespace lbann { */ class sgd : public optimizer { - public: +public: sgd(lbann_comm *comm, DataType learning_rate, @@ -55,9 +55,9 @@ class sgd : public optimizer { /** Human-readable description. */ description get_description() const override; - /** Velocity for momentum optimizer. */ + /** Accumulated gradients for momentum optimizer. */ const AbsDistMat& get_velocity() const; - /** Velocity for momentum optimizer. */ + /** Accumulated gradients for momentum optimizer. */ AbsDistMat& get_velocity(); void setup(weights* w = nullptr) override; @@ -69,11 +69,15 @@ class sgd : public optimizer { private: - /** Momentum. */ + /** @brief Decay rate for gradient accumulation. + * @detailed A momentum of zero corresponds to vanilla SGD. + */ DataType m_momentum; - /** Nesterov acceleration. */ + /** Whether to apply Nesterov acceleration. */ bool m_nesterov; - /** Velocity for momentum optimizer. */ + /** @brief Accumulated gradients. + * @detailed Not used for vanilla SGD. + */ std::unique_ptr m_velocity; /** CPU implementation of momentum or Nesterov step. 
*/ diff --git a/src/optimizers/sgd.cu b/src/optimizers/sgd.cu index 6917dd684ab..88f40a62207 100644 --- a/src/optimizers/sgd.cu +++ b/src/optimizers/sgd.cu @@ -30,21 +30,20 @@ namespace lbann { namespace { -__global__ void momentum_kernel(size_t height, - size_t width, - DataType learning_rate, - DataType momentum, - DataType * __restrict__ values, - size_t values_ldim, - const DataType * __restrict__ gradient, - size_t gradient_ldim, - DataType * __restrict__ velocity, - size_t velocity_ldim) { +__global__ void momentum_noncontiguous_kernel(size_t height, + size_t width, + DataType learning_rate, + DataType momentum, + DataType * __restrict__ values, + size_t values_ldim, + const DataType * __restrict__ gradient, + size_t gradient_ldim, + DataType * __restrict__ velocity, + size_t velocity_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; - const size_t nthreads = gridDim.x * blockDim.x; - for (size_t pos = gid; pos < height * width; pos += nthreads) { - const auto& row = pos % height; - const auto& col = pos / height; + if (gid < height * width) { + const auto& row = gid % height; + const auto& col = gid / height; const auto& g = gradient[row + col * gradient_ldim]; auto& v = velocity[row + col * velocity_ldim]; auto& x = values[row + col * values_ldim]; @@ -53,6 +52,22 @@ __global__ void momentum_kernel(size_t height, } } +__global__ void momentum_contiguous_kernel(size_t size, + DataType learning_rate, + DataType momentum, + DataType * __restrict__ values, + const DataType * __restrict__ gradient, + DataType * __restrict__ velocity) { + const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; + if (gid < size) { + const auto& g = gradient[gid]; + auto& v = velocity[gid]; + auto& x = values[gid]; + v = momentum * v + g; + x -= learning_rate * v; + } +} + __global__ void nesterov_kernel(size_t height, size_t width, DataType learning_rate, @@ -98,12 +113,19 @@ void sgd::momentum_step_gpu(AbsDistMat& values, const AbsDistMat& gradient) { 
gradient.LockedBuffer(), gradient.LDim(), m_velocity->Buffer(), m_velocity->LDim()); } else { - momentum_kernel<<>>( - local_height, local_width, - this->get_learning_rate(), m_momentum, - values.Buffer(), values.LDim(), - gradient.LockedBuffer(), gradient.LDim(), - m_velocity->Buffer(), m_velocity->LDim()); + if (values.Contiguous() && gradient.Contiguous() + && m_velocity->Contiguous()) { + momentum_contiguous_kernel<<>>( + local_size, this->get_learning_rate(), m_momentum, + values.Buffer(), gradient.LockedBuffer(), m_velocity->Buffer()); + } else { + momentum_noncontiguous_kernel<<>>( + local_height, local_width, + this->get_learning_rate(), m_momentum, + values.Buffer(), values.LDim(), + gradient.LockedBuffer(), gradient.LDim(), + m_velocity->Buffer(), m_velocity->LDim()); + } } } From 3fbfdc2201b975ab1ed2a0ee22d62f5528601437 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Tue, 26 Feb 2019 11:45:00 -0800 Subject: [PATCH 109/443] Show the number of nodes in log and lbplot --- scripts/proto/lbann/plot/parser.py | 2 ++ scripts/proto/lbann/plot/plot.py | 20 ++++++++++++++++++-- src/utils/lbann_library.cpp | 1 + 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/scripts/proto/lbann/plot/parser.py b/scripts/proto/lbann/plot/parser.py index a9dc04b120e..22a0d3228f9 100644 --- a/scripts/proto/lbann/plot/parser.py +++ b/scripts/proto/lbann/plot/parser.py @@ -7,6 +7,8 @@ ('val_loss', 'validation objective function : ([0-9.]+)', lambda r: float(r.group(1))), ('val_acc', 'validation categorical accuracy : ([0-9.]+)', lambda r: float(r.group(1))/100.0), ('val_time', 'validation run time : ([0-9.]+)', lambda r: float(r.group(1))), + ('num_procs', 'Total number of processes\s*:\s*([\d]+)', lambda r: int(r.group(1))), + ('num_procs_on_node', 'Processes on node\s*:\s*([\d]+)', lambda r: int(r.group(1))), ] def parse(file_path): diff --git a/scripts/proto/lbann/plot/plot.py b/scripts/proto/lbann/plot/plot.py index 
8b58360d066..82671dd2cb5 100644 --- a/scripts/proto/lbann/plot/plot.py +++ b/scripts/proto/lbann/plot/plot.py @@ -41,7 +41,7 @@ def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', run_name_list = stat_name_list # Create table for comparing trials stat_table = tt.Texttable() - headings = ['Trial', 'Num Epochs', 'Avg. Train Time (s)', 'Avg. Val Time (s)'] + headings = ['Trial', 'Num Procs', 'Num Nodes', 'Num Epochs', 'Avg. Train Time (s)', 'Avg. Val Time (s)'] if plot_accuracy: headings += ['Peak Train Acc', 'Peak Val Acc'] @@ -64,6 +64,22 @@ def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', print('ERROR: Invalid file extension: {} from {}\nPlease provide either an LBANN output file with .out or .txt extension or a PyTorch output file with .json extension.'.format(stat_ext, stat_path)) sys.exit(1) + # Total number of processes + def parse_num(d, key): + if key in d.keys(): + assert len(set(d[key])) == 1 + return d[key][0] + else: + return None + + num_procs = parse_num(d, 'num_procs') + num_procs_on_node = parse_num(d, 'num_procs_on_node') + if num_procs is not None and num_procs_on_node is not None: + assert (num_procs % num_procs_on_node) == 0 + num_nodes = int(num_procs / num_procs_on_node) + else : + num_nodes = None + # Total epochs of training total_epochs = len(d['val_time']) @@ -100,7 +116,7 @@ def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', stat_dict_list.append((run_name, d)) # Add row to stats table for current trial - stat_table.add_row([run_name, total_epochs, avg_train_time, avg_val_time] \ + stat_table.add_row([run_name, num_procs, num_nodes, total_epochs, avg_train_time, avg_val_time] \ + ([peak_train_acc, peak_val_acc] if plot_accuracy else []) \ + [min_train_loss, min_val_loss]) diff --git a/src/utils/lbann_library.cpp b/src/utils/lbann_library.cpp index 7f6a73fd496..88b3412cf26 100644 --- a/src/utils/lbann_library.cpp +++ b/src/utils/lbann_library.cpp @@ -241,6 +241,7 
@@ void print_lbann_configuration(lbann_data::Model *pb_model, lbann_comm *comm, in // Report hardware settings std::cout << "Hardware properties (for master process)" << std::endl << " Processes on node : " << comm->get_procs_per_node() << std::endl + << " Total number of processes : " << comm->get_procs_in_world() << std::endl << " OpenMP threads per process : " << omp_get_max_threads() << std::endl << " I/O threads per process (+offset) : " << io_threads_per_process << " (+" << io_threads_offset << ")" << std::endl; From 62eee82b39de795636cda06c18be19da4bc3a49a Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Tue, 26 Feb 2019 11:47:02 -0800 Subject: [PATCH 110/443] Fix a bug when #procs is not provided --- scripts/proto/lbann/plot/plot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/proto/lbann/plot/plot.py b/scripts/proto/lbann/plot/plot.py index 82671dd2cb5..c8e2233347e 100644 --- a/scripts/proto/lbann/plot/plot.py +++ b/scripts/proto/lbann/plot/plot.py @@ -66,8 +66,7 @@ def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', # Total number of processes def parse_num(d, key): - if key in d.keys(): - assert len(set(d[key])) == 1 + if key in d.keys() and len(set(d[key])) == 1: return d[key][0] else: return None @@ -79,6 +78,7 @@ def parse_num(d, key): num_nodes = int(num_procs / num_procs_on_node) else : num_nodes = None + print('WARNING: No process counts are provided.') # Total epochs of training total_epochs = len(d['val_time']) From bfb8bd306f5593e8a629d704b4726a345ce98023 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Tue, 26 Feb 2019 16:09:55 -0800 Subject: [PATCH 111/443] Bugfixes in layer copy constructors. 
--- include/lbann/layers/learning/base_convolution.hpp | 7 ++++--- include/lbann/layers/transform/pooling.hpp | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/lbann/layers/learning/base_convolution.hpp b/include/lbann/layers/learning/base_convolution.hpp index f8175301b16..5630a3aa3d2 100644 --- a/include/lbann/layers/learning/base_convolution.hpp +++ b/include/lbann/layers/learning/base_convolution.hpp @@ -999,13 +999,14 @@ class base_convolution_layer : public learning_layer { cudnnDataType_t data_type; cudnnTensorFormat_t format; int num_dims; + std::vector dims(1); CHECK_CUDNN(cudnnGetFilterNdDescriptor(src, - 0, + dims.size(), &data_type, &format, &num_dims, - nullptr)); - std::vector dims(num_dims); + dims.data())); + dims.resize(num_dims); CHECK_CUDNN(cudnnGetFilterNdDescriptor(src, num_dims, &data_type, diff --git a/include/lbann/layers/transform/pooling.hpp b/include/lbann/layers/transform/pooling.hpp index b60e17b653c..f7c960c86aa 100644 --- a/include/lbann/layers/transform/pooling.hpp +++ b/include/lbann/layers/transform/pooling.hpp @@ -532,7 +532,7 @@ class pooling_layer : public transform_layer { nullptr)); std::vector dims(num_dims), pads(num_dims), strides(num_dims); CHECK_CUDNN(cudnnGetPoolingNdDescriptor(src, - 0, + num_dims, &mode, &nan_propagation, &num_dims, From 4acb5503c6796ec84854a6756f7ef57225be43bf Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 27 Feb 2019 07:09:07 -0800 Subject: [PATCH 112/443] Changed the handling of open file descriptors to use a list of access times within the epoch and a deque-based heap to track the access sequence of all files for each rank. This will allow for an optimal queuing of file access patterns for a fixed number of open files allowed. The deque-based heap will allow us to close an open file that will be used farthest in the future to maintain a fixed number of open file descriptors. 
--- .../data_readers/data_reader_jag_conduit.hpp | 2 +- .../lbann/data_readers/sample_list_jag.hpp | 220 +++++++++++++++--- .../data_readers/sample_list_jag_impl.hpp | 70 +++--- src/data_readers/data_reader_jag_conduit.cpp | 19 +- 4 files changed, 244 insertions(+), 67 deletions(-) diff --git a/include/lbann/data_readers/data_reader_jag_conduit.hpp b/include/lbann/data_readers/data_reader_jag_conduit.hpp index fb97a47ee2b..38df939c10a 100644 --- a/include/lbann/data_readers/data_reader_jag_conduit.hpp +++ b/include/lbann/data_readers/data_reader_jag_conduit.hpp @@ -356,7 +356,7 @@ class data_reader_jag_conduit : public generic_data_reader { bool load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) const; /// Check if a key exist for sample i bool has_conduit_path(const size_t i, const std::string& key) const; - void close_conduit_node(const size_t i); + // void close_conduit_node(const size_t i); /// Obtain image data std::vector< std::vector > get_image_data(const size_t i, conduit::Node& sample) const; diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index 22901b01236..4de9d90e78c 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -14,11 +14,15 @@ #include "lbann/utils/file_utils.hpp" #include +#include #include +#include #include #include #include "conduit/conduit_relay_io_hdf5.hpp" +#define LBANN_MAX_OPEN_DATA_FILES 384 + namespace lbann { struct sample_list_header { @@ -75,12 +79,18 @@ class sample_list_jag { /// To describe a sample as a pair of the file to which it belongs and its name // using sample_t = std::pair; using sample_t = std::pair; - using sample_id_map_t = std::string; + /// Map of the file index to the file name, file descriptor, and + /// and a queue of each step and substep when data will be loaded from the file + using sample_id_map_t = std::tuple>>; // rename + // to sample_to_file_map /// 
Type for the list of samples using samples_t = std::vector< sample_t >; - using samples_id_map_v_t = std::vector< sample_id_map_t >; + using samples_id_map_v_t = std::vector< sample_id_map_t >; // rename to sample_to_file_v or something + /// Type for the map of file descriptors to usage step and substep + using fd_use_map_t = std::pair>; sample_list_jag(); + ~sample_list_jag(); /// Set the number of partitions and clear internal states void set_num_partitions(size_t n); @@ -142,7 +152,7 @@ class sample_list_jag { const sample_t& operator[](size_t idx) const; const std::string& get_samples_filename(sample_id_t id) const { - return m_sample_id_map[id]; + return std::get<0>(m_sample_id_map[id]); } const std::string& get_samples_dirname() const { @@ -150,26 +160,23 @@ class sample_list_jag { } hid_t get_samples_hdf5_handle(sample_id_t id) const { - const std::string& filename = m_sample_id_map[id]; - hid_t h = 0; - if(m_open_fd_map.count(filename) != 0) { - h = m_open_fd_map.at(filename); - } + hid_t h = std::get<1>(m_sample_id_map[id]); return h; } void set_samples_filename(sample_id_t id, const std::string& filename) { - m_sample_id_map[id] = filename; + std::get<0>(m_sample_id_map[id]) = filename; } - void set_files_hdf5_handle(const std::string& filename, hid_t h) { + void set_samples_hdf5_handle(sample_id_t id, hid_t h) { +#if 0 int bucket_count = m_open_fd_map.bucket_count(); int bucket = m_open_fd_map.bucket(filename); - if(m_open_fd_map.bucket_size(bucket) > 0) { - // if(m_open_fd_map.bucket_size(bucket) != 1) { - // LBANN_ERROR(std::string{} + " :: unexpected number of open file descriptors for bucket " - // + std::to_string(bucket)); - // } + if(!allow_collisions && m_open_fd_map.bucket_size(bucket) > 0) { + if(m_open_fd_map.bucket_size(bucket) != 1) { + LBANN_ERROR(std::string{} + " :: unexpected number of open file descriptors for bucket " + + std::to_string(bucket)); + } auto local_it = m_open_fd_map.begin(bucket); if(local_it == 
m_open_fd_map.end(bucket)) { LBANN_ERROR(std::string{} + " :: bucket '" + std::to_string(bucket) @@ -182,16 +189,28 @@ class sample_list_jag { + "' has a corrupt file descriptor = " + std::to_string(old_h)); } - // conduit::relay::io::hdf5_close_file(old_h); - // int num_erased = m_open_fd_map.erase(old_filename); - // if(num_erased != 1) { - // LBANN_ERROR(std::string{} + " :: erasing file descriptor for '" + old_filename - // + "' that had a file descriptor = " + std::to_string(old_h)); - // } + conduit::relay::io::hdf5_close_file(old_h); + int num_erased = m_open_fd_map.erase(old_filename); + if(num_erased != 1) { + LBANN_ERROR(std::string{} + " :: erasing file descriptor for '" + old_filename + + "' that had a file descriptor = " + std::to_string(old_h)); + } } + if(m_open_fd_pq.size() > 100/*LBANN_MAX_OPEN_DATA_FILES*/) { + std::cout << "The file descriptors are over the limit, lets close " << m_open_fd_pq.top().first << std::endl; + while(!m_open_fd_pq.empty()) { + auto e = m_open_fd_pq.top(); + std::cout << "{" << e.first << ", " << e.second << "}" << std::endl; + // std::cout << q.top() << " "; + m_open_fd_pq.pop(); + } + std::cout << '\n'; + } auto result = m_open_fd_map.emplace(filename, h); + m_open_fd_pq.emplace(std::make_pair(filename,access_count)); + int bucket2 = m_open_fd_map.bucket(filename); int bucket_count2 = m_open_fd_map.bucket_count(); if(!result.second) { @@ -201,16 +220,92 @@ class sample_list_jag { LBANN_ERROR(std::string{} + " :: the buckets don't match original bucket " + std::to_string(bucket) + " with a count of " + std::to_string(bucket_count) + " and new bucket " + std::to_string(bucket2) + " and a new count of " + std::to_string(bucket_count2)); } - // if(m_open_fd_map.bucket_size(bucket) != 1) { - // LBANN_WARNING(std::string{} + " :: there should be one entry with an open file descriptors for bucket " - // + std::to_string(bucket) + " not " - // + std::to_string(m_open_fd_map.bucket_size(bucket)) + " entries"); + 
if(m_open_fd_map.bucket_size(bucket) != 1) { + LBANN_WARNING(std::string{} + " :: there should be one entry with an open file descriptors for bucket " + + std::to_string(bucket) + " not " + + std::to_string(m_open_fd_map.bucket_size(bucket)) + " entries"); + } +#endif + + // for (auto&& e : m_sample_id_map) { + auto&& e = m_sample_id_map[id]; + std::get<1>(e) = h; + // std::cout << "Attempt to set the hdf5 handle " << h << " for filename " << std::get<0>(e) << std::endl; + + // std::cout << "set_files_hdf5_handle existing list for " << id << " {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; + // for (auto&& v : std::get<2>(e)) { + // std::cout << "{" << v.first << "," << v.second << "}, "; + // } + // std::cout << std::endl; + + // if(!m_open_fd_pq.empty()) { + // // std::cout << "set_files_hdf5_handle Priotirty QUeue "; + // std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // // auto& q = m_open_fd_pq.front(); + // // std::cout << q.first << " {" << q.second.first << "," << q.second.second << "}, "; + // // std::cout << std::endl; // } + + if(!m_open_fd_pq.empty()) { + /// Before we can enqueue the any new access times for this descriptor, remove any + /// earlier descriptor + std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + if(m_open_fd_pq.front().first == id) { + //LBANN_ERROR("We have weirdness here, the head of the queue is not " + std::to_string(id)); + m_open_fd_pq.pop_front(); + } + std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + } + + auto& file_access_queue = std::get<2>(e); + if(!file_access_queue.empty()) { + file_access_queue.pop_front(); + if(!file_access_queue.empty()) { + m_open_fd_pq.emplace_back(std::make_pair(id,file_access_queue.front())); + std::push_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // std::cout << "set_files_hdf5_handle New priotirty queue top "; + // auto& q = m_open_fd_pq.front(); + // for(auto&& q: m_open_fd_pq) { + // std::cout << q.first << " {" << 
q.second.first << "," << q.second.second << "}, "; + // } + // std::cout << std::endl; + } + // std::cout << "set_files_hdf5_handle updated list for " << id << " {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; + // for (auto&& v : std::get<2>(e)) { + // std::cout << "{" << v.first << "," << v.second << "}, "; + // } + // std::cout << std::endl; + } + + // std::get<1>(m_sample_id_map[id]) = h; + // std::cout << "I am setting the hdf5 handle " << h << " for filename " << filename << std::endl; + + // m_open_fd_map.emplace(std::make_tuple(filename, h, access_count)); + // for (auto&& e : m_sample_id_map) { + // std::cout << "set_files_hdf5_handle {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; + // if(std::get<2>(e).empty()) { + // std::cout << "empty" << std::endl; + // }else { + // for (auto&& v : std::get<2>(e)) { + // std::cout << "{" << v.first << "," << v.second << "}, "; + // } + // std::cout << std::endl; + // } + // } + // for (auto&& e : m_sample_id_map) + // std::cout << "{" << std::get<0)>(e) << ", " << std::get<1>(e) << ", " << std::get<2>(e) << "}" << std::endl; + } - void set_samples_hdf5_handle(sample_id_t id, hid_t h) { - const std::string& filename = m_sample_id_map[id]; - set_files_hdf5_handle(filename, h); + void set_files_hdf5_handle(const std::string& filename, hid_t h) { + sample_id_t id = 0; + for (auto&& e : m_sample_id_map) { + if(std::get<0>(e) == filename) { + break; + } + id++; + } + set_samples_hdf5_handle(id, h); } hid_t open_samples_hdf5_handle(const size_t i) { @@ -228,6 +323,56 @@ class sample_list_jag { LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' could not be opened."); } set_samples_hdf5_handle(id, h); + }else { + + if(!m_open_fd_pq.empty()) { + /// Before we can enqueue the any new access times for this descriptor, remove any + /// earlier descriptor + std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + if(m_open_fd_pq.front().first == id) { + // LBANN_ERROR("We have 
weirdness here, the head of the queue is not " + std::to_string(id)); + m_open_fd_pq.pop_front(); + } + std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + } + + // std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // if(!m_open_fd_pq.empty() && m_open_fd_pq.front().first != id) { + // LBANN_WARNING("We have weirdness here, the head of the queue is not " + std::to_string(id)); + // } + + auto& e = m_sample_id_map[id]; + + // std::cout << "open_files_hdf5_handle updated list {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; + // for (auto&& v : std::get<2>(e)) { + // std::cout << "{" << v.first << "," << v.second << "}, "; + // } + // std::cout << std::endl; + + // std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // if(!m_open_fd_pq.empty()) { + // // std::cout << "open_files_hdf5_handle priority queue :"; + // auto& p = m_open_fd_pq.front(); + // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; + // std::cout << std::endl; + // } + + auto& file_access_queue = std::get<2>(e); + if(!file_access_queue.empty()) { + file_access_queue.pop_front(); + if(!file_access_queue.empty()) { + m_open_fd_pq.emplace_back(std::make_pair(id,file_access_queue.front())); + std::push_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // if(!m_open_fd_pq.empty()) { + // std::cout << "open_files_hdf5_handle new priority queue :"; + // // auto& p = m_open_fd_pq.front(); + // for(auto&& p: m_open_fd_pq) { + // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; + // } + // std::cout << std::endl; + // } + } + } } return h; @@ -237,6 +382,8 @@ class sample_list_jag { template size_t all_gather_field(T data, std::vector& gathered_data, lbann_comm& comm); void all_gather_packed_lists(lbann_comm& comm); + void compute_epochs_file_usage(const std::vector& shufled_indices, int mini_batch_size, const lbann_comm& comm); + protected: /// 
Reads a header line from the sample list given as a stream, and use the info string for error message @@ -263,6 +410,11 @@ class sample_list_jag { /// Add the header info to the given string void write_header(std::string& sstr, size_t num_files) const; + static bool pq_cmp(fd_use_map_t left, fd_use_map_t right) { + return ((left.second).first < (right.second).first) || + (((left.second).first == (right.second).first) && + ((left.second).second < (right.second).second)); } + private: /// The number of partitions to divide samples into @@ -274,7 +426,7 @@ class sample_list_jag { /// Contains list of all sample samples_t m_sample_list; - /// Maps sample IDs to file names + /// Maps sample IDs to file names, file descriptors, and use counts samples_id_map_v_t m_sample_id_map; /// Maps a global index to a local index @@ -283,8 +435,14 @@ class sample_list_jag { /// Track the number of samples per file std::unordered_map m_file_map; - /// Track the number of open file descriptors - std::unordered_map m_open_fd_map; + /// Track the number of open file descriptors and how many times + /// each file descriptor will be used + // std::unordered_map m_open_fd_map; + // std::set> m_open_fd_map; + // Using lambda to compare elements. 
+ // auto cmp = [](std::pair left,std::pair right) { return (left.second) > (right.second);}; + // std::priority_queue, std::function> m_open_fd_pq; + std::deque m_open_fd_pq; }; diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index ea3d47190af..c19440660bd 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -20,8 +20,6 @@ #include #include -#define LBANN_MAX_OPEN_DATA_FILES 768 - namespace lbann { inline sample_list_header::sample_list_header() @@ -77,13 +75,14 @@ inline size_t sample_list_indexer::get_partition_offset() const { inline sample_list_jag::sample_list_jag() -: m_num_partitions(1u) { - /// Create an unordered map that will not rehash and has a fixed - /// number of buckets. This allows the sample list to easily select - /// a file descriptor for closing - m_open_fd_map.reserve(LBANN_MAX_OPEN_DATA_FILES); - m_open_fd_map.rehash(LBANN_MAX_OPEN_DATA_FILES); - m_open_fd_map.max_load_factor(std::numeric_limits::max()); + : m_num_partitions(1u) {} + +inline sample_list_jag::~sample_list_jag() { + // Close the existing open files + // for(auto f : m_open_fd_map) { + // conduit::relay::io::hdf5_close_file(f.second); + // } + // m_open_fd_map.clear(); } inline void sample_list_jag::set_num_partitions(size_t n) { @@ -276,8 +275,6 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str continue; // skipping the file } - set_files_hdf5_handle(filename, hdf5_file_hnd); - if(m_file_map.count(filename) > 0) { if(sample_names.size() != m_file_map[filename]) { LBANN_ERROR(std::string("The same file ") @@ -292,7 +289,8 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str } sample_id_t index = m_sample_id_map.size(); - m_sample_id_map.emplace_back(filename); + m_sample_id_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); + set_files_hdf5_handle(filename, 
hdf5_file_hnd); size_t valid_sample_count = 0u; for(auto s : sample_names) { @@ -363,8 +361,6 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str continue; // skipping the file } - set_files_hdf5_handle(filename, hdf5_file_hnd); - if(m_file_map.count(filename) > 0) { if(sample_names.size() != m_file_map[filename]) { LBANN_ERROR(std::string("The same file ") @@ -381,7 +377,8 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str std::unordered_set set_of_samples(sample_names.begin(), sample_names.end()); sample_id_t index = m_sample_id_map.size(); - m_sample_id_map.emplace_back(filename); + m_sample_id_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); + set_files_hdf5_handle(filename, hdf5_file_hnd); size_t valid_sample_count = 0u; while(!sstr.eof()) { @@ -524,18 +521,20 @@ inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { std::vector per_rank_sample_id_map(num_ranks); std::vector> per_rank_file_map(num_ranks); + // Close the existing open files + for(auto&& e : m_sample_id_map) { + conduit::relay::io::hdf5_close_file(std::get<1>(e)); + std::get<1>(e) = 0; + std::get<2>(e).clear(); + } + m_open_fd_pq.clear(); + size_t num_samples = all_gather_field(m_sample_list, per_rank_samples, comm); size_t num_ids = all_gather_field(m_sample_id_map, per_rank_sample_id_map, comm); size_t num_files = all_gather_field(m_file_map, per_rank_file_map, comm); - // Close the existing open files - for(auto f : m_open_fd_map) { - conduit::relay::io::hdf5_close_file(f.second); - } - m_sample_list.clear(); m_sample_id_map.clear(); - m_open_fd_map.clear(); m_sample_list.reserve(num_samples); m_sample_id_map.reserve(num_ids); @@ -547,18 +546,18 @@ inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { const std::unordered_map& file_map = per_rank_file_map[r]; for (const auto& s : sample_list) { sample_id_t index = s.first; - const std::string& filename = sample_id_map[index]; + 
const std::string& filename = std::get<0>(sample_id_map[index]); if(index >= m_sample_id_map.size() - || (m_sample_id_map.back() != filename)) { + || (std::get<0>(m_sample_id_map.back()) != filename)) { index = m_sample_id_map.size(); - m_sample_id_map.emplace_back(filename); + m_sample_id_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); // Update the file map structure if(m_file_map.count(filename) == 0) { m_file_map[filename] = file_map.at(filename); } }else { for(size_t i = 0; i < m_sample_id_map.size(); i++) { - if(filename == m_sample_id_map[i]) { + if(filename == std::get<0>(m_sample_id_map[i])) { index = i; break; } @@ -571,6 +570,25 @@ inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { return; } +inline void sample_list_jag::compute_epochs_file_usage(const std::vector& shuffled_indices, int mini_batch_size, const lbann_comm& comm) { + for (auto&& e : m_sample_id_map) { + std::get<1>(e) = 0; + std::get<2>(e).clear(); + } + + for (size_t i = 0; i < shuffled_indices.size(); i++) { + int idx = shuffled_indices[i]; + const auto& s = m_sample_list[idx]; + sample_id_t index = s.first; + + if((i % mini_batch_size) % comm.get_procs_per_trainer() == static_cast(comm.get_rank_in_trainer())) { + /// Enqueue the iteration step when the sample will get used + int step = i / mini_batch_size; + int substep = (i % mini_batch_size) / comm.get_procs_per_trainer(); + std::get<2>(m_sample_id_map[index]).emplace_back(std::make_pair(step, substep)); + } + } +} inline void sample_list_jag::clear() { m_num_partitions = 1u; @@ -612,7 +630,7 @@ inline bool sample_list_jag::to_string(size_t p, std::string& sstr) const { std::map> tmp_file_map; for (const auto& s : m_sample_list) { - std::string filename = m_sample_id_map[s.first]; + std::string filename = std::get<0>(m_sample_id_map[s.first]); tmp_file_map[filename].emplace_back(s.second); } diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp 
index dd985ed6dd9..228ba35480b 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -126,6 +126,7 @@ void data_reader_jag_conduit::shuffle_indices(rng_gen& gen) { return; } generic_data_reader::shuffle_indices(gen); + m_sample_list.compute_epochs_file_usage(get_shuffled_indices(), get_mini_batch_size(), *m_comm); } int data_reader_jag_conduit::compute_max_num_parallel_readers() { @@ -327,16 +328,16 @@ bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::strin return true; } -void data_reader_jag_conduit::close_conduit_node(const size_t i) { - const sample_t& s = m_sample_list[i]; +// void data_reader_jag_conduit::close_conduit_node(const size_t i) { +// const sample_t& s = m_sample_list[i]; - sample_id_t id = s.first; - hid_t h = m_sample_list.get_samples_hdf5_handle(id); - if (h > static_cast(0)) { - conduit::relay::io::hdf5_close_file(h); - m_sample_list.set_samples_hdf5_handle(id, 0); - } -} +// sample_id_t id = s.first; +// hid_t h = m_sample_list.get_samples_hdf5_handle(id); +// if (h > static_cast(0)) { +// conduit::relay::io::hdf5_close_file(h); +// m_sample_list.set_samples_hdf5_handle(id, 0); +// } +// } bool data_reader_jag_conduit::has_conduit_path(const size_t i, const std::string& key) const { const sample_t& s = m_sample_list[i]; From ffddf6220ac2e4ad1a2d0cfe2d61183dd1702d28 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Wed, 27 Feb 2019 07:53:22 -0800 Subject: [PATCH 113/443] Add the savefig argument --- scripts/proto/lbann/plot/plot.py | 10 +++++++--- scripts/proto/scripts/plot/lbplot | 4 +++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/proto/lbann/plot/plot.py b/scripts/proto/lbann/plot/plot.py index c8e2233347e..f482ec145d3 100644 --- a/scripts/proto/lbann/plot/plot.py +++ b/scripts/proto/lbann/plot/plot.py @@ -25,7 +25,7 @@ def _get_time_axis(time_list, units='hours'): return time_axis def 
plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', - plot_accuracy=True, merge_train_val=False, pretty_ylim=True): + plot_accuracy=True, merge_train_val=False, pretty_ylim=True, savefig=None): """Tabulate and plot stats from LBANN or PyTorch training in common format.""" ### Load stat dicts and print stat summary stat_dict_list = [] @@ -183,5 +183,9 @@ def parse_num(d, key): # Legend position will likely only be good for the test example # plt.legend(loc=(0.25, 1.22)) plt.legend() - # Show the plot - plt.show() + + if savefig is None: + # Show the plot + plt.show() + else: + plt.savefig(savefig) diff --git a/scripts/proto/scripts/plot/lbplot b/scripts/proto/scripts/plot/lbplot index ccb3d711afe..d7d4c98b1b7 100755 --- a/scripts/proto/scripts/plot/lbplot +++ b/scripts/proto/scripts/plot/lbplot @@ -23,13 +23,15 @@ def main(): parser.add_argument("--pretty-ylim", dest="pretty_ylim", action="store_const", const=True, default=False, help="Set ylim to [0,1] for accuracy plots and [0,{}] for loss plots".format(PRETTY_YLIM_LOSS)) + parser.add_argument('--savefig', type=str, default='') args = parser.parse_args() # Tabulate and plot stats from user input files plot(args.stat_path, args.stat_name, ind_var=args.ind_var, time_units=args.time_units, plot_accuracy=(not args.no_accuracy), merge_train_val=args.merge_train_val, - pretty_ylim=args.pretty_ylim) + pretty_ylim=args.pretty_ylim, + savefig=args.savefig if args.savefig != '' else None) if __name__=='__main__': From b6cc308943cc46d4604bfc5859fe1ba1f544891c Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Wed, 27 Feb 2019 08:28:33 -0800 Subject: [PATCH 114/443] Add the save-csv argument --- scripts/proto/lbann/plot/plot.py | 25 +++++++++++++++++++------ scripts/proto/scripts/plot/lbplot | 6 ++++-- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/scripts/proto/lbann/plot/plot.py b/scripts/proto/lbann/plot/plot.py index f482ec145d3..3f4808fda8e 
100644 --- a/scripts/proto/lbann/plot/plot.py +++ b/scripts/proto/lbann/plot/plot.py @@ -4,6 +4,7 @@ import json import matplotlib.pyplot as plt import texttable as tt +import pandas as pd # Local imports from . import parser @@ -25,7 +26,7 @@ def _get_time_axis(time_list, units='hours'): return time_axis def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', - plot_accuracy=True, merge_train_val=False, pretty_ylim=True, savefig=None): + plot_accuracy=True, merge_train_val=False, pretty_ylim=True, save_fig=None, save_csv=None): """Tabulate and plot stats from LBANN or PyTorch training in common format.""" ### Load stat dicts and print stat summary stat_dict_list = [] @@ -49,6 +50,8 @@ def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', stat_table.header(headings) # Loop through each trial + rows = [] + row_names = [] for run_name, stat_path in zip(run_name_list, stat_path_list): # Load stat file stat_ext = os.path.splitext(stat_path)[1] @@ -116,9 +119,14 @@ def parse_num(d, key): stat_dict_list.append((run_name, d)) # Add row to stats table for current trial - stat_table.add_row([run_name, num_procs, num_nodes, total_epochs, avg_train_time, avg_val_time] \ - + ([peak_train_acc, peak_val_acc] if plot_accuracy else []) \ - + [min_train_loss, min_val_loss]) + row = [run_name, num_procs, num_nodes, total_epochs, avg_train_time, avg_val_time] \ + + ([peak_train_acc, peak_val_acc] if plot_accuracy else []) \ + + [min_train_loss, min_val_loss] + rows.append(row) + row_names.append(run_name) + + for row in rows: + stat_table.add_row(row) # Print the stats table print() @@ -184,8 +192,13 @@ def parse_num(d, key): # plt.legend(loc=(0.25, 1.22)) plt.legend() - if savefig is None: + if save_fig is None: # Show the plot plt.show() else: - plt.savefig(savefig) + plt.savefig(save_fig) + + if save_csv is not None: + df = pd.DataFrame([dict(zip(headings, row)) for row in rows], + index=row_names) + df.to_csv(save_csv) diff --git 
a/scripts/proto/scripts/plot/lbplot b/scripts/proto/scripts/plot/lbplot index d7d4c98b1b7..6105edc9693 100755 --- a/scripts/proto/scripts/plot/lbplot +++ b/scripts/proto/scripts/plot/lbplot @@ -23,7 +23,8 @@ def main(): parser.add_argument("--pretty-ylim", dest="pretty_ylim", action="store_const", const=True, default=False, help="Set ylim to [0,1] for accuracy plots and [0,{}] for loss plots".format(PRETTY_YLIM_LOSS)) - parser.add_argument('--savefig', type=str, default='') + parser.add_argument('--save-fig', type=str, default='') + parser.add_argument('--save-csv', type=str, default='') args = parser.parse_args() # Tabulate and plot stats from user input files @@ -31,7 +32,8 @@ def main(): plot_accuracy=(not args.no_accuracy), merge_train_val=args.merge_train_val, pretty_ylim=args.pretty_ylim, - savefig=args.savefig if args.savefig != '' else None) + save_fig=args.save_fig if args.save_fig != '' else None, + save_csv=args.save_csv if args.save_csv != '' else None) if __name__=='__main__': From cca484f3b033a7f4b6d2108ca95459d23d1ffc90 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Wed, 27 Feb 2019 08:35:08 -0800 Subject: [PATCH 115/443] Add a dependency to setup.py --- scripts/proto/setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/proto/setup.py b/scripts/proto/setup.py index f0a613f64c8..e9f638539dc 100755 --- a/scripts/proto/setup.py +++ b/scripts/proto/setup.py @@ -32,7 +32,8 @@ def getLBANNVersion(): "matplotlib>=2.0.2", "graphviz>=0.10.1", "texttable>=1.4.0", - "nose>=1.3.7"], + "nose>=1.3.7", + "pandas>=0.24.1"], test_suite="nose.collector", tests_require=["nose"], include_package_data=True From d95b3a6230f9c2a4f7ee3fde3c6e925d49c1e72b Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Wed, 27 Feb 2019 09:52:01 -0800 Subject: [PATCH 116/443] Add descriptions to the new lbplot arguments --- scripts/proto/scripts/plot/lbplot | 6 ++++-- 1 
file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/proto/scripts/plot/lbplot b/scripts/proto/scripts/plot/lbplot index 6105edc9693..db2bc1429d4 100755 --- a/scripts/proto/scripts/plot/lbplot +++ b/scripts/proto/scripts/plot/lbplot @@ -23,8 +23,10 @@ def main(): parser.add_argument("--pretty-ylim", dest="pretty_ylim", action="store_const", const=True, default=False, help="Set ylim to [0,1] for accuracy plots and [0,{}] for loss plots".format(PRETTY_YLIM_LOSS)) - parser.add_argument('--save-fig', type=str, default='') - parser.add_argument('--save-csv', type=str, default='') + parser.add_argument('--save-fig', type=str, default='', + help="Save the training/validation curve plot as an image file.") + parser.add_argument('--save-csv', type=str, default='', + help="Save the sumamry table as a CSV file.") args = parser.parse_args() # Tabulate and plot stats from user input files From a2ef1e34c3d85872a7147758fb750e435c6d5820 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 27 Feb 2019 10:24:43 -0800 Subject: [PATCH 117/443] All member variables of model class are private. Reducing verbosity for epoch/step access functions. 
--- include/lbann/models/model.hpp | 320 +++++----- model_zoo/lbann.cpp | 4 + src/callbacks/callback_check_dataset.cpp | 6 +- src/callbacks/callback_check_init.cpp | 2 +- src/callbacks/callback_checknan.cpp | 8 +- src/callbacks/callback_checkpoint.cpp | 10 +- src/callbacks/callback_checksmall.cpp | 6 +- src/callbacks/callback_confusion_matrix.cpp | 4 +- src/callbacks/callback_debug.cpp | 13 +- src/callbacks/callback_debug_io.cpp | 29 +- src/callbacks/callback_dump_error_signals.cpp | 4 +- src/callbacks/callback_dump_gradients.cpp | 4 +- ...callback_dump_minibatch_sample_indices.cpp | 17 +- src/callbacks/callback_dump_outputs.cpp | 13 +- src/callbacks/callback_dump_weights.cpp | 2 +- src/callbacks/callback_imcomm.cpp | 6 +- src/callbacks/callback_io.cpp | 4 +- src/callbacks/callback_learning_rate.cpp | 20 +- src/callbacks/callback_ltfb.cpp | 6 +- src/callbacks/callback_perturb_adam.cpp | 2 +- src/callbacks/callback_print.cpp | 4 +- src/callbacks/callback_replace_weights.cpp | 4 +- src/callbacks/callback_save_images.cpp | 8 +- src/callbacks/callback_save_model.cpp | 4 +- src/callbacks/callback_summary.cpp | 22 +- src/callbacks/callback_timer.cpp | 6 +- src/callbacks/callback_variable_minibatch.cpp | 10 +- src/callbacks/profiler.cpp | 22 +- src/data_readers/data_reader.cpp | 4 +- src/data_readers/data_reader_jag_conduit.cpp | 4 +- src/data_readers/data_reader_moving_mnist.cpp | 4 +- src/data_store/data_store_jag.cpp | 2 +- src/data_store/generic_data_store.cpp | 2 +- src/models/model.cpp | 557 +++++++++--------- 34 files changed, 559 insertions(+), 574 deletions(-) diff --git a/include/lbann/models/model.hpp b/include/lbann/models/model.hpp index 0041167c37a..009579c5dbc 100644 --- a/include/lbann/models/model.hpp +++ b/include/lbann/models/model.hpp @@ -53,6 +53,10 @@ class lbann_callback; class model { public: + // =========================================== + // Life cycle functions + // =========================================== + model(lbann_comm *comm, El::Int 
mini_batch_size, objective_function *obj_fn, @@ -62,58 +66,37 @@ class model { virtual ~model(); virtual model* copy() const = 0; - /** Return model type's name. - * - * The model type name should be a brief, human-readable - * description. + // =========================================== + // Access functions + // =========================================== + + /** @brief Model type's name. + * @detailed Should be a brief, human-readable description of the + * model's architecture. */ virtual std::string get_type() const = 0; - /** Set model instance's name. - * - * Each model should have a unique, preferably human-readable, - * name. + /** @brief Model instance name. + * @detailed Each model in a trainer should have a unique, and + * preferably human-readable, name. + */ + std::string get_name() const noexcept { return m_name; } + /** @brief Model instance name. + * @detailed Each model in a trainer should have a unique, and + * preferably human-readable, name. */ void set_name(std::string name); - /** Return model instance's name. */ - std::string get_name() const { return m_name; } - /** Human-readable description. */ virtual description get_description() const; - /** Set up the model. */ - virtual void setup(std::shared_ptr io_thread_pool); - - /** Add layer to model. */ - virtual void add_layer(std::unique_ptr l); - - /** Add weights to model. */ - void add_weights(weights *w); - - /** Register a new callback for the model. */ - void add_callback(lbann_callback *cb); - - /** Get the list of callbacks for the model. */ - virtual std::vector& get_callbacks() { - return m_callbacks; - } - - /** Register a new metric for the model. */ - void add_metric(metric *m); - - /** Construct an instance of the default optimizer. - * If there is no default optimizer, a null pointer is returned. - */ - optimizer* create_optimizer() const; - /** Return the model's objective function. 
*/ objective_function* get_objective_function() const { return m_objective_function; } /** Return the model's metrics. */ - virtual const std::vector& get_metrics() const { + virtual const std::vector& get_metrics() const { return m_metrics; } @@ -136,13 +119,10 @@ class model { std::vector get_weights(); - /** Replace the model's weights. */ - void replace_weights(std::vector& w); - - /** Copy trained weights from input parameter w. - * Only weight values are placed, pointers and layer structure are in place. - * Weights to be copied are of the same name */ - void copy_trained_weights_from(std::vector& w); + /** Get the list of callbacks for the model. */ + virtual std::vector& get_callbacks() { + return m_callbacks; + } /** Return the I/O thread pool */ std::shared_ptr get_io_thread_pool() { return m_io_thread_pool; } @@ -152,31 +132,22 @@ class model { return m_comm; } - /** Get the current epoch for the model. */ - inline int get_cur_epoch() const { - return m_current_epoch; - } - /** Get the current step for the model. */ - inline int get_cur_step() const { - return m_current_step; /// @todo This should be renamed to get_cur_training step and replaced with one that returns the current based on execution mode - } + void set_execution_mode(execution_mode mode); + execution_mode get_execution_mode() const noexcept; + + /** Number of times the training set has been traversed. */ + inline El::Int get_epoch() const noexcept { return m_epoch; } + + /** @brief Current mini-batch step for current execution mode. + * @detailed Step counts are not reset after each epoch. + */ + El::Int get_step() const noexcept; + + /** @brief Current mini-batch step for given execution mode. + * @detailed Step counts are not reset after each epoch. + */ + El::Int get_step(execution_mode mode) const noexcept; - /** Get the current validation step for the model. 
*/ - inline int get_cur_validation_step() const { - return m_current_validation_step; - } - /** Get the current testing step for the model. */ - inline int get_cur_testing_step() const { - return m_current_testing_step; - } - /** Set the model (and all layers') execution mode. */ - inline void set_execution_mode(execution_mode mode) { - m_execution_mode = mode; - } - /** Get the model's execution mode. */ - inline execution_mode get_execution_mode() const { - return m_execution_mode; - } /** Set the model's current mini-batch size. */ inline void set_current_mini_batch_size(int mini_batch_size) { m_current_mini_batch_size = mini_batch_size; @@ -204,17 +175,6 @@ class model { return m_current_phase; } - /** - * Summarize statistics (e.g. timers, counters); these should be computable - * quickly. - */ - virtual void summarize_stats(lbann_summary& summarizer); - /** - * Summarize matrices (e.g. means); these are called less frequently and can - * be more expensive. - */ - virtual void summarize_matrices(lbann_summary& summarizer); - /** Return true if the flag to stop training is set. */ bool get_terminate_training() const { return m_terminate_training; @@ -224,11 +184,59 @@ class model { m_terminate_training = f; } - /** Train model. */ - virtual void train(int num_epochs, int num_batches=0); + // =========================================== + // Model specification + // =========================================== + + /** Add layer to model. */ + virtual void add_layer(std::unique_ptr l); + + /** Add weights to model. */ + void add_weights(weights *w); + + /** Register a new callback for the model. */ + void add_callback(lbann_callback *cb); + + /** Register a new metric for the model. */ + void add_metric(metric *m); + + /** Replace the model's weights. */ + void replace_weights(std::vector& w); + + /** Copy trained weights from input parameter w. + * Only weight values are placed, pointers and layer structure are in place. 
+ * Weights to be copied are of the same name */ + void copy_trained_weights_from(std::vector& w); + + /** Construct an instance of the default optimizer. + * If there is no default optimizer, a null pointer is returned. + */ + optimizer* create_optimizer() const; + + /** Set a flag that can be used to enable / disable the background I/O activities */ + void allow_background_io_activity(bool enable) { m_background_io_allowed = enable; } + + /** Are background I/O activities enabled by the input layers */ + bool background_io_activity_allowed() { return m_background_io_allowed; } + + // =========================================== + // Setup + // =========================================== + + /** @details Must be called after model specification and before + * execution. */ + virtual void setup(std::shared_ptr io_thread_pool); + + // =========================================== + // Execution + // =========================================== + /** Evaluate model. */ virtual void evaluate(execution_mode mode, int num_batches=0); + /** Train model. */ + virtual void train(int num_epochs, int num_batches=0); + /** Run one epoch using only the input layer; this supports * data_store functionality */ @@ -238,11 +246,24 @@ mode requested */ virtual void collect_background_data_fetch(execution_mode mode); - /** Set a flag that can be used to enable / disable the background I/O activities */ - void allow_background_io_activity(bool enable) { m_background_io_allowed = enable; } + // =========================================== + // Summarizer + // =========================================== - /** Are background I/O activities enabled by the input layers */ - bool background_io_activity_allowed() { return m_background_io_allowed; } + /** + * Summarize statistics (e.g. timers, counters); these should be computable + * quickly. + */ + virtual void summarize_stats(lbann_summary& summarizer); + /** + * Summarize matrices (e.g.
means); these are called less frequently and can + * be more expensive. + */ + virtual void summarize_matrices(lbann_summary& summarizer); + + // =========================================== + // Checkpointing + // =========================================== /** Checkpoint model to given file descriptor, return number of bytes written */ virtual bool save_to_checkpoint_shared(persist& p); @@ -267,63 +288,6 @@ class model { protected: - /** The objective function used to train the model. */ - objective_function *m_objective_function; - /** Give model a name. */ - std::string m_name; - /** The model's current execution mode. */ - execution_mode m_execution_mode; - /** Flag telling the model to terminate training. */ - bool m_terminate_training; - /** Most recent/current epoch for the model. */ - int m_current_epoch; - /** Most recent/current training step for the model. */ - int m_current_step; - int m_current_validation_step; - int m_current_testing_step; - /** @details Maximum possible minibatch size supported by layers in - * this model. Note that this is local to the particular model, - * not across multiple models. - */ - int m_max_mini_batch_size; - /** Size of the current mini-batch in the model. */ - int m_current_mini_batch_size; - /** The "effective" size of a minibatch. - * - * This is the size of the minibatch across all models and used for - * e.g. correctly averaging gradients from multiple models. - */ - int m_effective_mini_batch_size; - /** current phase (multiple of epoch counts) in training a model */ - int m_current_phase; - /** Communicator for the model. */ - lbann_comm *m_comm; - /** Current callbacks to process. */ - std::vector m_callbacks; - - /** Default optimizer. - * - * If a layer needs to construct an optimizer during setup, it will - * make a copy of the default optimizer. - */ - optimizer *m_default_optimizer; - - /** List of model metrics. 
- * - * A metric can be used to evaluate the performance of the model - * without affecting the training process. - */ - std::vector m_metrics; - - /** List of weights in model. */ - std::vector m_weights; - - /** Threads available for I/O */ - std::shared_ptr m_io_thread_pool; - - /** Flag that allows input layers to fetch data in the background */ - bool m_background_io_allowed; - /** Check if the model execution mode is valid. */ virtual bool is_execution_mode_valid(execution_mode mode) const; @@ -462,6 +426,78 @@ class model { private: + /** Mathematical function to be minimized during training. */ + objective_function* m_objective_function; + + /** @brief Model instance's name. + * @details Each model in a trainer should have a unique, + * preferably human-readable, name. + */ + std::string m_name; + + /** Current execution mode. */ + execution_mode m_execution_mode = execution_mode::training; + + /** @brief Whether to terminate training. + * @details If set to true, training will terminate immediately + * before the next epoch. + */ + bool m_terminate_training = false; + + /** Current epoch. */ + El::Int m_epoch = 0; + /** @brief Current mini-batch step for each execution mode. + * @details Step counts are not reset after each epoch. + */ + std::map m_step; + + /** Most recent/current training step for the model. */ + int m_current_step; + int m_current_validation_step; + int m_current_testing_step; + /** @details Maximum possible minibatch size supported by layers in + * this model. Note that this is local to the particular model, + * not across multiple models. + */ + int m_max_mini_batch_size; + /** Size of the current mini-batch in the model. */ + int m_current_mini_batch_size; + /** The "effective" size of a minibatch. + * + * This is the size of the minibatch across all models and used for + * e.g. correctly averaging gradients from multiple models.
+ */ + int m_effective_mini_batch_size; + /** current phase (multiple of epoch counts) in training a model */ + int m_current_phase; + /** Communicator for the model. */ + lbann_comm *m_comm; + /** Current callbacks to process. */ + std::vector m_callbacks; + + /** Default optimizer. + * + * If a layer needs to construct an optimizer during setup, it will + * make a copy of the default optimizer. + */ + optimizer *m_default_optimizer; + + /** List of model metrics. + * + * A metric can be used to evaluate the performance of the model + * without affecting the training process. + */ + std::vector m_metrics; + + /** List of weights in model. */ + std::vector m_weights; + + /** Threads available for I/O */ + std::shared_ptr m_io_thread_pool; + + /** Flag that allows input layers to fetch data in the background */ + bool m_background_io_allowed; + /** @brief List of layers in model. * @details The list is in execution order for forward propagation. */ diff --git a/model_zoo/lbann.cpp b/model_zoo/lbann.cpp index 5a4d0741bc4..d46a510f459 100644 --- a/model_zoo/lbann.cpp +++ b/model_zoo/lbann.cpp @@ -85,6 +85,10 @@ int main(int argc, char *argv[]) { model *model = build_model_from_prototext(argc, argv, pb, comm, io_thread_pool, true); + /// @todo Remove + auto* model_copy = model->copy(); + delete model_copy; + if (! 
(opts->has_bool("exit_after_setup") && opts->get_bool("exit_after_setup"))) { // Train model diff --git a/src/callbacks/callback_check_dataset.cpp b/src/callbacks/callback_check_dataset.cpp index 602de3460e9..33fb1403eba 100644 --- a/src/callbacks/callback_check_dataset.cpp +++ b/src/callbacks/callback_check_dataset.cpp @@ -58,16 +58,16 @@ void lbann_callback_check_dataset::add_to_set(model *m, Layer *l, int64_t step, } void lbann_callback_check_dataset::on_forward_prop_end(model *m, Layer *l) { - add_to_set(m, l, m->get_cur_step(), training_set); + add_to_set(m, l, m->get_step(), training_set); } void lbann_callback_check_dataset::on_evaluate_forward_prop_end(model *m, Layer *l) { switch(m->get_execution_mode()) { case execution_mode::validation: - add_to_set(m, l, m->get_cur_validation_step(), validation_set); + add_to_set(m, l, m->get_step(), validation_set); break; case execution_mode::testing: - add_to_set(m, l, m->get_cur_testing_step(), testing_set); + add_to_set(m, l, m->get_step(), testing_set); break; default: throw lbann_exception("lbann_callback_check_dataset: invalid execution phase"); diff --git a/src/callbacks/callback_check_init.cpp b/src/callbacks/callback_check_init.cpp index 529cbc42740..ba5a0d04f57 100644 --- a/src/callbacks/callback_check_init.cpp +++ b/src/callbacks/callback_check_init.cpp @@ -33,7 +33,7 @@ namespace lbann { void lbann_callback_check_init::on_train_begin(model *m) { // Skip after the first epoch. 
- if (m->get_cur_epoch() != 0) { + if (m->get_epoch() != 0) { return; } lbann_comm *comm = m->get_comm(); diff --git a/src/callbacks/callback_checknan.cpp b/src/callbacks/callback_checknan.cpp index 5e627b64a61..1d2f8d9525f 100644 --- a/src/callbacks/callback_checknan.cpp +++ b/src/callbacks/callback_checknan.cpp @@ -80,8 +80,8 @@ void dump_network(model *m) { std::stringstream ss; ss << "model" << m->get_comm()->get_trainer_rank() << "-rank" << m->get_comm()->get_rank_in_trainer() - << "-epoch" << m->get_cur_epoch() - << "-step" << m->get_cur_step() + << "-epoch" << m->get_epoch() + << "-step" << m->get_step(execution_mode::training) << "-" << l->get_name() << "-"; const std::string prefix = ss.str(); for (int i = 0; i < l->get_num_children(); ++i) { @@ -99,8 +99,8 @@ void dump_network(model *m) { std::stringstream ss; ss << "model" << m->get_comm()->get_trainer_rank() << "-rank" << m->get_comm()->get_rank_in_trainer() - << "-epoch" << m->get_cur_epoch() - << "-step" << m->get_cur_step() + << "-epoch" << m->get_epoch() + << "-step" << m->get_step(execution_mode::training) << "-" << w->get_name() << "-"; const std::string prefix = ss.str(); El::Write(w->get_values().LockedMatrix(), diff --git a/src/callbacks/callback_checkpoint.cpp b/src/callbacks/callback_checkpoint.cpp index 35d06149caf..d33b4fbed37 100644 --- a/src/callbacks/callback_checkpoint.cpp +++ b/src/callbacks/callback_checkpoint.cpp @@ -76,7 +76,7 @@ bool lbann_callback_checkpoint::need_checkpoint(model *m) { m_checkpoint_shared = false; m_checkpoint_dist = false; lbann_comm *comm = m->get_comm(); - int cur_epoch = m->get_cur_epoch(); + int cur_epoch = m->get_epoch(); // If we are at the end of a training epoch and the training epoch lands on defined interval, ckpt if (!m_checkpoint_shared && m_checkpoint_epochs > 0 && (p.get_cb_type() == callback_type::epoch || p.get_cb_type() == callback_type::validation)){ m_checkpoint_shared = (cur_epoch > 0) && (cur_epoch % m_checkpoint_epochs == 0); @@ -88,11 
+88,11 @@ bool lbann_callback_checkpoint::need_checkpoint(model *m) { // If we are at the end of a training mb step and the training mb step lands on defined interval, trigger checkpoint if (!m_checkpoint_shared && m_checkpoint_steps > 0) { - m_checkpoint_shared = (m->get_cur_step() > 0) && (m->get_cur_step() % m_checkpoint_steps == 0); + m_checkpoint_shared = (m->get_step(execution_mode::training) > 0) && (m->get_step(execution_mode::training) % m_checkpoint_steps == 0); } if(!m_checkpoint_dist && m_ckpt_dist_steps > 0){ - m_checkpoint_dist = (m->get_cur_step() > 0) && (m->get_cur_step() % m_ckpt_dist_steps == 0); + m_checkpoint_dist = (m->get_step(execution_mode::training) > 0) && (m->get_step(execution_mode::training) % m_ckpt_dist_steps == 0); } // check the clock if time-based checkpoint is enabled @@ -135,8 +135,8 @@ bool lbann_callback_checkpoint::checkpoint(model *m) { comm->trainer_barrier(); // let user know we're saving a checkpoint if (comm->am_trainer_master()) { - epoch = m->get_cur_epoch(); - step = m->get_cur_step(); + epoch = m->get_epoch(); + step = m->get_step(execution_mode::training); timer.Start(); printf("Checkpoint: epoch %d step %d ...\n", epoch, step); fflush(stdout); diff --git a/src/callbacks/callback_checksmall.cpp b/src/callbacks/callback_checksmall.cpp index 4982b77733b..578e9fde39b 100644 --- a/src/callbacks/callback_checksmall.cpp +++ b/src/callbacks/callback_checksmall.cpp @@ -36,7 +36,7 @@ void lbann_callback_checksmall::on_forward_prop_end(model *m, Layer *l) { ss << name() << ": " << "[" << std::to_string(m->get_comm()->get_rank_in_world()) << "]: " << "error in activations of " << l->get_name() << " " - << "(step=" << std::to_string(m->get_cur_step()) << ")"; + << "(step=" << std::to_string(m->get_step(execution_mode::training)) << ")"; throw lbann_exception(ss.str()); } } @@ -49,7 +49,7 @@ void lbann_callback_checksmall::on_backward_prop_end(model *m) { ss << name() << ": " << "[" << 
std::to_string(m->get_comm()->get_rank_in_world()) << "]: " << "error in weights gradient of " << w->get_name() << " " - << "(step=" << std::to_string(m->get_cur_step()) << ")"; + << "(step=" << std::to_string(m->get_step(execution_mode::training)) << ")"; throw lbann_exception(ss.str()); } } @@ -62,7 +62,7 @@ void lbann_callback_checksmall::on_batch_end(model *m) { ss << name() << ": " << "[" << std::to_string(m->get_comm()->get_rank_in_world()) << "]: " << "error in weights of " << w->get_name() << " " - << "(step=" << std::to_string(m->get_cur_step()-1) << ")"; + << "(step=" << std::to_string(m->get_step(execution_mode::training)-1) << ")"; throw lbann_exception(ss.str()); } } diff --git a/src/callbacks/callback_confusion_matrix.cpp b/src/callbacks/callback_confusion_matrix.cpp index bc787bd9080..d0a13e09f92 100644 --- a/src/callbacks/callback_confusion_matrix.cpp +++ b/src/callbacks/callback_confusion_matrix.cpp @@ -207,10 +207,10 @@ void lbann_callback_confusion_matrix::save_confusion_matrix(const model& m) { std::string mode_string; switch (mode) { case execution_mode::training: - mode_string = "train-epoch" + std::to_string(m.get_cur_epoch()); + mode_string = "train-epoch" + std::to_string(m.get_epoch()); break; case execution_mode::validation: - mode_string = "validation-epoch" + std::to_string(m.get_cur_epoch()); + mode_string = "validation-epoch" + std::to_string(m.get_epoch()); break; case execution_mode::testing: mode_string = "test"; diff --git a/src/callbacks/callback_debug.cpp b/src/callbacks/callback_debug.cpp index 71d36a98083..0bc05c0297c 100644 --- a/src/callbacks/callback_debug.cpp +++ b/src/callbacks/callback_debug.cpp @@ -62,17 +62,8 @@ std::string weights_string(const weights& w) { std::string batch_step_string(const model& m) { std::stringstream msg; const auto& mode = m.get_execution_mode(); - msg << _to_string(mode) << " batch"; - switch (mode) { - case execution_mode::training: - msg << " " << m.get_cur_step(); break; - case 
execution_mode::validation: - msg << " " << m.get_cur_validation_step(); break; - case execution_mode::testing: - msg << " " << m.get_cur_testing_step(); break; - default: break; - } - msg << " (epoch " << m.get_cur_epoch() << ")"; + msg << _to_string(mode) << " batch " << m.get_step(); + msg << " (epoch " << m.get_epoch() << ")"; return msg.str(); } diff --git a/src/callbacks/callback_debug_io.cpp b/src/callbacks/callback_debug_io.cpp index bae9e3a7a5a..36f70ab8e9e 100644 --- a/src/callbacks/callback_debug_io.cpp +++ b/src/callbacks/callback_debug_io.cpp @@ -52,23 +52,10 @@ void lbann::lbann_callback_debug_io::on_forward_prop_begin(model *m, Layer *l) { } void lbann::lbann_callback_debug_io::print_fp_start(model *m, generic_input_layer *input) { - int64_t step; - switch(m->get_execution_mode()) { - case execution_mode::training: - step = m->get_cur_step(); - break; - case execution_mode::validation: - step = m->get_cur_validation_step(); - break; - case execution_mode::testing: - step = m->get_cur_testing_step(); - break; - default: - throw lbann_exception("Illegal execution mode in evaluate forward prop function"); - } + const auto& step = m->get_step(); std::cout << "[" << m->get_comm()->get_trainer_rank() << "." << m->get_comm()->get_rank_in_trainer() - << "] @" << m->get_cur_epoch() << "." << step + << "] @" << m->get_epoch() << "." 
<< step << " Phase: " << _to_string(m->get_execution_mode()) << " starting forward propagation for layer " << input->get_name() << " type: " << input->get_type() @@ -97,17 +84,7 @@ void lbann::lbann_callback_debug_io::print_phase_start(model *m, execution_mode } if (data_reader == nullptr) { return; } - int64_t step; - switch(mode) { - case execution_mode::training: - step = m->get_cur_step(); break; - case execution_mode::validation: - step = m->get_cur_validation_step(); break; - case execution_mode::testing: - step = m->get_cur_testing_step(); break; - default: - throw lbann_exception("Illegal execution mode in evaluate forward prop function"); - } + const auto& step = m->get_step(); if(data_reader->get_rank() < data_reader->get_num_parallel_readers()) { std::cout << "[" << m->get_comm()->get_trainer_rank() diff --git a/src/callbacks/callback_dump_error_signals.cpp b/src/callbacks/callback_dump_error_signals.cpp index c3a793aaa8b..97a510b2ea9 100644 --- a/src/callbacks/callback_dump_error_signals.cpp +++ b/src/callbacks/callback_dump_error_signals.cpp @@ -37,8 +37,8 @@ void lbann_callback_dump_error_signals::on_backward_prop_end(model *m, Layer *l) std::stringstream file; file << m_basename << "model" << m->get_comm()->get_trainer_rank() << "-" - << "epoch" << m->get_cur_epoch() << "-" - << "step" << m->get_cur_step() << "-" + << "epoch" << m->get_epoch() << "-" + << "step" << m->get_step() << "-" << l->get_name() << "-" << "ErrorSignals"; if (l->get_num_parents() > 1) { file << i; } diff --git a/src/callbacks/callback_dump_gradients.cpp b/src/callbacks/callback_dump_gradients.cpp index 7248071e4ac..a45ff04ef69 100644 --- a/src/callbacks/callback_dump_gradients.cpp +++ b/src/callbacks/callback_dump_gradients.cpp @@ -38,8 +38,8 @@ void lbann_callback_dump_gradients::on_backward_prop_end(model *m) { const std::string file = (m_basename + "model" + std::to_string(m->get_comm()->get_trainer_rank()) - + "-epoch" + std::to_string(m->get_cur_epoch()) - + "-step" + 
std::to_string(m->get_cur_step()) + + "-epoch" + std::to_string(m->get_epoch()) + + "-step" + std::to_string(m->get_step()) + "-" + w->get_name() + "-Gradient"); El::Write(opt->get_gradient(), file, El::ASCII); diff --git a/src/callbacks/callback_dump_minibatch_sample_indices.cpp b/src/callbacks/callback_dump_minibatch_sample_indices.cpp index bd189c8a436..b6f7557aaf5 100644 --- a/src/callbacks/callback_dump_minibatch_sample_indices.cpp +++ b/src/callbacks/callback_dump_minibatch_sample_indices.cpp @@ -58,8 +58,8 @@ void lbann_callback_dump_minibatch_sample_indices::dump_to_file(model *m, Layer + _to_string(m->get_execution_mode()) + "-model" + std::to_string(m->get_comm()->get_trainer_rank()) + "-rank" + std::to_string(m->get_comm()->get_rank_in_trainer()) - + "-epoch" + std::to_string(m->get_cur_epoch()) - + "-step" + std::to_string(m->get_cur_step()) + + "-epoch" + std::to_string(m->get_epoch()) + + "-step" + std::to_string(m->get_step(execution_mode::training)) + "-" + l->get_name() + "-MB_Sample_Indices"); El::Write(*indices, file, El::ASCII); @@ -67,20 +67,11 @@ void lbann_callback_dump_minibatch_sample_indices::dump_to_file(model *m, Layer } void lbann_callback_dump_minibatch_sample_indices::on_forward_prop_end(model *m, Layer *l) { - dump_to_file(m, l, m->get_cur_step()); + dump_to_file(m, l, m->get_step()); } void lbann_callback_dump_minibatch_sample_indices::on_evaluate_forward_prop_end(model *m, Layer *l) { - switch(m->get_execution_mode()) { - case execution_mode::validation: - dump_to_file(m, l, m->get_cur_validation_step()); - break; - case execution_mode::testing: - dump_to_file(m, l, m->get_cur_testing_step()); - break; - default: - throw lbann_exception("lbann_callback_dump_minibatch_sample_indices: invalid execution phase"); - } + dump_to_file(m, l, m->get_step()); } } // namespace lbann diff --git a/src/callbacks/callback_dump_outputs.cpp b/src/callbacks/callback_dump_outputs.cpp index a70689110cb..0a5bac4658a 100644 --- 
a/src/callbacks/callback_dump_outputs.cpp +++ b/src/callbacks/callback_dump_outputs.cpp @@ -134,17 +134,8 @@ void lbann_callback_dump_outputs::dump_outputs(const model& m, const Layer& l) { // Get mini-batch step information const auto& mode = m.get_execution_mode(); - const auto& epoch = m.get_cur_epoch(); - El::Int step = 0; - switch (mode) { - case execution_mode::training: - step = m.get_cur_step(); break; - case execution_mode::validation: - step = m.get_cur_validation_step(); break; - case execution_mode::testing: - step = m.get_cur_testing_step(); break; - default: LBANN_ERROR("invalid execution mode"); - } + const auto& epoch = m.get_epoch(); + const auto& step = m.get_step(); // Quit if output dump isn't needed if (!m_modes.empty() && m_modes.count(mode) == 0) { return; } diff --git a/src/callbacks/callback_dump_weights.cpp b/src/callbacks/callback_dump_weights.cpp index fff95ae7911..03b0ebe678c 100644 --- a/src/callbacks/callback_dump_weights.cpp +++ b/src/callbacks/callback_dump_weights.cpp @@ -41,7 +41,7 @@ void lbann_callback_dump_weights::on_epoch_end(model *m) { void lbann_callback_dump_weights::dump_weights(model *m, std::string s) { for (weights *w : m->get_weights()) { - std::string epoch = "-epoch" + std::to_string(m->get_cur_epoch()-1); + std::string epoch = "-epoch" + std::to_string(m->get_epoch()-1); if(s != "") { epoch = "-" + s; } diff --git a/src/callbacks/callback_imcomm.cpp b/src/callbacks/callback_imcomm.cpp index e5eeb62b9b2..2f2a5383fa5 100644 --- a/src/callbacks/callback_imcomm.cpp +++ b/src/callbacks/callback_imcomm.cpp @@ -132,7 +132,7 @@ void lbann_callback_imcomm::do_summary(model *m, weights *w, } std::string prefix = w->get_name() + "/imcomm_"; m_summarizer->reduce_scalar(prefix + "time", - im_time, m->get_cur_step()); + im_time, m->get_step(execution_mode::training)); // Use the same approximation the comm layer does. 
const CPUMat& local_gradients = static_cast(w->get_optimizer()->get_gradient().LockedMatrix()); @@ -141,9 +141,9 @@ void lbann_callback_imcomm::do_summary(model *m, weights *w, size_t bytes_received = sizeof(DataType) * local_gradients.Height() * local_gradients.Width(); m_summarizer->reduce_scalar(prefix + "bytes_sent", - bytes_sent, m->get_cur_step()); + bytes_sent, m->get_step(execution_mode::training)); m_summarizer->reduce_scalar(prefix + "bytes_received", - bytes_received, m->get_cur_step()); + bytes_received, m->get_step(execution_mode::training)); } static std::vector comm_type_names = diff --git a/src/callbacks/callback_io.cpp b/src/callbacks/callback_io.cpp index ee33cea9814..5ea1cb3b178 100644 --- a/src/callbacks/callback_io.cpp +++ b/src/callbacks/callback_io.cpp @@ -48,7 +48,7 @@ void lbann_callback_io::on_epoch_end(model *m) { std::cout << "Rank " << comm->get_trainer_rank() << "." << comm->get_rank_in_trainer() << " processed " << input->get_num_samples_trained() << " training samples of " << input->get_total_num_training_samples() << " (" - << input->get_num_samples_trained() / m->get_cur_epoch() << " per epoch)" << std::endl; + << input->get_num_samples_trained() / m->get_epoch() << " per epoch)" << std::endl; } } } @@ -64,7 +64,7 @@ void lbann_callback_io::on_test_end(model *m) { std::cout << "Rank " << comm->get_trainer_rank() << "." 
<< comm->get_rank_in_trainer() << " processed " << input->get_num_samples_tested() << " test samples of " << input->get_total_num_testing_samples() << " (" - << input->get_num_samples_tested() / m->get_cur_epoch() << " per epoch)" << std::endl; + << input->get_num_samples_tested() / m->get_epoch() << " per epoch)" << std::endl; } } } diff --git a/src/callbacks/callback_learning_rate.cpp b/src/callbacks/callback_learning_rate.cpp index 196dbd30f50..b6f3cd1c38f 100644 --- a/src/callbacks/callback_learning_rate.cpp +++ b/src/callbacks/callback_learning_rate.cpp @@ -71,7 +71,7 @@ void lbann_callback_learning_rate::on_epoch_end(model *m) { if (comm->am_trainer_master() && new_lr != old_global_lr) { std::cout << "Model " << comm->get_trainer_rank() << ": " << "changing global learning rate to " << new_lr - << " at epoch " << m->get_cur_epoch() << std::endl; + << " at epoch " << m->get_epoch() << std::endl; } for (weights *w : m_weights) { optimizer *opt = w->get_optimizer(); @@ -102,7 +102,7 @@ lbann_callback_step_learning_rate::lbann_callback_step_learning_rate( lbann_callback_learning_rate(weights_list), m_step(step), m_amt(amt) {} float lbann_callback_step_learning_rate::global_schedule(model *m) { - if (m->get_cur_epoch() % m_step == 0) { + if (m->get_epoch() % m_step == 0) { return m_cur_global_lr * m_amt; } else { return m_cur_global_lr; @@ -120,8 +120,8 @@ lbann_callback_adaptive_learning_rate::lbann_callback_adaptive_learning_rate( float lbann_callback_adaptive_learning_rate::global_schedule(model *m) { // Determine behavior the first time this is called in an epoch - if (m_cur_epoch != m->get_cur_epoch()) { - m_cur_epoch = m->get_cur_epoch(); + if (m_cur_epoch != m->get_epoch()) { + m_cur_epoch = m->get_epoch(); const execution_mode mode = m->get_execution_mode(); const EvalType score = m->get_objective_function()->get_mean_value(mode); if (score < m_last_score) { @@ -164,12 +164,12 @@ 
lbann_callback_drop_fixed_learning_rate::lbann_callback_drop_fixed_learning_rate float lbann_callback_drop_fixed_learning_rate::global_schedule(model* m) { // Delete last drop epoch if we have already passed it while (!m_drop_epochs.empty() - && m->get_cur_epoch() > m_drop_epochs.back()) { + && m->get_epoch() > m_drop_epochs.back()) { m_drop_epochs.pop_back(); } // Adjust learning rate if at a drop epoch - if (!m_drop_epochs.empty() && m->get_cur_epoch() == m_drop_epochs.back()) { + if (!m_drop_epochs.empty() && m->get_epoch() == m_drop_epochs.back()) { return m_cur_global_lr * m_amt; } else { return m_cur_global_lr; @@ -203,10 +203,10 @@ void lbann_callback_linear_growth_learning_rate::setup(model *m) { } float lbann_callback_linear_growth_learning_rate::global_schedule(model *m) { - if (m->get_cur_epoch() < m_delay) { + if (m->get_epoch() < m_delay) { return m_cur_global_lr; - } else if (m->get_cur_epoch() <= m_num_epochs + m_delay) { - int num_left = m_num_epochs + m_delay - m->get_cur_epoch(); + } else if (m->get_epoch() <= m_num_epochs + m_delay) { + int num_left = m_num_epochs + m_delay - m->get_epoch(); return m_base_lr + m_inc*(m_num_epochs - num_left); } else { return m_cur_global_lr; @@ -255,7 +255,7 @@ float lbann_callback_poly_learning_rate::global_schedule(model *m) { * Compute the learning rate for the next iteration. 
*/ float lbann_callback_poly_learning_rate::optimizer_schedule(model *m, optimizer &opt) { - const uint64_t cur_iter = static_cast(m->get_cur_step()); + const uint64_t cur_iter = static_cast(m->get_step(execution_mode::training)); if (m_max_iter > cur_iter) { m_lr = static_cast(std::pow(static_cast(m_max_iter - cur_iter)/m_max_iter, m_p)); } diff --git a/src/callbacks/callback_ltfb.cpp b/src/callbacks/callback_ltfb.cpp index c136628affd..433c813249c 100644 --- a/src/callbacks/callback_ltfb.cpp +++ b/src/callbacks/callback_ltfb.cpp @@ -160,7 +160,7 @@ void exchange_models__checkpoint_file(lbann_comm& comm, // Checkpoint directories const auto local_trainer = comm.get_trainer_rank(); - const auto step = m.get_cur_step(); + const auto step = m.get_step(); const std::string send_dir = (m.get_name() + "_trainer" + std::to_string(local_trainer) + "_step" + std::to_string(step)); @@ -219,7 +219,7 @@ void restore_local_model__checkpoint_file(lbann_comm& comm, model& m) { // Checkpoint directories const auto local_trainer = comm.get_trainer_rank(); - const auto step = m.get_cur_step(); + const auto step = m.get_step(); const std::string checkpoint_dir = (m.get_name() + "_trainer" + std::to_string(local_trainer) + "_step" + std::to_string(step)); @@ -345,7 +345,7 @@ void lbann_callback_ltfb::on_batch_begin(model *m) { // Check whether to start LTFB round const auto mode = m->get_execution_mode(); - const auto step = m->get_cur_step(); + const auto step = m->get_step(); if (mode != execution_mode::training || step == 0) { return; } // Print message diff --git a/src/callbacks/callback_perturb_adam.cpp b/src/callbacks/callback_perturb_adam.cpp index 523a5fdb370..5f877b35e86 100644 --- a/src/callbacks/callback_perturb_adam.cpp +++ b/src/callbacks/callback_perturb_adam.cpp @@ -49,7 +49,7 @@ void lbann_callback_perturb_adam::setup(model* m) { } void lbann_callback_perturb_adam::on_batch_begin(model* m) { - if (m_perturb_during_training && m->get_cur_step() > 0) { + if 
(m_perturb_during_training && m->get_step() > 0) { perturb(*m); } } diff --git a/src/callbacks/callback_print.cpp b/src/callbacks/callback_print.cpp index 92caf987c5f..0295eb12b81 100644 --- a/src/callbacks/callback_print.cpp +++ b/src/callbacks/callback_print.cpp @@ -58,7 +58,7 @@ void lbann_callback_print::on_epoch_begin(model *m) { // Print message std::cout << "--------------------------------------------------------------------------------" << std::endl; - std::cout << "[" << m->get_cur_epoch() << "] Epoch : stats formated [tr/v/te]" + std::cout << "[" << m->get_epoch() << "] Epoch : stats formated [tr/v/te]" << " iter/epoch =" << " [" << input->get_num_iterations_per_epoch(execution_mode::training) @@ -135,7 +135,7 @@ void lbann_callback_print::report_results(model *m) { std::string mode_string; switch (mode) { case execution_mode::training: - mode_string = "training epoch " + std::to_string(m->get_cur_epoch()-1); + mode_string = "training epoch " + std::to_string(m->get_epoch()-1); break; case execution_mode::validation: mode_string = "validation"; diff --git a/src/callbacks/callback_replace_weights.cpp b/src/callbacks/callback_replace_weights.cpp index ad86e15f0df..ef92a1333bf 100644 --- a/src/callbacks/callback_replace_weights.cpp +++ b/src/callbacks/callback_replace_weights.cpp @@ -29,11 +29,11 @@ namespace lbann { void lbann_callback_replace_weights::on_batch_end(model *m) { - const auto& step = m->get_cur_step(); + const auto& step = m->get_step(execution_mode::training); if(step % m_batch_interval == 0) { for(size_t i = 0; i < m_src_layers.size(); i++) { m_dst_layers[i]->replace_weights(m_src_layers[i]); - } + } } } diff --git a/src/callbacks/callback_save_images.cpp b/src/callbacks/callback_save_images.cpp index 6081182c85d..3eede6fe3d0 100644 --- a/src/callbacks/callback_save_images.cpp +++ b/src/callbacks/callback_save_images.cpp @@ -117,13 +117,13 @@ void save_image(std::string prefix, // Write image to file cv::imwrite(prefix + "-" + name + "." 
+ format, img); - + } - + } #endif // LBANN_HAS_OPENCV } - + } // namespace lbann_callback_save_images::lbann_callback_save_images(std::vector layer_names, @@ -139,7 +139,7 @@ lbann_callback_save_images::lbann_callback_save_images(std::vector } void lbann_callback_save_images::on_epoch_end(model *m) { - save_image(m_image_prefix + "epoch" + std::to_string(m->get_cur_epoch()), + save_image(m_image_prefix + "epoch" + std::to_string(m->get_epoch()), m_image_format, m->get_layers(), m_layer_names); diff --git a/src/callbacks/callback_save_model.cpp b/src/callbacks/callback_save_model.cpp index 1b49cc15b2a..cf6e90e8717 100644 --- a/src/callbacks/callback_save_model.cpp +++ b/src/callbacks/callback_save_model.cpp @@ -93,8 +93,8 @@ bool lbann_callback_save_model::save_model_weights(model *m) { lbann_comm *comm = m->get_comm(); comm->trainer_barrier(); // let user know we're saving the weights - int epoch = m->get_cur_epoch(); - int step = m->get_cur_step(); + int epoch = m->get_epoch(); + int step = m->get_step(execution_mode::training); if (comm->am_trainer_master()) { timer.Start(); printf("[%s.%d] Saving model weights: epoch %d step %d ...\n", m->get_name().c_str(), comm->get_trainer_rank(), epoch, step); diff --git a/src/callbacks/callback_summary.cpp b/src/callbacks/callback_summary.cpp index f0064d5ce45..02f14de3027 100644 --- a/src/callbacks/callback_summary.cpp +++ b/src/callbacks/callback_summary.cpp @@ -48,7 +48,7 @@ void lbann_callback_summary::on_train_begin(model *m) { void lbann_callback_summary::on_batch_end(model *m) { prof_region_begin("summary-batch", prof_colors[0], false); m->summarize_stats(*m_summarizer); - if (m_mat_interval > 0 && m->get_cur_step() % m_mat_interval == 0) { + if (m_mat_interval > 0 && m->get_step(execution_mode::training) % m_mat_interval == 0) { m->summarize_matrices(*m_summarizer); } lbann_comm *comm = m->get_comm(); @@ -58,15 +58,15 @@ void lbann_callback_summary::on_batch_end(model *m) { size_t intertrainer_barriers = 
comm->get_num_intertrainer_barriers(); size_t global_barriers = comm->get_num_global_barriers(); comm->reset_stats_counters(); - m_summarizer->sum_reduce_scalar("bytes_sent", bytes_sent, m->get_cur_step()); + m_summarizer->sum_reduce_scalar("bytes_sent", bytes_sent, m->get_step(execution_mode::training)); m_summarizer->sum_reduce_scalar("bytes_received", bytes_received, - m->get_cur_step()); + m->get_step(execution_mode::training)); m_summarizer->reduce_scalar("trainer_barriers", trainer_barriers, - m->get_cur_step()); + m->get_step(execution_mode::training)); m_summarizer->reduce_scalar("intertrainer_barriers", intertrainer_barriers, - m->get_cur_step()); + m->get_step(execution_mode::training)); m_summarizer->reduce_scalar("global_barriers", global_barriers, - m->get_cur_step()); + m->get_step(execution_mode::training)); prof_region_end("summary-batch", false); } @@ -79,7 +79,7 @@ void lbann_callback_summary::on_epoch_end(model *m) { std::transform(metric_name.begin(), metric_name.end(), metric_name.begin(), [] (char c) { return c == ' ' ? '_' : c; }); std::string phase = "train_" + metric_name; - m_summarizer->reduce_scalar(phase, train_score, m->get_cur_step()); + m_summarizer->reduce_scalar(phase, train_score, m->get_step(execution_mode::training)); } save_histograms(m); m_summarizer->flush(); @@ -96,7 +96,7 @@ void lbann_callback_summary::on_test_end(model *m) { std::transform(metric_name.begin(), metric_name.end(), metric_name.begin(), [] (char c) { return c == ' ' ? '_' : c; }); std::string phase = "test_" + metric_name; - m_summarizer->reduce_scalar(phase, test_score, m->get_cur_step()); + m_summarizer->reduce_scalar(phase, test_score, m->get_step(execution_mode::training)); } // Reset counters incremented during test phase. 
comm->reset_stats_counters(); @@ -113,7 +113,7 @@ void lbann_callback_summary::save_histograms(model *m) { AbsDistMatReadProxy acts(layer->get_activations(i)); m_summarizer->reduce_histogram(prefix + "activations" + std::to_string(i), acts.GetLocked(), - m->get_cur_step()); + m->get_step(execution_mode::training)); } } for (const auto& w : m->get_weights()) { @@ -121,13 +121,13 @@ void lbann_callback_summary::save_histograms(model *m) { AbsDistMatReadProxy weights(w->get_values()); m_summarizer->reduce_histogram(prefix + "weights", weights.GetLocked(), - m->get_cur_step()); + m->get_step(execution_mode::training)); optimizer *opt = w->get_optimizer(); if (opt != nullptr) { AbsDistMatReadProxy gradients(opt->get_gradient()); m_summarizer->reduce_histogram(prefix + "weights_gradient", gradients.GetLocked(), - m->get_cur_step()); + m->get_step(execution_mode::training)); } } } diff --git a/src/callbacks/callback_timer.cpp b/src/callbacks/callback_timer.cpp index 8fe764cce95..358a3657425 100644 --- a/src/callbacks/callback_timer.cpp +++ b/src/callbacks/callback_timer.cpp @@ -40,8 +40,8 @@ void lbann_callback_timer::batch_timing_end(const model& m) { const auto& batch_time = get_time() - m_batch_start_times[mode]; m_batch_times[mode].push_back(batch_time); if (m_summarizer != nullptr) { - m_summarizer->reduce_scalar("minibatch_time", batch_time, m.get_cur_step()-1); - m_summarizer->reduce_scalar_all("minibatch_time", batch_time, m.get_cur_step()-1); + m_summarizer->reduce_scalar("minibatch_time", batch_time, m.get_step(execution_mode::training)-1); + m_summarizer->reduce_scalar_all("minibatch_time", batch_time, m.get_step(execution_mode::training)-1); } } @@ -88,7 +88,7 @@ void lbann_callback_timer::timing_end(model& m) { std::string mode_string; switch(mode) { case execution_mode::training: - mode_string = "training epoch " + std::to_string(m.get_cur_epoch()-1); + mode_string = "training epoch " + std::to_string(m.get_epoch()-1); break; case execution_mode::validation: 
mode_string = "validation"; diff --git a/src/callbacks/callback_variable_minibatch.cpp b/src/callbacks/callback_variable_minibatch.cpp index d2c947450ea..617e57402d4 100644 --- a/src/callbacks/callback_variable_minibatch.cpp +++ b/src/callbacks/callback_variable_minibatch.cpp @@ -39,7 +39,7 @@ lbann_callback_variable_minibatch::lbann_callback_variable_minibatch( void lbann_callback_variable_minibatch::on_train_begin(model *m) { // Avoid issues with the train method being called multiple times. - if (m->get_cur_epoch() != 0) { return; } + if (m->get_epoch() != 0) { return; } // Get first input layer in model generic_input_layer* input = nullptr; @@ -103,12 +103,12 @@ void lbann_callback_variable_minibatch::on_epoch_end(model *m) { std::cout << "Model " << comm->get_trainer_rank() << ": Changing mini-batch size to " << new_mbsize << " and learning rate to " << new_lr << " at epoch " << - m->get_cur_epoch() << std::endl; + m->get_epoch() << std::endl; } } else if (comm->am_trainer_master()) { std::cout << "Model " << comm->get_trainer_rank() << ": Changing mini-batch size to " << new_mbsize << - " at epoch " << m->get_cur_epoch() << std::endl; + " at epoch " << m->get_epoch() << std::endl; } } // Ramp the learning rate, if needed. 
@@ -152,7 +152,7 @@ lbann_callback_step_minibatch::lbann_callback_step_minibatch( bool lbann_callback_step_minibatch::schedule( model *m, int& new_mbsize, float& new_lr, int& ramp_time) { - if (m->get_cur_epoch() % m_step == 0) { + if (m->get_epoch() % m_step == 0) { new_mbsize = m_current_mini_batch_size * 2; new_lr = get_current_learning_rate(m) * 2; ramp_time = m_ramp_time; @@ -173,7 +173,7 @@ lbann_callback_minibatch_schedule::lbann_callback_minibatch_schedule( bool lbann_callback_minibatch_schedule::schedule( model *m, int& new_mbsize, float& new_lr, int& ramp_time) { - if (!m_steps.empty() && m->get_cur_epoch() == m_steps.back().epoch) { + if (!m_steps.empty() && m->get_epoch() == m_steps.back().epoch) { new_mbsize = m_steps.back().mbsize; new_lr = m_steps.back().lr; ramp_time = m_steps.back().ramp_time; diff --git a/src/callbacks/profiler.cpp b/src/callbacks/profiler.cpp index d12119a533c..b75df354f8d 100644 --- a/src/callbacks/profiler.cpp +++ b/src/callbacks/profiler.cpp @@ -50,55 +50,55 @@ lbann_callback_profiler::lbann_callback_profiler(bool sync, bool skip_init) : void lbann_callback_profiler::on_epoch_begin(model *m) { // Skip the first epoch - if (m_skip_init && m->get_cur_epoch() == 1) { + if (m_skip_init && m->get_epoch() == 1) { prof_start(); } - prof_region_begin(("epoch " + std::to_string(m->get_cur_epoch())).c_str(), + prof_region_begin(("epoch " + std::to_string(m->get_epoch())).c_str(), prof_colors[0], m_sync); } void lbann_callback_profiler::on_epoch_end(model *m) { - prof_region_end(("epoch " + std::to_string(m->get_cur_epoch())).c_str(), + prof_region_end(("epoch " + std::to_string(m->get_epoch())).c_str(), m_sync); } void lbann_callback_profiler::on_validation_begin(model *m) { - prof_region_begin(("val " + std::to_string(m->get_cur_epoch())).c_str(), + prof_region_begin(("val " + std::to_string(m->get_epoch())).c_str(), prof_colors[0], m_sync); } void lbann_callback_profiler::on_validation_end(model *m) { - prof_region_end(("val " + 
std::to_string(m->get_cur_epoch())).c_str(), + prof_region_end(("val " + std::to_string(m->get_epoch())).c_str(), m_sync); } void lbann_callback_profiler::on_test_begin(model *m) { - prof_region_begin(("test " + std::to_string(m->get_cur_epoch())).c_str(), + prof_region_begin(("test " + std::to_string(m->get_epoch())).c_str(), prof_colors[0], m_sync); } void lbann_callback_profiler::on_test_end(model *m) { - prof_region_end(("test " + std::to_string(m->get_cur_epoch())).c_str(), + prof_region_end(("test " + std::to_string(m->get_epoch())).c_str(), m_sync); } void lbann_callback_profiler::on_batch_begin(model *m) { - prof_region_begin(("batch " + std::to_string(m->get_cur_step())).c_str(), + prof_region_begin(("batch " + std::to_string(m->get_step(execution_mode::training))).c_str(), prof_colors[1], m_sync); } void lbann_callback_profiler::on_batch_end(model *m) { - prof_region_end(("batch " + std::to_string(m->get_cur_step())).c_str(), + prof_region_end(("batch " + std::to_string(m->get_step(execution_mode::training))).c_str(), m_sync); } void lbann_callback_profiler::on_batch_evaluate_begin(model *m) { - prof_region_begin(("batch eval " + std::to_string(m->get_cur_step())).c_str(), + prof_region_begin(("batch eval " + std::to_string(m->get_step(execution_mode::training))).c_str(), prof_colors[1], m_sync); } void lbann_callback_profiler::on_batch_evaluate_end(model *m) { - prof_region_end(("batch eval " + std::to_string(m->get_cur_step())).c_str(), + prof_region_end(("batch eval " + std::to_string(m->get_step(execution_mode::training))).c_str(), m_sync); } diff --git a/src/data_readers/data_reader.cpp b/src/data_readers/data_reader.cpp index 3dc2f6db45f..2fa2a1c8960 100644 --- a/src/data_readers/data_reader.cpp +++ b/src/data_readers/data_reader.cpp @@ -711,13 +711,13 @@ void generic_data_reader::setup_data_store(model *m, int mini_batch_size) { bool generic_data_reader::data_store_active() const { return (m_data_store != nullptr && (m_model->get_execution_mode() 
== execution_mode::training) - && m_model->get_cur_epoch() > 0); + && m_model->get_epoch() > 0); } bool generic_data_reader::priming_data_store() const { return (m_data_store != nullptr && (m_model->get_execution_mode() == execution_mode::training) - && m_model->get_cur_epoch() == 0); + && m_model->get_epoch() == 0); } void generic_data_reader::set_data_store(generic_data_store *g) { diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index 6e944a97905..53ab9493fb8 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -1401,14 +1401,14 @@ bool data_reader_jag_conduit::fetch_response(CPUMat& X, int data_id, int mb_idx) bool ok = true; // Create a node to hold all of the data conduit::Node node; - if (m_jag_store != nullptr && m_model->get_cur_epoch() > 0) { + if (m_jag_store != nullptr && m_model->get_epoch() > 0) { const conduit::Node& ds_node = m_jag_store->get_conduit_node(data_id); node.set_external(ds_node); } for(size_t i = 0u; ok && (i < X_v.size()); ++i) { ok = fetch(X_v[i], data_id, node, 0, tid, m_dependent[i], "response"); } - if (m_jag_store != nullptr && m_model->get_cur_epoch() == 0) { + if (m_jag_store != nullptr && m_model->get_epoch() == 0) { // Once the node has been populated save it in the data store if (m_jag_store != nullptr) { m_jag_store->set_conduit_node(data_id, node); diff --git a/src/data_readers/data_reader_moving_mnist.cpp b/src/data_readers/data_reader_moving_mnist.cpp index ebe7c779da6..e1b495d31bb 100644 --- a/src/data_readers/data_reader_moving_mnist.cpp +++ b/src/data_readers/data_reader_moving_mnist.cpp @@ -90,7 +90,7 @@ bool moving_mnist_reader::fetch_datum(CPUMat& X, int data_id, int col) { for (El::Int obj = 0; obj < m_num_objects; ++obj) { size_t hash = 1234; hash_combine(hash, data_id); - hash_combine(hash, m_model->get_cur_epoch()); + hash_combine(hash, m_model->get_epoch()); hash_combine(hash, obj); 
raw_image_indices[obj] = hash % m_num_raw_images; } @@ -225,7 +225,7 @@ bool moving_mnist_reader::fetch_label(CPUMat& Y, int data_id, int col) { for (El::Int obj = 0; obj < m_num_objects; ++obj) { size_t hash = 1234; hash_combine(hash, data_id); - hash_combine(hash, m_model->get_cur_epoch()); + hash_combine(hash, m_model->get_epoch()); hash_combine(hash, obj); raw_image_indices[obj] = hash % m_num_raw_images; } diff --git a/src/data_store/data_store_jag.cpp b/src/data_store/data_store_jag.cpp index e25fad71937..d764310ba84 100644 --- a/src/data_store/data_store_jag.cpp +++ b/src/data_store/data_store_jag.cpp @@ -245,7 +245,7 @@ const conduit::Node & data_store_jag::get_conduit_node(int data_id) const { std::unordered_map::const_iterator t2 = m_minibatch_data.find(data_id); if (t2 == m_minibatch_data.end()) { - LBANN_ERROR("failed to find data_id: " + std::to_string(data_id) + " in m_minibatch_data; m_minibatch_data.size: " + std::to_string(m_minibatch_data.size()) + "; epoch:" + std::to_string(m_model->get_cur_epoch())); + LBANN_ERROR("failed to find data_id: " + std::to_string(data_id) + " in m_minibatch_data; m_minibatch_data.size: " + std::to_string(m_minibatch_data.size()) + "; epoch:" + std::to_string(m_model->get_epoch())); } return t2->second; diff --git a/src/data_store/generic_data_store.cpp b/src/data_store/generic_data_store.cpp index 117096408b8..9ff32de8f06 100644 --- a/src/data_store/generic_data_store.cpp +++ b/src/data_store/generic_data_store.cpp @@ -211,7 +211,7 @@ size_t generic_data_store::get_file_size(std::string dir, std::string fn) { } void generic_data_store::set_shuffled_indices(const std::vector *indices, bool exchange_indices) { -if (m_master)std::cerr<<"starting set_shuffled_indices; epoch: "<get_cur_epoch()<<" role: " << m_reader->get_role()<<"; n: " << m_n << "\n"; +if (m_master)std::cerr<<"starting set_shuffled_indices; epoch: "<get_epoch()<<" role: " << m_reader->get_role()<<"; n: " << m_n << "\n"; m_shuffled_indices = indices; } 
diff --git a/src/models/model.cpp b/src/models/model.cpp index 09774714e7b..198fa150401 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -49,7 +49,7 @@ namespace lbann { // ============================================= -// Constructors and destructor +// Life cycle functions // ============================================= model::model(lbann_comm *comm, @@ -57,12 +57,6 @@ model::model(lbann_comm *comm, objective_function *obj_fn, optimizer* default_optimizer) : m_objective_function(obj_fn), - m_execution_mode(execution_mode::training), - m_terminate_training(false), - m_current_epoch(0), - m_current_step(0), - m_current_validation_step(0), - m_current_testing_step(0), m_max_mini_batch_size(mini_batch_size), m_current_mini_batch_size(mini_batch_size), m_effective_mini_batch_size(mini_batch_size), @@ -82,10 +76,8 @@ model::model(lbann_comm *comm, model::model(const model& other) : m_execution_mode(other.m_execution_mode), m_terminate_training(other.m_terminate_training), - m_current_epoch(other.m_current_epoch), - m_current_step(other.m_current_step), - m_current_validation_step(other.m_current_validation_step), - m_current_testing_step(other.m_current_testing_step), + m_epoch(other.m_epoch), + m_step(other.m_step), m_max_mini_batch_size(other.m_max_mini_batch_size), m_current_mini_batch_size(other.m_current_mini_batch_size), m_effective_mini_batch_size(other.m_effective_mini_batch_size), @@ -138,10 +130,8 @@ model& model::operator=(const model& other) { // Shallow copies m_execution_mode = other.m_execution_mode; m_terminate_training = other.m_terminate_training; - m_current_epoch = other.m_current_epoch; - m_current_step = other.m_current_step; - m_current_validation_step = other.m_current_validation_step; - m_current_testing_step = other.m_current_testing_step; + m_epoch = other.m_epoch; + m_step = other.m_step; m_max_mini_batch_size = other.m_max_mini_batch_size; m_current_mini_batch_size = other.m_current_mini_batch_size; 
m_effective_mini_batch_size = other.m_effective_mini_batch_size; @@ -191,6 +181,178 @@ model::~model() { for (const auto& cb : m_callbacks) { delete cb; } } +// ============================================= +// Access functions +// ============================================= + +void model::set_name(std::string name) { + if (name.empty()) { + std::ostringstream err; + err << "attempted to rename model \"" << get_name() << "\" " + << "with empty string"; + LBANN_ERROR(err.str()); + } + m_name = std::move(name); +} + +description model::get_description() const { + + // Construct description object + description desc(get_name()); + desc.add("Type", get_type()); + + // Layer topology + description layer_topology_desc("Layer topology:"); + for (El::Int k = 0; k < get_num_layers(); ++k) { + const auto& l = get_layer(k); + std::stringstream ss; + ss << l.get_name() << " (" << l.get_type() << "): {"; + const auto& parents = l.get_parent_layers(); + const auto& children = l.get_child_layers(); + for (size_t i = 0; i < parents.size(); ++i) { + ss << (i > 0 ? ", " : ""); + if (parents[i] == nullptr) { + ss << "unknown layer"; + } else { + ss << parents[i]->get_name() << " ("; + const auto& dims = l.get_input_dims(i); + for (size_t j = 0; j < dims.size(); ++j) { + ss << (j > 0 ? "x" : "") << dims[j]; + } + ss << ")"; + } + } + ss << "} -> {"; + for (size_t i = 0; i < children.size(); ++i) { + ss << (i > 0 ? ", " : ""); + if (children[i] == nullptr) { + ss << "unknown layer"; + } else { + ss << children[i]->get_name() << " ("; + const auto& dims = l.get_output_dims(i); + for (size_t j = 0; j < dims.size(); ++j) { + ss << (j > 0 ? 
"x" : "") << dims[j]; + } + ss << ")"; + } + } + ss << "}"; + layer_topology_desc.add(ss.str()); + } + desc.add(std::string{}); + desc.add(layer_topology_desc); + + // Layer details + description layer_details_desc("Layer details:"); + for (El::Int i = 0; i < get_num_layers(); ++i) { + layer_details_desc.add(get_layer(i).get_description()); + } + desc.add(std::string{}); + desc.add(layer_details_desc); + + // Weights + description weights_desc("Weights:"); + for (const auto* w : m_weights) { + if (w == nullptr) { + weights_desc.add("unknown weights"); + } else { + weights_desc.add(w->get_description()); + } + } + desc.add(std::string{}); + desc.add(weights_desc); + + /// @todo Descriptions for objective function, metrics, callbacks + + // Result + return desc; + +} + +El::Int model::get_num_layers() const noexcept { + return m_layers.size(); +} +Layer& model::get_layer(El::Int pos) { + // Item 3, p. 23 in "Effective C++", 3rd ed., by Scott Meyers + return const_cast(static_cast(*this).get_layer(pos)); +} +const Layer& model::get_layer(El::Int pos) const { + std::stringstream err; + if (pos < 0 || pos >= get_num_layers()) { + err << "could not access layer in model \"" << get_name() << "\" " + << "(requested index " << pos << ", " + << "but there are " << get_num_layers() << " layers)"; + LBANN_ERROR(err.str()); + } else if (m_layers[pos] == nullptr) { + err << "model \"" << get_name() << "\" " + << "has a null pointer in its layer list"; + LBANN_ERROR(err.str()); + } + return *m_layers[pos]; +} +std::vector model::get_layers() { + std::vector layer_list; + layer_list.reserve(m_layers.size()); + for (const auto& ptr : m_layers) { + layer_list.push_back(ptr.get()); + } + return layer_list; +} +const std::vector model::get_layers() const { + std::vector layer_list; + layer_list.reserve(m_layers.size()); + for (const auto& ptr : m_layers) { + layer_list.push_back(ptr.get()); + } + return layer_list; +} + +std::vector model::get_weights() { + std::vector weights_list; + 
for (const auto& w : m_weights) { + weights_list.push_back(w); + } + return weights_list; +} + +const std::vector model::get_weights() const { + std::vector weights_list; + for (const auto& w : m_weights) { + weights_list.push_back(w); + } + return weights_list; +} + +void model::set_execution_mode(execution_mode mode) { + m_execution_mode = mode; +} + +execution_mode model::get_execution_mode() const noexcept { + return m_execution_mode; +} + +El::Int model::get_step() const noexcept { + return get_step(get_execution_mode()); +} + +El::Int model::get_step(execution_mode mode) const noexcept { + if (m_step.count(mode) > 0) { + return m_step.at(mode); + } else { + return 0; + } +} + +int model::get_num_iterations_per_epoch(execution_mode mode) const { + for (El::Int i = 0; i < get_num_layers(); ++i) { + const auto* input = dynamic_cast(&get_layer(i)); + if (input != nullptr) { + return input->get_num_iterations_per_epoch(mode); + } + } + return 0; +} + // ============================================= // Model specification // ============================================= @@ -270,60 +432,6 @@ void model::add_metric(metric *m) { m_metrics.push_back(m); } -std::vector model::get_weights() { - std::vector weights_list; - for (const auto& w : m_weights) { - weights_list.push_back(w); - } - return weights_list; -} - -const std::vector model::get_weights() const { - std::vector weights_list; - for (const auto& w : m_weights) { - weights_list.push_back(w); - } - return weights_list; -} - -El::Int model::get_num_layers() const noexcept { - return m_layers.size(); -} -Layer& model::get_layer(El::Int pos) { - // Item 3, p. 
23 in "Effective C++", 3rd ed., by Scott Meyers - return const_cast(static_cast(*this).get_layer(pos)); -} -const Layer& model::get_layer(El::Int pos) const { - std::stringstream err; - if (pos < 0 || pos >= get_num_layers()) { - err << "could not access layer in model \"" << get_name() << "\" " - << "(requested index " << pos << ", " - << "but there are " << get_num_layers() << " layers)"; - LBANN_ERROR(err.str()); - } else if (m_layers[pos] == nullptr) { - err << "model \"" << get_name() << "\" " - << "has a null pointer in its layer list"; - LBANN_ERROR(err.str()); - } - return *m_layers[pos]; -} -std::vector model::get_layers() { - std::vector layer_list; - layer_list.reserve(m_layers.size()); - for (const auto& ptr : m_layers) { - layer_list.push_back(ptr.get()); - } - return layer_list; -} -const std::vector model::get_layers() const { - std::vector layer_list; - layer_list.reserve(m_layers.size()); - for (const auto& ptr : m_layers) { - layer_list.push_back(ptr.get()); - } - return layer_list; -} - void model::replace_weights(std::vector& new_weights) { // Check that number of weights is valid @@ -421,91 +529,6 @@ void model::reorder_layers(const std::vector& gather_indices) { } -void model::set_name(std::string name) { - if (name.empty()) { - std::stringstream err; - err << "attempted to rename model \"" << get_name() << "\" " - << "with empty string"; - LBANN_ERROR(err.str()); - } - m_name = std::move(name); -} - - -description model::get_description() const { - - // Construct description object - description desc(get_name()); - desc.add("Type", get_type()); - - // Layer topology - description layer_topology_desc("Layer topology:"); - for (El::Int k = 0; k < get_num_layers(); ++k) { - const auto& l = get_layer(k); - std::stringstream ss; - ss << l.get_name() << " (" << l.get_type() << "): {"; - const auto& parents = l.get_parent_layers(); - const auto& children = l.get_child_layers(); - for (size_t i = 0; i < parents.size(); ++i) { - ss << (i > 0 ? 
", " : ""); - if (parents[i] == nullptr) { - ss << "unknown layer"; - } else { - ss << parents[i]->get_name() << " ("; - const auto& dims = l.get_input_dims(i); - for (size_t j = 0; j < dims.size(); ++j) { - ss << (j > 0 ? "x" : "") << dims[j]; - } - ss << ")"; - } - } - ss << "} -> {"; - for (size_t i = 0; i < children.size(); ++i) { - ss << (i > 0 ? ", " : ""); - if (children[i] == nullptr) { - ss << "unknown layer"; - } else { - ss << children[i]->get_name() << " ("; - const auto& dims = l.get_output_dims(i); - for (size_t j = 0; j < dims.size(); ++j) { - ss << (j > 0 ? "x" : "") << dims[j]; - } - ss << ")"; - } - } - ss << "}"; - layer_topology_desc.add(ss.str()); - } - desc.add(std::string{}); - desc.add(layer_topology_desc); - - // Layer details - description layer_details_desc("Layer details:"); - for (El::Int i = 0; i < get_num_layers(); ++i) { - layer_details_desc.add(get_layer(i).get_description()); - } - desc.add(std::string{}); - desc.add(layer_details_desc); - - // Weights - description weights_desc("Weights:"); - for (const auto* w : m_weights) { - if (w == nullptr) { - weights_desc.add("unknown weights"); - } else { - weights_desc.add(w->get_description()); - } - } - desc.add(std::string{}); - desc.add(weights_desc); - - /// @todo Descriptions for objective function, metrics, callbacks - - // Result - return desc; - -} - void model::remap_pointers(const std::unordered_map& layer_map, const std::unordered_map& weights_map) { @@ -921,18 +944,8 @@ void model::add_split_layers(std::unordered_set& layer_names) { } } -int model::get_num_iterations_per_epoch(execution_mode mode) const { - for (El::Int i = 0; i < get_num_layers(); ++i) { - const auto* input = dynamic_cast(&get_layer(i)); - if (input != nullptr) { - return input->get_num_iterations_per_epoch(mode); - } - } - return 0; -} - // ============================================= -// Evaluation and training +// Execution // ============================================= void 
model::evaluate(execution_mode mode, int num_batches) { @@ -959,33 +972,9 @@ void model::evaluate(execution_mode mode, int num_batches) { do_evaluate_end_cbs(mode); } -//this is for data store functionality -void model::collect_indices(execution_mode mode) { - reset_mode_and_model(mode); - while (true) { - get_layer(0).forward_prop(); - bool finished = true; - finished = get_layer(0).update() && finished; - if (finished) { - break; - } - } - //this may not be necessary, but shouldn't hurt - reset_epoch_statistics(mode); -} - -void model::collect_background_data_fetch(execution_mode mode) { - for (El::Int i = 0; i < get_num_layers(); ++i) { - auto *input = dynamic_cast(&get_layer(i)); - if (input != nullptr) { - input->collect_background_data_fetch(mode); - } - } -} - void model::train(int num_epochs, int num_batches) { do_train_begin_cbs(); - for (int epoch = m_current_epoch; epoch < num_epochs; ++epoch) { + for (int epoch = m_epoch; epoch < num_epochs; ++epoch) { if (get_terminate_training()) { break; } // Initialize epoch @@ -1000,7 +989,7 @@ void model::train(int num_epochs, int num_batches) { } // Finalize epoch - ++m_current_epoch; + ++m_epoch; reconcile_weight_values(); do_epoch_end_cbs(); reset_epoch_statistics(execution_mode::training); @@ -1012,6 +1001,30 @@ void model::train(int num_epochs, int num_batches) { do_train_end_cbs(); } +//this is for data store functionality +void model::collect_indices(execution_mode mode) { + reset_mode_and_model(mode); + while (true) { + get_layer(0).forward_prop(); + bool finished = true; + finished = get_layer(0).update() && finished; + if (finished) { + break; + } + } + //this may not be necessary, but shouldn't hurt + reset_epoch_statistics(mode); +} + +void model::collect_background_data_fetch(execution_mode mode) { + for (El::Int i = 0; i < get_num_layers(); ++i) { + auto *input = dynamic_cast(&get_layer(i)); + if (input != nullptr) { + input->collect_background_data_fetch(mode); + } + } +} + // At the start of the 
epoch, set the execution mode and make sure // that each layer points to this model void model::reset_mode_and_model(execution_mode mode) { @@ -1054,8 +1067,9 @@ bool model::evaluate_mini_batch(execution_mode mode) { } bool model::train_mini_batch() { - reset_mode_and_model(execution_mode::training); - do_batch_begin_cbs(execution_mode::training); + constexpr execution_mode mode = execution_mode::training; + reset_mode_and_model(mode); + do_batch_begin_cbs(mode); bool finished; @@ -1068,10 +1082,9 @@ bool model::train_mini_batch() { #endif // Forward prop step clear_gradients(); - forward_prop(execution_mode::training); + forward_prop(mode); // Result is not needed until the end of the mini-batch. - m_objective_function->start_evaluation(execution_mode::training, - get_current_mini_batch_size()); + m_objective_function->start_evaluation(mode, get_current_mini_batch_size()); // Backward prop step m_objective_function->differentiate(); @@ -1079,11 +1092,9 @@ bool model::train_mini_batch() { m_objective_function->compute_weight_regularization(); // Finish evaluation. 
- m_objective_function->finish_evaluation(execution_mode::training, - get_current_mini_batch_size()); + m_objective_function->finish_evaluation(mode, get_current_mini_batch_size()); for (const auto& m : m_metrics) { - m->evaluate(execution_mode::training, - get_current_mini_batch_size()); + m->evaluate(mode, get_current_mini_batch_size()); } // Update step @@ -1094,7 +1105,11 @@ bool model::train_mini_batch() { } #endif - ++m_current_step; + // Increment mini-batch step + /// @todo Move after the callbacks + if (m_step.count(mode) < 1) { m_step[mode] = 0; } + ++m_step[mode]; + do_batch_end_cbs(execution_mode::training); return finished; } @@ -1236,7 +1251,7 @@ void model::do_batch_begin_cbs(execution_mode mode) { for (const auto& cb : m_callbacks) { switch (mode) { case execution_mode::training: - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_batch_begin(this); } break; @@ -1245,10 +1260,7 @@ void model::do_batch_begin_cbs(execution_mode mode) { cb->on_batch_evaluate_begin(this); break; default: - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "invalid execution mode"; - throw lbann_exception(err.str()); + LBANN_ERROR("invalid execution mode"); } } } @@ -1257,7 +1269,7 @@ void model::do_batch_end_cbs(execution_mode mode) { for (const auto& cb : m_callbacks) { switch (mode) { case execution_mode::training: - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_batch_end(this); } break; @@ -1266,10 +1278,7 @@ void model::do_batch_end_cbs(execution_mode mode) { cb->on_batch_evaluate_end(this); break; default: - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "invalid execution mode"; - throw lbann_exception(err.str()); + LBANN_ERROR("invalid execution mode"); } } } @@ -1278,7 +1287,7 @@ void model::do_model_forward_prop_begin_cbs(execution_mode mode) { for (const auto& cb : m_callbacks) { 
switch (mode) { case execution_mode::training: - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_forward_prop_begin(this); } break; @@ -1287,10 +1296,7 @@ void model::do_model_forward_prop_begin_cbs(execution_mode mode) { cb->on_evaluate_forward_prop_begin(this); break; default: - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "invalid execution mode"; - throw lbann_exception(err.str()); + LBANN_ERROR("invalid execution mode"); } } } @@ -1299,7 +1305,7 @@ void model::do_model_forward_prop_end_cbs(execution_mode mode) { for (const auto& cb : m_callbacks) { switch (mode) { case execution_mode::training: - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_forward_prop_end(this); } break; @@ -1308,23 +1314,19 @@ void model::do_model_forward_prop_end_cbs(execution_mode mode) { cb->on_evaluate_forward_prop_end(this); break; default: - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "invalid execution mode"; - throw lbann_exception(err.str()); + LBANN_ERROR("invalid execution mode"); } } } /** @todo Consistent behavior between train, validation, and test - * modes, e.g. - * if (get_cur_validation_step() % cb->get_batch_interval() == 0) { ... 
} + * modes */ void model::do_layer_forward_prop_begin_cbs(execution_mode mode, Layer *l) { for (const auto& cb : m_callbacks) { switch (mode) { case execution_mode::training: - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_forward_prop_begin(this, l); } break; @@ -1333,23 +1335,19 @@ void model::do_layer_forward_prop_begin_cbs(execution_mode mode, Layer *l) { cb->on_evaluate_forward_prop_begin(this, l); break; default: - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "invalid execution mode"; - throw lbann_exception(err.str()); + LBANN_ERROR("invalid execution mode"); } } } /** @todo Consistent behavior between train, validation, and test - * modes, e.g. - * if (get_cur_validation_step() % cb->get_batch_interval() == 0) { ... } + * modes */ void model::do_layer_forward_prop_end_cbs(execution_mode mode, Layer *l) { for (const auto& cb : m_callbacks) { switch (mode) { case execution_mode::training: - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_forward_prop_end(this, l); } break; @@ -1358,17 +1356,14 @@ void model::do_layer_forward_prop_end_cbs(execution_mode mode, Layer *l) { cb->on_evaluate_forward_prop_end(this, l); break; default: - std::stringstream err; - err << __FILE__ << " " << __LINE__ << " :: " - << "invalid execution mode"; - throw lbann_exception(err.str()); + LBANN_ERROR("invalid execution mode"); } } } void model::do_model_backward_prop_begin_cbs() { for (const auto& cb : m_callbacks) { - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_backward_prop_begin(this); } } @@ -1376,7 +1371,7 @@ void model::do_model_backward_prop_begin_cbs() { void model::do_model_backward_prop_end_cbs() { for (const auto& cb : m_callbacks) { - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { 
cb->on_backward_prop_end(this); } } @@ -1384,7 +1379,7 @@ void model::do_model_backward_prop_end_cbs() { void model::do_layer_backward_prop_begin_cbs(Layer *l) { for (const auto& cb : m_callbacks) { - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_backward_prop_begin(this, l); } } @@ -1392,7 +1387,7 @@ void model::do_layer_backward_prop_begin_cbs(Layer *l) { void model::do_layer_backward_prop_end_cbs(Layer *l) { for (const auto& cb : m_callbacks) { - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_backward_prop_end(this, l); } } @@ -1400,7 +1395,7 @@ void model::do_layer_backward_prop_end_cbs(Layer *l) { void model::do_model_optimize_begin_cbs() { for (const auto& cb : m_callbacks) { - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_optimize_begin(this); } } @@ -1408,7 +1403,7 @@ void model::do_model_optimize_begin_cbs() { void model::do_model_optimize_end_cbs() { for (const auto& cb : m_callbacks) { - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_optimize_end(this); } } @@ -1416,7 +1411,7 @@ void model::do_model_optimize_end_cbs() { void model::do_weight_optimize_begin_cbs(weights *w) { for (const auto& cb : m_callbacks) { - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_optimize_begin(this, w); } } @@ -1424,7 +1419,7 @@ void model::do_weight_optimize_begin_cbs(weights *w) { void model::do_weight_optimize_end_cbs(weights *w) { for (const auto& cb : m_callbacks) { - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_optimize_end(this, w); } } @@ -1436,19 +1431,19 @@ void model::do_weight_optimize_end_cbs(weights *w) { void model::summarize_stats(lbann_summary& summarizer) { for 
(El::Int i = 0; i < get_num_layers(); ++i) { - get_layer(i).summarize_stats(summarizer, get_cur_step()); + get_layer(i).summarize_stats(summarizer, get_step(execution_mode::training)); } summarizer.reduce_scalar("objective", m_objective_function->get_mean_value(m_execution_mode), - get_cur_step()); + get_step(execution_mode::training)); summarizer.reduce_scalar( "objective_evaluation_time", m_objective_function->get_evaluation_time(), - get_cur_step()); + get_step(execution_mode::training)); summarizer.reduce_scalar( "objective_differentiation_time", m_objective_function->get_differentiation_time(), - get_cur_step()); + get_step(execution_mode::training)); m_objective_function->reset_counters(); double total_metric_time = 0.0; for (auto&& m : m_metrics) { @@ -1458,12 +1453,12 @@ void model::summarize_stats(lbann_summary& summarizer) { summarizer.reduce_scalar( "metric_evaluation_time", total_metric_time, - get_cur_step()); + get_step(execution_mode::training)); } void model::summarize_matrices(lbann_summary& summarizer) { for (El::Int i = 0; i < get_num_layers(); ++i) { - get_layer(i).summarize_matrices(summarizer, get_cur_step()); + get_layer(i).summarize_matrices(summarizer, get_step(execution_mode::training)); } } @@ -1475,10 +1470,10 @@ void model::summarize_matrices(lbann_summary& summarizer) { struct lbann_model_header { uint32_t execution_mode; uint32_t terminate_training; - uint64_t current_epoch; - uint64_t current_step; - uint64_t current_validation_step; - uint64_t current_testing_step; + uint64_t epoch; + uint64_t training_step; + uint64_t validation_step; + uint64_t testing_step; uint32_t max_mini_batch_size; uint32_t current_mini_batch_size; uint32_t current_phase; @@ -1491,15 +1486,15 @@ bool model::save_to_checkpoint_shared(persist& p) { if (m_comm->am_trainer_master()) { p.write_uint32(persist_type::train, "execution_mode", (uint32_t) m_execution_mode); p.write_uint32(persist_type::train, "terminate_training", (uint32_t) m_terminate_training); - 
p.write_uint64(persist_type::train, "current_epoch", (uint64_t) m_current_epoch); - p.write_uint64(persist_type::train, "current_step", (uint64_t) m_current_step); - p.write_uint64(persist_type::train, "current_testing_step", (uint64_t) m_current_testing_step); + p.write_uint64(persist_type::train, "epoch", (uint64_t) m_epoch); + p.write_uint64(persist_type::train, "training_step", (uint64_t) get_step(execution_mode::training)); + p.write_uint64(persist_type::train, "testing_step", (uint64_t) get_step(execution_mode::testing)); p.write_uint32(persist_type::train, "max_mini_batch_size", (uint32_t) m_max_mini_batch_size); p.write_uint32(persist_type::train, "current_mini_batch_size", (uint32_t) m_current_mini_batch_size); p.write_uint32(persist_type::train, "current_phase", (uint32_t) m_current_phase); p.write_uint32(persist_type::train, "persist_callback_type", (uint32_t) p.get_cb_type()); if(p.get_cb_type() == callback_type::batch) - p.write_uint64(persist_type::validate, "current_validataion_step", (uint64_t) m_current_validation_step); + p.write_uint64(persist_type::validate, "validation_step", (uint64_t) get_step(execution_mode::validation)); } for (weights *w : m_weights) { @@ -1520,7 +1515,7 @@ bool model::save_to_checkpoint_shared(persist& p) { } else{ if (m_comm->am_trainer_master()) { - p.write_uint64(persist_type::validate, "current_validataion_step", (uint64_t) m_current_validation_step); + p.write_uint64(persist_type::validate, "validation_step", (uint64_t) get_step(execution_mode::validation)); } save_rng_to_checkpoint_shared(p, m_comm); for (weights *w : m_weights) { @@ -1547,17 +1542,17 @@ bool model::load_from_checkpoint_shared(persist& p) { if (p.get_cb_type() != callback_type::validation) { p.read_uint32(persist_type::train, "execution_mode", &header.execution_mode); p.read_uint32(persist_type::train, "terminate_training", &header.terminate_training); - p.read_uint64(persist_type::train, "current_epoch", &header.current_epoch); - 
p.read_uint64(persist_type::train, "current_step", &header.current_step); + p.read_uint64(persist_type::train, "epoch", &header.epoch); + p.read_uint64(persist_type::train, "training_step", &header.training_step); if(get_num_iterations_per_epoch(execution_mode::validation) != 0) - p.read_uint64(persist_type::validate, "current_validation_step", &header.current_validation_step); - p.read_uint64(persist_type::train, "current_testing_step", &header.current_testing_step); + p.read_uint64(persist_type::validate, "validation_step", &header.validation_step); + p.read_uint64(persist_type::train, "testing_step", &header.testing_step); p.read_uint32(persist_type::train, "max_mini_batch_size", &header.max_mini_batch_size); p.read_uint32(persist_type::train, "current_mini_batch_size", &header.current_mini_batch_size); p.read_uint32(persist_type::train, "current_phase", &header.current_phase); p.read_uint32(persist_type::train, "persist_callback_type", &header.callback_type); } else { - p.read_uint64(persist_type::validate, "current_validation_step", &header.current_validation_step); + p.read_uint64(persist_type::validate, "validation_step", &header.validation_step); } } load_rng_from_checkpoint_shared(p, m_comm); @@ -1568,18 +1563,18 @@ bool model::load_from_checkpoint_shared(persist& p) { if (p.get_cb_type() != callback_type::validation) { m_execution_mode = (execution_mode) header.execution_mode; m_terminate_training = (bool) header.terminate_training; - m_current_epoch = (int) header.current_epoch; - m_current_step = (int) header.current_step; + m_epoch = (int) header.epoch; + m_step[execution_mode::training] = (int) header.training_step; if(get_num_iterations_per_epoch(execution_mode::validation) != 0) - m_current_validation_step = (int) header.current_validation_step; - m_current_testing_step = (int) header.current_testing_step; + m_step[execution_mode::validation] = (int) header.validation_step; + m_step[execution_mode::testing] = (int) header.testing_step; 
m_max_mini_batch_size = (int) header.max_mini_batch_size; m_current_mini_batch_size = (int) header.current_mini_batch_size; m_current_phase = header.current_phase; // set state of persist object to know which type of ckpt we are returning from. p.set_cb_type((callback_type) header.callback_type); } else { - m_current_validation_step = (int) header.current_validation_step; + m_step[execution_mode::validation] = (int) header.validation_step; } for (weights *w : m_weights) { @@ -1608,15 +1603,15 @@ bool model::save_to_checkpoint_distributed(persist& p){ if (p.get_cb_type() != callback_type::validation) { p.write_uint32(persist_type::train, "execution_mode", (uint32_t) m_execution_mode); p.write_uint32(persist_type::train, "terminate_training", (uint32_t) m_terminate_training); - p.write_uint64(persist_type::train, "current_epoch", (uint64_t) m_current_epoch); - p.write_uint64(persist_type::train, "current_step", (uint64_t) m_current_step); - p.write_uint64(persist_type::train, "current_testing_step", (uint64_t) m_current_testing_step); + p.write_uint64(persist_type::train, "epoch", (uint64_t) m_epoch); + p.write_uint64(persist_type::train, "training_step", (uint64_t) get_step(execution_mode::training)); + p.write_uint64(persist_type::train, "testing_step", (uint64_t) get_step(execution_mode::testing)); p.write_uint32(persist_type::train, "max_mini_batch_size", (uint32_t) m_max_mini_batch_size); p.write_uint32(persist_type::train, "current_mini_batch_size", (uint32_t) m_current_mini_batch_size); p.write_uint32(persist_type::train, "current_phase", (uint32_t) m_current_phase); p.write_uint32(persist_type::train, "persist_callback_type", (uint32_t) p.get_cb_type()); if(p.get_cb_type() == callback_type::batch) - p.write_uint64(persist_type::validate, "current_validataion_step", (uint64_t) m_current_validation_step); + p.write_uint64(persist_type::validate, "validataion_step", (uint64_t) get_step(execution_mode::validation)); for (weights *w : m_weights) { 
w->save_to_checkpoint_distributed(p); @@ -1636,7 +1631,7 @@ bool model::save_to_checkpoint_distributed(persist& p){ } else { - p.write_uint64(persist_type::validate, "current_validataion_step", (uint64_t) m_current_validation_step); + p.write_uint64(persist_type::validate, "validataion_step", (uint64_t) get_step(execution_mode::validation)); save_rng_to_checkpoint_shared(p, m_comm); for (El::Int i = 0; i < get_num_layers(); ++i) { @@ -1655,11 +1650,11 @@ bool model::load_from_checkpoint_distributed(persist& p){ struct lbann_model_header header; p.read_uint32(persist_type::train, "execution_mode", &header.execution_mode); p.read_uint32(persist_type::train, "terminate_training", &header.terminate_training); - p.read_uint64(persist_type::train, "current_epoch", &header.current_epoch); - p.read_uint64(persist_type::train, "current_step", &header.current_step); + p.read_uint64(persist_type::train, "epoch", &header.epoch); + p.read_uint64(persist_type::train, "training_step", &header.training_step); if(get_num_iterations_per_epoch(execution_mode::validation) != 0) - p.read_uint64(persist_type::validate, "current_validation_step", &header.current_validation_step); - p.read_uint64(persist_type::train, "current_testing_step", &header.current_testing_step); + p.read_uint64(persist_type::validate, "validation_step", &header.validation_step); + p.read_uint64(persist_type::train, "testing_step", &header.testing_step); p.read_uint32(persist_type::train, "max_mini_batch_size", &header.max_mini_batch_size); p.read_uint32(persist_type::train, "current_mini_batch_size", &header.current_mini_batch_size); p.read_uint32(persist_type::train, "current_phase", &header.current_phase); @@ -1667,11 +1662,11 @@ bool model::load_from_checkpoint_distributed(persist& p){ m_execution_mode = (execution_mode) header.execution_mode; m_terminate_training = (bool) header.terminate_training; - m_current_epoch = (int) header.current_epoch; - m_current_step = (int) header.current_step; + m_epoch = (int) 
header.epoch; + m_step[execution_mode::training] = (int) header.training_step; if(get_num_iterations_per_epoch(execution_mode::validation) != 0) - m_current_validation_step = (int) header.current_validation_step; - m_current_testing_step = (int) header.current_testing_step; + m_step[execution_mode::validation] = (int) header.validation_step; + m_step[execution_mode::testing] = (int) header.testing_step; m_max_mini_batch_size = (int) header.max_mini_batch_size; m_current_mini_batch_size = (int) header.current_mini_batch_size; m_current_phase = header.current_phase; From 9a3a45a125dd80c7be0d7964b7fc0b22e517ee61 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 27 Feb 2019 10:24:51 -0800 Subject: [PATCH 118/443] Updated the code to keep the deque-based heap a fixed length and to close file handles when they are evicted from the heap. Also added a call to close the file once it has been used for the last time. --- .../lbann/data_readers/sample_list_jag.hpp | 51 +++++++++++++++---- .../data_readers/sample_list_jag_impl.hpp | 12 +++-- src/data_readers/data_reader_jag_conduit.cpp | 1 + 3 files changed, 49 insertions(+), 15 deletions(-) diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index 4de9d90e78c..0e3ea836243 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -21,7 +21,7 @@ #include #include "conduit/conduit_relay_io_hdf5.hpp" -#define LBANN_MAX_OPEN_DATA_FILES 384 +#define LBANN_MAX_OPEN_DATA_FILES 16 namespace lbann { @@ -197,16 +197,6 @@ class sample_list_jag { } } - if(m_open_fd_pq.size() > 100/*LBANN_MAX_OPEN_DATA_FILES*/) { - std::cout << "The file descriptors are over the limit, lets close " << m_open_fd_pq.top().first << std::endl; - while(!m_open_fd_pq.empty()) { - auto e = m_open_fd_pq.top(); - std::cout << "{" << e.first << ", " << e.second << "}" << std::endl; - // std::cout << q.top() << " "; - m_open_fd_pq.pop(); - } 
- std::cout << '\n'; - } auto result = m_open_fd_map.emplace(filename, h); m_open_fd_pq.emplace(std::make_pair(filename,access_count)); @@ -254,6 +244,31 @@ class sample_list_jag { //LBANN_ERROR("We have weirdness here, the head of the queue is not " + std::to_string(id)); m_open_fd_pq.pop_front(); } + + if(m_open_fd_pq.size() > LBANN_MAX_OPEN_DATA_FILES) { + // std::cout << "PQ is too big the queue looks like "; + // for(auto&& p: m_open_fd_pq) { + // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; + // } + // std::cout << std::endl; + // std::cout << "The file descriptors are over the limit, lets close " << m_open_fd_pq.front().first << std::endl; + // { + auto& f = m_open_fd_pq.front(); + auto& victim = m_sample_id_map[f.first]; + // std::cout << "{" << f.second.first << ", " << f.second.second << "}" << std::endl; + // // std::cout << q.top() << " "; + // } + m_open_fd_pq.pop_front(); + conduit::relay::io::hdf5_close_file(std::get<1>(victim)); + std::get<1>(victim) = 0; + // std::cout << '\n'; + // std::cout << "Now the queue looks like "; + // for(auto&& p: m_open_fd_pq) { + // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; + // } + // std::cout << std::endl; + } + std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); } @@ -378,6 +393,20 @@ class sample_list_jag { return h; } + void close_if_done_samples_hdf5_handle(const size_t i) { + const sample_t& s = m_sample_list[i]; + sample_id_t id = s.first; + hid_t h = get_samples_hdf5_handle(id); + if (h > static_cast(0)) { + auto& e = m_sample_id_map[id]; + auto& file_access_queue = std::get<2>(e); + if(file_access_queue.empty()) { + conduit::relay::io::hdf5_close_file(std::get<1>(e)); + std::get<1>(e) = 0; + } + } + } + void all_gather_archive(const std::string &archive, std::vector& gathered_archive, lbann_comm& comm); template size_t all_gather_field(T data, std::vector& gathered_data, lbann_comm& 
comm); void all_gather_packed_lists(lbann_comm& comm); diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index c19440660bd..dd2b605dc72 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -79,10 +79,14 @@ inline sample_list_jag::sample_list_jag() inline sample_list_jag::~sample_list_jag() { // Close the existing open files - // for(auto f : m_open_fd_map) { - // conduit::relay::io::hdf5_close_file(f.second); - // } - // m_open_fd_map.clear(); + for(auto f : m_sample_id_map) { + if(std::get<1>(f) > 0) { + conduit::relay::io::hdf5_close_file(std::get<1>(f)); + } + std::get<1>(f) = 0; + } + m_sample_id_map.clear(); + m_open_fd_pq.clear(); } inline void sample_list_jag::set_num_partitions(size_t n) { diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index 228ba35480b..562179bfd01 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -1410,6 +1410,7 @@ bool data_reader_jag_conduit::fetch_datum(CPUMat& X, int data_id, int mb_idx) { m_jag_store->set_conduit_node(data_id, node); } + m_sample_list.close_if_done_samples_hdf5_handle(data_id); // close_conduit_node(data_id); return ok; } From 038aee3be31e80f20ed506cddc14157d9445d7eb Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 27 Feb 2019 10:50:47 -0800 Subject: [PATCH 119/443] Cleaned up dead code. 
--- .../data_readers/data_reader_jag_conduit.hpp | 1 - .../lbann/data_readers/sample_list_jag.hpp | 51 +------------------ src/data_readers/data_reader_jag_conduit.cpp | 12 ----- 3 files changed, 1 insertion(+), 63 deletions(-) diff --git a/include/lbann/data_readers/data_reader_jag_conduit.hpp b/include/lbann/data_readers/data_reader_jag_conduit.hpp index 38df939c10a..ea6219435f5 100644 --- a/include/lbann/data_readers/data_reader_jag_conduit.hpp +++ b/include/lbann/data_readers/data_reader_jag_conduit.hpp @@ -356,7 +356,6 @@ class data_reader_jag_conduit : public generic_data_reader { bool load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) const; /// Check if a key exist for sample i bool has_conduit_path(const size_t i, const std::string& key) const; - // void close_conduit_node(const size_t i); /// Obtain image data std::vector< std::vector > get_image_data(const size_t i, conduit::Node& sample) const; diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index 0e3ea836243..6ca8fdc3960 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -21,7 +21,7 @@ #include #include "conduit/conduit_relay_io_hdf5.hpp" -#define LBANN_MAX_OPEN_DATA_FILES 16 +#define LBANN_MAX_OPEN_DATA_FILES 768 namespace lbann { @@ -169,55 +169,6 @@ class sample_list_jag { } void set_samples_hdf5_handle(sample_id_t id, hid_t h) { -#if 0 - int bucket_count = m_open_fd_map.bucket_count(); - int bucket = m_open_fd_map.bucket(filename); - if(!allow_collisions && m_open_fd_map.bucket_size(bucket) > 0) { - if(m_open_fd_map.bucket_size(bucket) != 1) { - LBANN_ERROR(std::string{} + " :: unexpected number of open file descriptors for bucket " - + std::to_string(bucket)); - } - auto local_it = m_open_fd_map.begin(bucket); - if(local_it == m_open_fd_map.end(bucket)) { - LBANN_ERROR(std::string{} + " :: bucket '" + std::to_string(bucket) - + "' has an empty 
iterator"); - } - const std::string& old_filename = local_it->first; - hid_t old_h = local_it->second; - if (old_h <= static_cast(0)) { - LBANN_ERROR(std::string{} + " :: data file '" + old_filename - + "' has a corrupt file descriptor = " + std::to_string(old_h)); - } - - conduit::relay::io::hdf5_close_file(old_h); - int num_erased = m_open_fd_map.erase(old_filename); - if(num_erased != 1) { - LBANN_ERROR(std::string{} + " :: erasing file descriptor for '" + old_filename - + "' that had a file descriptor = " + std::to_string(old_h)); - } - } - - - auto result = m_open_fd_map.emplace(filename, h); - m_open_fd_pq.emplace(std::make_pair(filename,access_count)); - - int bucket2 = m_open_fd_map.bucket(filename); - int bucket_count2 = m_open_fd_map.bucket_count(); - if(!result.second) { - LBANN_WARNING(std::string{} + " :: The key for " + filename + " already existed"); - } - if(bucket2 != bucket) { - LBANN_ERROR(std::string{} + " :: the buckets don't match original bucket " - + std::to_string(bucket) + " with a count of " + std::to_string(bucket_count) + " and new bucket " + std::to_string(bucket2) + " and a new count of " + std::to_string(bucket_count2)); - } - if(m_open_fd_map.bucket_size(bucket) != 1) { - LBANN_WARNING(std::string{} + " :: there should be one entry with an open file descriptors for bucket " - + std::to_string(bucket) + " not " - + std::to_string(m_open_fd_map.bucket_size(bucket)) + " entries"); - } -#endif - - // for (auto&& e : m_sample_id_map) { auto&& e = m_sample_id_map[id]; std::get<1>(e) = h; // std::cout << "Attempt to set the hdf5 handle " << h << " for filename " << std::get<0>(e) << std::endl; diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index 562179bfd01..ebca9c4333b 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -328,17 +328,6 @@ bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::strin 
return true; } -// void data_reader_jag_conduit::close_conduit_node(const size_t i) { -// const sample_t& s = m_sample_list[i]; - -// sample_id_t id = s.first; -// hid_t h = m_sample_list.get_samples_hdf5_handle(id); -// if (h > static_cast(0)) { -// conduit::relay::io::hdf5_close_file(h); -// m_sample_list.set_samples_hdf5_handle(id, 0); -// } -// } - bool data_reader_jag_conduit::has_conduit_path(const size_t i, const std::string& key) const { const sample_t& s = m_sample_list[i]; sample_id_t id = s.first; @@ -1411,7 +1400,6 @@ bool data_reader_jag_conduit::fetch_datum(CPUMat& X, int data_id, int mb_idx) { } m_sample_list.close_if_done_samples_hdf5_handle(data_id); - // close_conduit_node(data_id); return ok; } From c267037f37360cb7bef1217fa303a97aa04a77a5 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 27 Feb 2019 11:01:26 -0800 Subject: [PATCH 120/443] Removing deprecated "phase" logic from model class. --- .../lbann/callbacks/callback_save_images.hpp | 1 - include/lbann/models/model.hpp | 13 +--------- src/callbacks/callback_save_images.cpp | 7 ----- src/models/model.cpp | 26 +++++-------------- 4 files changed, 7 insertions(+), 40 deletions(-) diff --git a/include/lbann/callbacks/callback_save_images.hpp b/include/lbann/callbacks/callback_save_images.hpp index 443f39ad49a..4f935f0ea5f 100644 --- a/include/lbann/callbacks/callback_save_images.hpp +++ b/include/lbann/callbacks/callback_save_images.hpp @@ -55,7 +55,6 @@ class lbann_callback_save_images : public lbann_callback { return new lbann_callback_save_images(*this); } void on_epoch_end(model *m) override; - void on_phase_end(model *m) override; void on_test_end(model *m) override; std::string name() const override { return "save images"; } diff --git a/include/lbann/models/model.hpp b/include/lbann/models/model.hpp index 009579c5dbc..b00d873e86f 100644 --- a/include/lbann/models/model.hpp +++ b/include/lbann/models/model.hpp @@ -90,7 +90,7 @@ class model { /** Human-readable description. 
*/ virtual description get_description() const; - /** Return the model's objective function. */ + /** Mathematical function to be minimized during training. */ objective_function* get_objective_function() const { return m_objective_function; } @@ -170,11 +170,6 @@ class model { } int get_num_iterations_per_epoch(execution_mode mode) const; - /** Get the current phase (multiple epochs) in layer-wise model training. */ - inline int get_current_phase() const { - return m_current_phase; - } - /** Return true if the flag to stop training is set. */ bool get_terminate_training() const { return m_terminate_training; @@ -451,10 +446,6 @@ class model { */ std::map m_step; - /** Most recent/current training step for the model. */ - int m_current_step; - int m_current_validation_step; - int m_current_testing_step; /** @details Maximum possible minibatch size supported by layers in * this model. Note that this is local to the particular model, * not across multiple models. @@ -468,8 +459,6 @@ class model { * e.g. correctly averaging gradients from multiple models. */ int m_effective_mini_batch_size; - /** current phase (multiple of epoch counts) in training a model */ - int m_current_phase; /** Communicator for the model. */ lbann_comm *m_comm; /** Current callbacks to process. 
*/ diff --git a/src/callbacks/callback_save_images.cpp b/src/callbacks/callback_save_images.cpp index 3eede6fe3d0..7696755fbc2 100644 --- a/src/callbacks/callback_save_images.cpp +++ b/src/callbacks/callback_save_images.cpp @@ -145,13 +145,6 @@ void lbann_callback_save_images::on_epoch_end(model *m) { m_layer_names); } -void lbann_callback_save_images::on_phase_end(model *m) { - save_image(m_image_prefix + "phase" + std::to_string(m->get_current_phase()), - m_image_format, - m->get_layers(), - m_layer_names); -} - void lbann_callback_save_images::on_test_end(model *m) { save_image(m_image_prefix + "test", m_image_format, diff --git a/src/models/model.cpp b/src/models/model.cpp index 198fa150401..aecc66c9cdc 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -60,7 +60,6 @@ model::model(lbann_comm *comm, m_max_mini_batch_size(mini_batch_size), m_current_mini_batch_size(mini_batch_size), m_effective_mini_batch_size(mini_batch_size), - m_current_phase(0), m_comm(comm), m_default_optimizer(default_optimizer), m_io_thread_pool(), @@ -81,7 +80,6 @@ model::model(const model& other) : m_max_mini_batch_size(other.m_max_mini_batch_size), m_current_mini_batch_size(other.m_current_mini_batch_size), m_effective_mini_batch_size(other.m_effective_mini_batch_size), - m_current_phase(other.m_current_phase), m_comm(other.m_comm), m_background_io_allowed(other.m_background_io_allowed) { @@ -135,7 +133,6 @@ model& model::operator=(const model& other) { m_max_mini_batch_size = other.m_max_mini_batch_size; m_current_mini_batch_size = other.m_current_mini_batch_size; m_effective_mini_batch_size = other.m_effective_mini_batch_size; - m_current_phase = other.m_current_phase; m_comm = other.m_comm; m_background_io_allowed = other.m_background_io_allowed; @@ -1052,16 +1049,12 @@ bool model::evaluate_mini_batch(execution_mode mode) { m->evaluate(mode, get_current_mini_batch_size()); } const bool finished = update_layers(); - switch(m_execution_mode) { - case 
execution_mode::validation: - ++m_current_validation_step; - break; - case execution_mode::testing: - ++m_current_testing_step; - break; - default: - throw lbann_exception("Illegal execution mode in evaluate mini-batch function"); - } + + // Increment mini-batch step + /// @todo Move after the callbacks + if (m_step.count(mode) < 1) { m_step[mode] = 0; } + ++m_step[mode]; + do_batch_end_cbs(mode); return finished; } @@ -1476,7 +1469,6 @@ struct lbann_model_header { uint64_t testing_step; uint32_t max_mini_batch_size; uint32_t current_mini_batch_size; - uint32_t current_phase; uint32_t callback_type;; }; @@ -1491,7 +1483,6 @@ bool model::save_to_checkpoint_shared(persist& p) { p.write_uint64(persist_type::train, "testing_step", (uint64_t) get_step(execution_mode::testing)); p.write_uint32(persist_type::train, "max_mini_batch_size", (uint32_t) m_max_mini_batch_size); p.write_uint32(persist_type::train, "current_mini_batch_size", (uint32_t) m_current_mini_batch_size); - p.write_uint32(persist_type::train, "current_phase", (uint32_t) m_current_phase); p.write_uint32(persist_type::train, "persist_callback_type", (uint32_t) p.get_cb_type()); if(p.get_cb_type() == callback_type::batch) p.write_uint64(persist_type::validate, "validation_step", (uint64_t) get_step(execution_mode::validation)); @@ -1549,7 +1540,6 @@ bool model::load_from_checkpoint_shared(persist& p) { p.read_uint64(persist_type::train, "testing_step", &header.testing_step); p.read_uint32(persist_type::train, "max_mini_batch_size", &header.max_mini_batch_size); p.read_uint32(persist_type::train, "current_mini_batch_size", &header.current_mini_batch_size); - p.read_uint32(persist_type::train, "current_phase", &header.current_phase); p.read_uint32(persist_type::train, "persist_callback_type", &header.callback_type); } else { p.read_uint64(persist_type::validate, "validation_step", &header.validation_step); @@ -1570,7 +1560,6 @@ bool model::load_from_checkpoint_shared(persist& p) { 
m_step[execution_mode::testing] = (int) header.testing_step; m_max_mini_batch_size = (int) header.max_mini_batch_size; m_current_mini_batch_size = (int) header.current_mini_batch_size; - m_current_phase = header.current_phase; // set state of persist object to know which type of ckpt we are returning from. p.set_cb_type((callback_type) header.callback_type); } else { @@ -1608,7 +1597,6 @@ bool model::save_to_checkpoint_distributed(persist& p){ p.write_uint64(persist_type::train, "testing_step", (uint64_t) get_step(execution_mode::testing)); p.write_uint32(persist_type::train, "max_mini_batch_size", (uint32_t) m_max_mini_batch_size); p.write_uint32(persist_type::train, "current_mini_batch_size", (uint32_t) m_current_mini_batch_size); - p.write_uint32(persist_type::train, "current_phase", (uint32_t) m_current_phase); p.write_uint32(persist_type::train, "persist_callback_type", (uint32_t) p.get_cb_type()); if(p.get_cb_type() == callback_type::batch) p.write_uint64(persist_type::validate, "validataion_step", (uint64_t) get_step(execution_mode::validation)); @@ -1657,7 +1645,6 @@ bool model::load_from_checkpoint_distributed(persist& p){ p.read_uint64(persist_type::train, "testing_step", &header.testing_step); p.read_uint32(persist_type::train, "max_mini_batch_size", &header.max_mini_batch_size); p.read_uint32(persist_type::train, "current_mini_batch_size", &header.current_mini_batch_size); - p.read_uint32(persist_type::train, "current_phase", &header.current_phase); p.read_uint32(persist_type::train, "persist_callback_type", &header.callback_type); m_execution_mode = (execution_mode) header.execution_mode; @@ -1669,7 +1656,6 @@ bool model::load_from_checkpoint_distributed(persist& p){ m_step[execution_mode::testing] = (int) header.testing_step; m_max_mini_batch_size = (int) header.max_mini_batch_size; m_current_mini_batch_size = (int) header.current_mini_batch_size; - m_current_phase = header.current_phase; p.set_cb_type((callback_type) header.callback_type); 
load_rng_from_checkpoint_shared(p, m_comm); From 8fc8e49ce0008829d3bb3abc3dd62f1d3e225f3f Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 27 Feb 2019 12:58:38 -0800 Subject: [PATCH 121/443] Copying default optimizer in copy constructor/operator. A copy of a model can now call its destructor successfully. Reorganizing member variable order in model class. --- include/lbann/models/model.hpp | 75 +++++++++++++++++----------------- model_zoo/lbann.cpp | 4 -- src/models/model.cpp | 56 +++++++++++++------------ 3 files changed, 69 insertions(+), 66 deletions(-) diff --git a/include/lbann/models/model.hpp b/include/lbann/models/model.hpp index b00d873e86f..6c512793658 100644 --- a/include/lbann/models/model.hpp +++ b/include/lbann/models/model.hpp @@ -49,7 +49,7 @@ namespace lbann { // Forward declarations class lbann_callback; -/** Base class for neural network models. */ +/** @brief Abstract base class for neural network models. */ class model { public: @@ -57,9 +57,9 @@ class model { // Life cycle functions // =========================================== - model(lbann_comm *comm, + model(lbann_comm* comm, El::Int mini_batch_size, - objective_function *obj_fn, + objective_function* obj_fn, optimizer* default_optimizer = nullptr); model(const model& other); model& operator=(const model& other); @@ -421,8 +421,8 @@ class model { private: - /** Mathematical function to be minimized during training. */ - objective_function* m_objective_function; + /** LBANN communicator. */ + lbann_comm* m_comm; /** @brief Model instance's name. * @detailed Each model in a trainer should have a unique, @@ -433,64 +433,65 @@ class model { /** Current execution mode. */ execution_mode m_execution_mode = execution_mode::training; - /** @brief Whether to terminate training. - * @detailed If set to true, training will terminate immediately - * before the next epoch. - */ - bool m_terminate_training = false; - - /** Current epoch. 
*/ + /** Number of times the training data set has been traversed. */ El::Int m_epoch = 0; - /** @brief Current mini-batch step for each execution mode. + + /** @brief Number of mini-batch steps performed. * @detailed Step counts are not reset after each epoch. */ std::map m_step; + /** @brief Whether to terminate training. + * @detailed If true, training will terminate immediately before + * the next epoch. + */ + bool m_terminate_training = false; + + /** Size of the current mini-batch in the model. */ + int m_current_mini_batch_size; /** @details Maximum possible minibatch size supported by layers in * this model. Note that this is local to the particular model, * not across multiple models. */ int m_max_mini_batch_size; - /** Size of the current mini-batch in the model. */ - int m_current_mini_batch_size; /** The "effective" size of a minibatch. * * This is the size of the minibatch across all models and used for * e.g. correctly averaging gradients from multiple models. */ int m_effective_mini_batch_size; - /** Communicator for the model. */ - lbann_comm *m_comm; - /** Current callbacks to process. */ - std::vector m_callbacks; - /** Default optimizer. - * - * If a layer needs to construct an optimizer during setup, it will - * make a copy of the default optimizer. + /** @brief Tensor operations. + * @details The list is in execution order for forward propagation. */ - optimizer *m_default_optimizer; + std::vector> m_layers; - /** List of model metrics. - * - * A metric can be used to evaluate the performance of the model - * without affecting the training process. + /** @brief Trainable parameters. */ + std::vector m_weights; + + /** @detailed If a layer needs to construct an optimizer during + * setup, it will make a copy of the default optimizer. This object + * is just used to create copies and is not actually used for + * optimization. */ - std::vector m_metrics; + optimizer* m_default_optimizer = nullptr; - /** List of weights in model. 
*/ - std::vector m_weights; + /** Mathematical function to be minimized during training. */ + objective_function* m_objective_function; + + /** @brief Numerical quantities to evaluate model performance. + * @detailed Does not affect training. + */ + std::vector m_metrics; + + /** Current callbacks to process. */ + std::vector m_callbacks; /** Threads available for I/O */ std::shared_ptr m_io_thread_pool; /** Flag that allows input layers to fetch data in the background */ - bool m_background_io_allowed; - - /** @brief List of layers in model. - * @details The list is in execution order for forward propagation. - */ - std::vector> m_layers; + bool m_background_io_allowed = true; // =========================================== // Functions to add utility layers diff --git a/model_zoo/lbann.cpp b/model_zoo/lbann.cpp index d46a510f459..5a4d0741bc4 100644 --- a/model_zoo/lbann.cpp +++ b/model_zoo/lbann.cpp @@ -85,10 +85,6 @@ int main(int argc, char *argv[]) { model *model = build_model_from_prototext(argc, argv, pb, comm, io_thread_pool, true); - /// @todo Remove - auto* model_copy = model->copy(); - delete model_copy; - if (! 
(opts->has_bool("exit_after_setup") && opts->get_bool("exit_after_setup"))) { // Train model diff --git a/src/models/model.cpp b/src/models/model.cpp index aecc66c9cdc..fc86b862fb0 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -52,18 +52,16 @@ namespace lbann { // Life cycle functions // ============================================= -model::model(lbann_comm *comm, +model::model(lbann_comm* comm, El::Int mini_batch_size, - objective_function *obj_fn, + objective_function* obj_fn, optimizer* default_optimizer) - : m_objective_function(obj_fn), - m_max_mini_batch_size(mini_batch_size), + : m_comm(comm), m_current_mini_batch_size(mini_batch_size), + m_max_mini_batch_size(mini_batch_size), m_effective_mini_batch_size(mini_batch_size), - m_comm(comm), m_default_optimizer(default_optimizer), - m_io_thread_pool(), - m_background_io_allowed(true) { + m_objective_function(obj_fn) { // Default model name static El::Int num_models = 0; @@ -73,30 +71,32 @@ model::model(lbann_comm *comm, } model::model(const model& other) : + m_comm(other.m_comm), + m_name(other.m_name), m_execution_mode(other.m_execution_mode), - m_terminate_training(other.m_terminate_training), m_epoch(other.m_epoch), m_step(other.m_step), - m_max_mini_batch_size(other.m_max_mini_batch_size), + m_terminate_training(other.m_terminate_training), m_current_mini_batch_size(other.m_current_mini_batch_size), + m_max_mini_batch_size(other.m_max_mini_batch_size), m_effective_mini_batch_size(other.m_effective_mini_batch_size), - m_comm(other.m_comm), m_background_io_allowed(other.m_background_io_allowed) { // Deep copies - m_objective_function = other.m_objective_function; - m_metrics = other.m_metrics; - m_callbacks = other.m_callbacks; - m_weights = other.m_weights; - if (m_objective_function != nullptr) { - m_objective_function = m_objective_function->copy(); - } + m_default_optimizer = (other.m_default_optimizer ? 
+ other.m_default_optimizer->copy() : nullptr); + m_objective_function = (other.m_objective_function ? + other.m_objective_function->copy() : nullptr); + m_metrics = other.m_metrics; + m_callbacks = other.m_callbacks; for (auto& m : m_metrics) { m = m->copy(); } for (auto& cb : m_callbacks) { cb = cb->copy(); } + + // Copy layers std::unordered_map layer_map; m_layers.reserve(other.m_layers.size()); for (const auto& ptr : other.m_layers) { @@ -107,12 +107,17 @@ model::model(const model& other) : m_layers.emplace_back(new_layer); layer_map[old_layer] = new_layer; } + + // Copy weights + m_weights = other.m_weights; std::unordered_map weights_map; for (auto& w : m_weights) { auto&& w_copy = w->copy(); weights_map[w] = w_copy; w = w_copy; } + + // Fix pointers remap_pointers(layer_map, weights_map); } @@ -126,14 +131,15 @@ model& model::operator=(const model& other) { for (const auto& w : m_weights) { delete w; } // Shallow copies + m_comm = other.m_comm; + m_name = other.m_name; m_execution_mode = other.m_execution_mode; - m_terminate_training = other.m_terminate_training; m_epoch = other.m_epoch; m_step = other.m_step; - m_max_mini_batch_size = other.m_max_mini_batch_size; + m_terminate_training = other.m_terminate_training; m_current_mini_batch_size = other.m_current_mini_batch_size; + m_max_mini_batch_size = other.m_max_mini_batch_size; m_effective_mini_batch_size = other.m_effective_mini_batch_size; - m_comm = other.m_comm; m_background_io_allowed = other.m_background_io_allowed; // Deep copies @@ -171,11 +177,11 @@ model& model::operator=(const model& other) { } model::~model() { - if (m_objective_function) { delete m_objective_function; } - if (m_default_optimizer != nullptr) { delete m_default_optimizer; } - for (const auto& w : m_weights) { delete w; } - for (const auto& m : m_metrics) { delete m; } - for (const auto& cb : m_callbacks) { delete cb; } + if (m_objective_function != nullptr) { delete m_objective_function; } + if (m_default_optimizer != nullptr) 
{ delete m_default_optimizer; } + for (const auto& w : m_weights) { delete w; } + for (const auto& m : m_metrics) { delete m; } + for (const auto& cb : m_callbacks) { delete cb; } } // ============================================= From 7b386b1762754debc56a31d0db73db67bf76aed3 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 27 Feb 2019 13:32:54 -0800 Subject: [PATCH 122/443] Renaming data structures and types used in the sample list to track the statistics and metadata of files used by the samples. --- .../data_readers/data_reader_jag_conduit.hpp | 2 +- .../lbann/data_readers/sample_list_jag.hpp | 57 ++++++++++--------- .../data_readers/sample_list_jag_impl.hpp | 50 ++++++++-------- src/data_readers/data_reader_jag_conduit.cpp | 4 +- 4 files changed, 57 insertions(+), 56 deletions(-) diff --git a/include/lbann/data_readers/data_reader_jag_conduit.hpp b/include/lbann/data_readers/data_reader_jag_conduit.hpp index ea6219435f5..32e949740c1 100644 --- a/include/lbann/data_readers/data_reader_jag_conduit.hpp +++ b/include/lbann/data_readers/data_reader_jag_conduit.hpp @@ -59,7 +59,7 @@ class data_reader_jag_conduit : public generic_data_reader { using sample_locator_t = std::pair; using sample_map_t = std::vector; ///< valid sample map type using sample_t = sample_list_jag::sample_t; - using sample_id_t = sample_list_jag::sample_id_t; + using sample_file_id_t = sample_list_jag::sample_file_id_t; /// linear transform on X defined as: first * X + second => X' using linear_transform_t = std::pair; diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index 6ca8fdc3960..879cfd70435 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -75,19 +75,20 @@ class sample_list_jag { /// The type of the native identifier of a sample rather than an arbitrarily assigned index using sample_name_t = std::string; /// The type for arbitrarily assigned index - 
using sample_id_t = std::size_t; + using sample_file_id_t = std::size_t; /// To describe a sample as a pair of the file to which it belongs and its name // using sample_t = std::pair; - using sample_t = std::pair; - /// Map of the file index to the file name, file descriptor, and + using sample_t = std::pair; + /// Statistics for each file used by the sample list: includes the file name, file descriptor, and /// and a queue of each step and substep when data will be loaded from the file - using sample_id_map_t = std::tuple>>; // rename - // to sample_to_file_map + using file_id_stats_t = std::tuple>>; + /// Type for the list of samples using samples_t = std::vector< sample_t >; - using samples_id_map_v_t = std::vector< sample_id_map_t >; // rename to sample_to_file_v or something + /// Mapping of the file index to the statistics for each file + using file_id_stats_v_t = std::vector< file_id_stats_t >; // rename to sample_to_file_v or something /// Type for the map of file descriptors to usage step and substep - using fd_use_map_t = std::pair>; + using fd_use_map_t = std::pair>; sample_list_jag(); ~sample_list_jag(); @@ -151,25 +152,25 @@ class sample_list_jag { /// Allow read-only access to the metadata of the idx-th sample in the list const sample_t& operator[](size_t idx) const; - const std::string& get_samples_filename(sample_id_t id) const { - return std::get<0>(m_sample_id_map[id]); + const std::string& get_samples_filename(sample_file_id_t id) const { + return std::get<0>(m_file_id_stats_map[id]); } const std::string& get_samples_dirname() const { return m_header.get_file_dir(); } - hid_t get_samples_hdf5_handle(sample_id_t id) const { - hid_t h = std::get<1>(m_sample_id_map[id]); + hid_t get_samples_hdf5_handle(sample_file_id_t id) const { + hid_t h = std::get<1>(m_file_id_stats_map[id]); return h; } - void set_samples_filename(sample_id_t id, const std::string& filename) { - std::get<0>(m_sample_id_map[id]) = filename; + void 
set_samples_filename(sample_file_id_t id, const std::string& filename) { + std::get<0>(m_file_id_stats_map[id]) = filename; } - void set_samples_hdf5_handle(sample_id_t id, hid_t h) { - auto&& e = m_sample_id_map[id]; + void set_samples_hdf5_handle(sample_file_id_t id, hid_t h) { + auto&& e = m_file_id_stats_map[id]; std::get<1>(e) = h; // std::cout << "Attempt to set the hdf5 handle " << h << " for filename " << std::get<0>(e) << std::endl; @@ -205,7 +206,7 @@ class sample_list_jag { // std::cout << "The file descriptors are over the limit, lets close " << m_open_fd_pq.front().first << std::endl; // { auto& f = m_open_fd_pq.front(); - auto& victim = m_sample_id_map[f.first]; + auto& victim = m_file_id_stats_map[f.first]; // std::cout << "{" << f.second.first << ", " << f.second.second << "}" << std::endl; // // std::cout << q.top() << " "; // } @@ -243,11 +244,11 @@ class sample_list_jag { // std::cout << std::endl; } - // std::get<1>(m_sample_id_map[id]) = h; + // std::get<1>(m_file_id_stats_map[id]) = h; // std::cout << "I am setting the hdf5 handle " << h << " for filename " << filename << std::endl; // m_open_fd_map.emplace(std::make_tuple(filename, h, access_count)); - // for (auto&& e : m_sample_id_map) { + // for (auto&& e : m_file_id_stats_map) { // std::cout << "set_files_hdf5_handle {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; // if(std::get<2>(e).empty()) { // std::cout << "empty" << std::endl; @@ -258,14 +259,14 @@ class sample_list_jag { // std::cout << std::endl; // } // } - // for (auto&& e : m_sample_id_map) + // for (auto&& e : m_file_id_stats_map) // std::cout << "{" << std::get<0)>(e) << ", " << std::get<1>(e) << ", " << std::get<2>(e) << "}" << std::endl; } void set_files_hdf5_handle(const std::string& filename, hid_t h) { - sample_id_t id = 0; - for (auto&& e : m_sample_id_map) { + sample_file_id_t id = 0; + for (auto&& e : m_file_id_stats_map) { if(std::get<0>(e) == filename) { break; } @@ -276,7 +277,7 @@ class sample_list_jag { 
hid_t open_samples_hdf5_handle(const size_t i) { const sample_t& s = m_sample_list[i]; - sample_id_t id = s.first; + sample_file_id_t id = s.first; hid_t h = get_samples_hdf5_handle(id); if (h <= static_cast(0)) { const std::string& file_name = get_samples_filename(id); @@ -307,7 +308,7 @@ class sample_list_jag { // LBANN_WARNING("We have weirdness here, the head of the queue is not " + std::to_string(id)); // } - auto& e = m_sample_id_map[id]; + auto& e = m_file_id_stats_map[id]; // std::cout << "open_files_hdf5_handle updated list {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; // for (auto&& v : std::get<2>(e)) { @@ -346,10 +347,10 @@ class sample_list_jag { void close_if_done_samples_hdf5_handle(const size_t i) { const sample_t& s = m_sample_list[i]; - sample_id_t id = s.first; + sample_file_id_t id = s.first; hid_t h = get_samples_hdf5_handle(id); if (h > static_cast(0)) { - auto& e = m_sample_id_map[id]; + auto& e = m_file_id_stats_map[id]; auto& file_access_queue = std::get<2>(e); if(file_access_queue.empty()) { conduit::relay::io::hdf5_close_file(std::get<1>(e)); @@ -403,11 +404,11 @@ class sample_list_jag { /// header info of sample list sample_list_header m_header; - /// Contains list of all sample + /// List of all samples with a file identifier and sample name for each sample samples_t m_sample_list; - /// Maps sample IDs to file names, file descriptors, and use counts - samples_id_map_v_t m_sample_id_map; + /// Maps sample's file id to file names, file descriptors, and use counts + file_id_stats_v_t m_file_id_stats_map; /// Maps a global index to a local index sample_list_indexer m_indexer; diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index dd2b605dc72..b3aa2acda2d 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -79,13 +79,13 @@ inline sample_list_jag::sample_list_jag() inline 
sample_list_jag::~sample_list_jag() { // Close the existing open files - for(auto f : m_sample_id_map) { + for(auto f : m_file_id_stats_map) { if(std::get<1>(f) > 0) { conduit::relay::io::hdf5_close_file(std::get<1>(f)); } std::get<1>(f) = 0; } - m_sample_id_map.clear(); + m_file_id_stats_map.clear(); m_open_fd_pq.clear(); } @@ -292,8 +292,8 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str m_file_map[filename] = sample_names.size(); } - sample_id_t index = m_sample_id_map.size(); - m_sample_id_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); + sample_file_id_t index = m_file_id_stats_map.size(); + m_file_id_stats_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); set_files_hdf5_handle(filename, hdf5_file_hnd); size_t valid_sample_count = 0u; @@ -380,8 +380,8 @@ inline void sample_list_jag::read_exclusive_list(std::istream& istrm, size_t str std::unordered_set set_of_samples(sample_names.begin(), sample_names.end()); - sample_id_t index = m_sample_id_map.size(); - m_sample_id_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); + sample_file_id_t index = m_file_id_stats_map.size(); + m_file_id_stats_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); set_files_hdf5_handle(filename, hdf5_file_hnd); size_t valid_sample_count = 0u; @@ -522,11 +522,11 @@ inline size_t sample_list_jag::all_gather_field(T data, std::vector& gathered inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { int num_ranks = comm.get_procs_per_trainer(); std::vector per_rank_samples(num_ranks); - std::vector per_rank_sample_id_map(num_ranks); + std::vector per_rank_file_id_stats_map(num_ranks); std::vector> per_rank_file_map(num_ranks); // Close the existing open files - for(auto&& e : m_sample_id_map) { + for(auto&& e : m_file_id_stats_map) { conduit::relay::io::hdf5_close_file(std::get<1>(e)); std::get<1>(e) = 0; std::get<2>(e).clear(); @@ -534,34 +534,34 @@ inline void 
sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { m_open_fd_pq.clear(); size_t num_samples = all_gather_field(m_sample_list, per_rank_samples, comm); - size_t num_ids = all_gather_field(m_sample_id_map, per_rank_sample_id_map, comm); + size_t num_ids = all_gather_field(m_file_id_stats_map, per_rank_file_id_stats_map, comm); size_t num_files = all_gather_field(m_file_map, per_rank_file_map, comm); m_sample_list.clear(); - m_sample_id_map.clear(); + m_file_id_stats_map.clear(); m_sample_list.reserve(num_samples); - m_sample_id_map.reserve(num_ids); + m_file_id_stats_map.reserve(num_ids); m_file_map.reserve(num_files); for(int r = 0; r < num_ranks; r++) { const samples_t& sample_list = per_rank_samples[r]; - const samples_id_map_v_t& sample_id_map = per_rank_sample_id_map[r]; + const file_id_stats_v_t& file_id_stats_map = per_rank_file_id_stats_map[r]; const std::unordered_map& file_map = per_rank_file_map[r]; for (const auto& s : sample_list) { - sample_id_t index = s.first; - const std::string& filename = std::get<0>(sample_id_map[index]); - if(index >= m_sample_id_map.size() - || (std::get<0>(m_sample_id_map.back()) != filename)) { - index = m_sample_id_map.size(); - m_sample_id_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); + sample_file_id_t index = s.first; + const std::string& filename = std::get<0>(file_id_stats_map[index]); + if(index >= m_file_id_stats_map.size() + || (std::get<0>(m_file_id_stats_map.back()) != filename)) { + index = m_file_id_stats_map.size(); + m_file_id_stats_map.emplace_back(std::make_tuple(filename, 0, std::deque>{})); // Update the file map structure if(m_file_map.count(filename) == 0) { m_file_map[filename] = file_map.at(filename); } }else { - for(size_t i = 0; i < m_sample_id_map.size(); i++) { - if(filename == std::get<0>(m_sample_id_map[i])) { + for(size_t i = 0; i < m_file_id_stats_map.size(); i++) { + if(filename == std::get<0>(m_file_id_stats_map[i])) { index = i; break; } @@ -575,7 +575,7 @@ inline 
void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { } inline void sample_list_jag::compute_epochs_file_usage(const std::vector& shuffled_indices, int mini_batch_size, const lbann_comm& comm) { - for (auto&& e : m_sample_id_map) { + for (auto&& e : m_file_id_stats_map) { std::get<1>(e) = 0; std::get<2>(e).clear(); } @@ -583,13 +583,13 @@ inline void sample_list_jag::compute_epochs_file_usage(const std::vector& s for (size_t i = 0; i < shuffled_indices.size(); i++) { int idx = shuffled_indices[i]; const auto& s = m_sample_list[idx]; - sample_id_t index = s.first; + sample_file_id_t index = s.first; if((i % mini_batch_size) % comm.get_procs_per_trainer() == static_cast(comm.get_rank_in_trainer())) { /// Enqueue the iteration step when the sample will get used int step = i / mini_batch_size; int substep = (i % mini_batch_size) / comm.get_procs_per_trainer(); - std::get<2>(m_sample_id_map[index]).emplace_back(std::make_pair(step, substep)); + std::get<2>(m_file_id_stats_map[index]).emplace_back(std::make_pair(step, substep)); } } } @@ -600,7 +600,7 @@ inline void sample_list_jag::clear() { } template void sample_list_jag::serialize( Archive & ar ) { - ar(m_num_partitions, m_header, m_sample_list, m_sample_id_map); + ar(m_num_partitions, m_header, m_sample_list, m_file_id_stats_map); } inline void sample_list_jag::write_header(std::string& sstr, size_t num_files) const { @@ -634,7 +634,7 @@ inline bool sample_list_jag::to_string(size_t p, std::string& sstr) const { std::map> tmp_file_map; for (const auto& s : m_sample_list) { - std::string filename = std::get<0>(m_sample_id_map[s.first]); + std::string filename = std::get<0>(m_file_id_stats_map[s.first]); tmp_file_map[filename].emplace_back(s.second); } diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index ebca9c4333b..e9381cb847c 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -314,7 
+314,7 @@ bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::strin const std::string& sample_name = s.second; const std::string path = sample_name + key; - sample_id_t id = s.first; + sample_file_id_t id = s.first; hid_t h = m_sample_list.get_samples_hdf5_handle(id); if (h <= static_cast(0) || !conduit::relay::io::hdf5_has_path(h, path)) { const std::string& file_name = m_sample_list.get_samples_filename(id); @@ -330,7 +330,7 @@ bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::strin bool data_reader_jag_conduit::has_conduit_path(const size_t i, const std::string& key) const { const sample_t& s = m_sample_list[i]; - sample_id_t id = s.first; + sample_file_id_t id = s.first; const std::string& file_name = m_sample_list.get_samples_filename(id); const std::string& sample_name = s.second; const hid_t h = m_sample_list.get_samples_hdf5_handle(id); From eb4806f4bf01b0a26e2807de789b3ee616e3f90e Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 27 Feb 2019 13:42:30 -0800 Subject: [PATCH 123/443] Removed the deprecated indexer structure from the sample list. --- .../lbann/data_readers/sample_list_jag.hpp | 29 ---------- .../data_readers/sample_list_jag_impl.hpp | 56 +------------------ 2 files changed, 1 insertion(+), 84 deletions(-) diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index 879cfd70435..8712b079401 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -47,26 +47,6 @@ struct sample_list_header { } }; -/** - * Maps a global index of a sample list to a local index. - * When managing the sample list in a distributed fashion, with which every - * one has the same copy (the whole global list), m_partition_offset must be - * zero. In this case, the local index is the same as the global index. 
- * When managing the sample list in a centralized fashion, with which each - * has a portion of the list that corresponds to the only samples it needs, - * a global index is subtracted by m_partition_offset for local indexing. - */ -struct sample_list_indexer { - sample_list_indexer(); - size_t operator()(size_t idx) const; - - void set_partition_offset(size_t o); - size_t get_partition_offset() const; - bool check_index(size_t i) const; - - size_t m_partition_offset; -}; - static const std::string conduit_hdf5_exclusion_list = "CONDUIT_HDF5_EXCLUSION"; static const std::string conduit_hdf5_inclusion_list = "CONDUIT_HDF5_INCLUSION"; @@ -96,12 +76,6 @@ class sample_list_jag { /// Set the number of partitions and clear internal states void set_num_partitions(size_t n); - /// Set the index mapping function - void set_indexer(const sample_list_indexer& indexer); - - /// Get the index mapping function - const sample_list_indexer& get_indexer() const; - /// Load a sample list file void load(const std::string& samplelist_file, size_t stride=1, size_t offset=0); @@ -410,9 +384,6 @@ class sample_list_jag { /// Maps sample's file id to file names, file descriptors, and use counts file_id_stats_v_t m_file_id_stats_map; - /// Maps a global index to a local index - sample_list_indexer m_indexer; - /// Track the number of samples per file std::unordered_map m_file_map; diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index b3aa2acda2d..3167aeafcdf 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -46,34 +46,6 @@ inline const std::string& sample_list_header::get_file_dir() const { return m_file_dir; } - -inline sample_list_indexer::sample_list_indexer() -: m_partition_offset(0u) { -} - -inline bool sample_list_indexer::check_index(size_t i) const { - return (i >= m_partition_offset); -} - -inline size_t 
sample_list_indexer::operator()(size_t i) const { - if (!check_index(i)) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) - + " :: index (" + std::to_string(i) - + ") is less than the partition offset (" - + std::to_string(m_partition_offset) + ")"); - } - return i - m_partition_offset; -} - -inline void sample_list_indexer::set_partition_offset(size_t o) { - m_partition_offset = o; -} - -inline size_t sample_list_indexer::get_partition_offset() const { - return m_partition_offset; -} - - inline sample_list_jag::sample_list_jag() : m_num_partitions(1u) {} @@ -99,50 +71,30 @@ inline void sample_list_jag::set_num_partitions(size_t n) { m_num_partitions = n; } -inline void sample_list_jag::set_indexer(const sample_list_indexer& indexer) { - m_indexer = indexer; -} - -inline const sample_list_indexer& sample_list_jag::get_indexer() const { - return m_indexer; -} - - inline void sample_list_jag::load(const std::string& samplelist_file, size_t stride, size_t offset) { std::ifstream istr(samplelist_file); get_samples_per_file(istr, samplelist_file, stride, offset); istr.close(); } - inline sample_list_header sample_list_jag::load_header(const std::string& samplelist_file) const { std::ifstream istr(samplelist_file); return read_header(istr, samplelist_file); } - inline void sample_list_jag::load_from_string(const std::string& samplelist) { std::istringstream istr(samplelist); get_samples_per_file(istr, "", 1, 0); } - inline size_t sample_list_jag::size() const { return m_sample_list.size(); } - inline bool sample_list_jag::empty() const { return m_sample_list.empty(); } - -inline bool sample_list_jag::check_index(size_t idx) const { - return m_indexer.check_index(idx) && - (m_indexer(idx) < m_sample_list.size()); -} - - inline std::string sample_list_jag::read_header_line(std::istream& istrm, const std::string& filename, const std::string& info) const { if (!istrm.good()) { throw lbann_exception(std::string{} + __FILE__ + " " + 
std::to_string(__LINE__) @@ -773,13 +725,7 @@ inline const sample_list_header& sample_list_jag::get_header() const { } inline const sample_list_jag::sample_t& sample_list_jag::operator[](size_t idx) const { - size_t i = m_indexer(idx); - if (i >= m_sample_list.size()) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) - + " :: index (" + std::to_string(i) + ") out of range [0 " - + std::to_string(m_sample_list.size()) + ")"); - } - return m_sample_list[i]; + return m_sample_list[idx]; } } // end of namespace lbann From a8aa8e177f94b10be75341af68ea2aaa931ddcee Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Wed, 27 Feb 2019 14:08:21 -0800 Subject: [PATCH 124/443] Add the --loss-y{min,max} arguments --- scripts/proto/lbann/plot/plot.py | 11 ++++++++++- scripts/proto/scripts/plot/lbplot | 15 ++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/scripts/proto/lbann/plot/plot.py b/scripts/proto/lbann/plot/plot.py index 3f4808fda8e..69c0b6d4721 100644 --- a/scripts/proto/lbann/plot/plot.py +++ b/scripts/proto/lbann/plot/plot.py @@ -26,8 +26,13 @@ def _get_time_axis(time_list, units='hours'): return time_axis def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', - plot_accuracy=True, merge_train_val=False, pretty_ylim=True, save_fig=None, save_csv=None): + plot_accuracy=True, merge_train_val=False, pretty_ylim=True, save_fig=None, save_csv=None, ylim=None): """Tabulate and plot stats from LBANN or PyTorch training in common format.""" + + if pretty_ylim and ylim is not None: + print('ERROR: pretty_ylim and ylim must not be set at the same time.') + sys.exit(1) + ### Load stat dicts and print stat summary stat_dict_list = [] # Get run names @@ -174,6 +179,8 @@ def parse_num(d, key): plt.ylabel('Train Loss') if pretty_ylim: plt.ylim(0, PRETTY_YLIM_LOSS) + elif ylim is not None: + plt.ylim(*ylim) p, = plt.plot(stat_dict['train_axis'], 
stat_dict['train_loss'], label=run_name_train) @@ -184,6 +191,8 @@ def parse_num(d, key): plt.ylabel('Val Loss') if pretty_ylim: plt.ylim(0, PRETTY_YLIM_LOSS) + elif ylim is not None: + plt.ylim(*ylim) kwargs = {} if not merge_train_val else {"color": p.get_color(), "linestyle": "dashed"} plt.plot(stat_dict['val_axis'], stat_dict['val_loss'], label=run_name_val, **kwargs) diff --git a/scripts/proto/scripts/plot/lbplot b/scripts/proto/scripts/plot/lbplot index db2bc1429d4..a5401df9535 100755 --- a/scripts/proto/scripts/plot/lbplot +++ b/scripts/proto/scripts/plot/lbplot @@ -27,15 +27,28 @@ def main(): help="Save the training/validation curve plot as an image file.") parser.add_argument('--save-csv', type=str, default='', help="Save the sumamry table as a CSV file.") + parser.add_argument('--loss-ymin', type=float, # default=None, # float("-inf"), + help="The minimum y-axis limit of the loss plot.") + parser.add_argument('--loss-ymax', type=float, # default=None, # float("inf"), + help="The maximum y-axis limit of the loss plot.") args = parser.parse_args() + ylim = None + if args.loss_ymin is not None: + if args.loss_ymax is None: + print("ERROR: --loss-ylim and --loss-ymax should be set at the same time.") + exit(1) + + ylim = [args.loss_ymin, args.loss_ymax] + # Tabulate and plot stats from user input files plot(args.stat_path, args.stat_name, ind_var=args.ind_var, time_units=args.time_units, plot_accuracy=(not args.no_accuracy), merge_train_val=args.merge_train_val, pretty_ylim=args.pretty_ylim, save_fig=args.save_fig if args.save_fig != '' else None, - save_csv=args.save_csv if args.save_csv != '' else None) + save_csv=args.save_csv if args.save_csv != '' else None, + ylim=[args.loss_ymin, args.loss_ymax]) if __name__=='__main__': From 1a6bbdceeffdfd895b67aedd3d5f5fe3a470af65 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Wed, 27 Feb 2019 14:29:35 -0800 Subject: [PATCH 125/443] add any wrapper --- cmake/configure_files/lbann_config.hpp.in | 3 + cmake/modules/SetupCXX.cmake | 13 + include/lbann/utils/CMakeLists.txt | 1 + include/lbann/utils/any.hpp | 350 ++++++++++++++++++++++ include/lbann/utils/memory.hpp | 11 +- 5 files changed, 373 insertions(+), 5 deletions(-) create mode 100644 include/lbann/utils/any.hpp diff --git a/cmake/configure_files/lbann_config.hpp.in b/cmake/configure_files/lbann_config.hpp.in index bdf17666422..4a712dca4a2 100644 --- a/cmake/configure_files/lbann_config.hpp.in +++ b/cmake/configure_files/lbann_config.hpp.in @@ -42,6 +42,9 @@ #cmakedefine LBANN_SYS_SENDFILE_OK +#cmakedefine LBANN_HAS_STD_ANY +#cmakedefine LBANN_HAS_STD_MAKE_UNIQUE + // Define the LBANN datatype namespace lbann { diff --git a/cmake/modules/SetupCXX.cmake b/cmake/modules/SetupCXX.cmake index eeac1e6336e..9960b8b2fad 100644 --- a/cmake/modules/SetupCXX.cmake +++ b/cmake/modules/SetupCXX.cmake @@ -150,3 +150,16 @@ endif () # Check if we can use Linux's sys/sendfile.h check_include_file_cxx(sys/sendfile.h LBANN_SYS_SENDFILE_OK) + +# Testing for std::any +include(CheckCXXSourceCompiles) +set(_ANY_TEST_CODE + "#include +int main(int, char* argv[]) { std::any x; }") +check_cxx_source_compiles("${_ANY_TEST_CODE}" LBANN_HAS_STD_ANY) + +set(_MAKE_UNIQUE_TEST_CODE + "#include +int main(int, char* argv[]) { auto x = std::make_unique(); }") +check_cxx_source_compiles( + "${_MAKE_UNIQUE_TEST_CODE}" LBANN_HAS_STD_MAKE_UNIQUE) diff --git a/include/lbann/utils/CMakeLists.txt b/include/lbann/utils/CMakeLists.txt index 2b3d75a8b37..e13b4103948 100644 --- a/include/lbann/utils/CMakeLists.txt +++ b/include/lbann/utils/CMakeLists.txt @@ -1,5 +1,6 @@ # Add the headers for this directory set_full_path(THIS_DIR_HEADERS + any.hpp compiler_control.hpp cublas.hpp cuda.hpp diff --git a/include/lbann/utils/any.hpp b/include/lbann/utils/any.hpp new file mode 100644 index 00000000000..00fa8d6e60c --- 
/dev/null +++ b/include/lbann/utils/any.hpp @@ -0,0 +1,350 @@ +#ifndef LBANN_UTILS_ANY_HPP_INCLUDED +#define LBANN_UTILS_ANY_HPP_INCLUDED + +#include + +#ifdef LBANN_HAVE_STD_ANY + +#include + +#else +#include // non-C++14 make_unique + +#include +#include +#include +#include +#endif // LBANN_HAVE_STD_ANY + +namespace lbann +{ +namespace utils +{ + +#ifdef LBANN_HAVE_STD_ANY +// This case is simple symbol injection; don't feel great about this, +// but it's not my fault they couldn't get this into C++11... + +using any = std::any; +using bad_any_cast = std::bad_any_cast; +using std::any_cast; +using std::make_any; + +#else + +/** @class any + * @brief Type-erasure class to store any object of copy-constructible type. + * + * This class is (mostly) API-compatible with std::any. The most + * notable omission is the std::in_place_type_t overloads of the + * constructor (std::in_place_type_t is also C++17, and I don't want + * to implement the whole standard). For best results, do not attempt + * to use those in this code. For even better results (yes, better + * than best. English is overrated), incessently remind your friends, + * colleagues, and, most importantly, vendors that it's 2019, that + * 2019 > 2017, and that there are excellent free compilers in the + * world until they concede to updating to a modern compiler and this + * implementation can be banished to the depths. + */ +class any +{ +public: + + /** @brief Default construct an empty "any" */ + any() noexcept = default; + + /** @brief Construct an object holding a T */ + template + any(T&& obj); + + /** @brief Copy construct from another container. + * + * Makes a copy of the held object. + */ + any(any const& other); + + /** @brief Move construct from another container */ + any(any&& other) noexcept = default; + + /** @brief Default destructor */ + ~any() = default; + + /** @brief Copy assign from another container + * + * Makes a deep copy of the held object. 
+ */ + any& operator=(any const& other); + + /** @brief Move assign from another container */ + any& operator=(any&& other) noexcept = default; + + /** @name Modifiers */ + ///@{ + + /** @brief Change the contained object to one of type T + * + * Any held object is destroyed and the new object is + * emplace-constructed from the arguments given. + * + * @tparam T The type of the new held object + * @tparam Args (Deduced) types of arguments to the T constructor + * + * @param args The arguments to the T constructor + * + * @return A reference to the newly constructed object + */ + template + auto emplace(Args&&... args) -> typename std::decay::type&; + + /** @brief Reset the container to an empty state, destroying the + * held object. + */ + void reset() noexcept; + + /** @brief Swap the contents of this container with another */ + void swap(any& other) noexcept; + + ///@} + /** @name Observers */ + ///@{ + + /** @brief Test whether the container holds a value */ + bool has_value() const noexcept; + + /** @brief Get the type_info object for the held type */ + std::type_info const& type() const noexcept; + + ///@} + +private: + + /** @class holder_base + * @brief Abstract base class for storing the object + */ + struct holder_base + { + /** @brief Destructor */ + virtual ~holder_base() = default; + + /** @brief Clone function */ + virtual std::unique_ptr clone() const = 0; + + /** @brief Get the type_info for the underlying object */ + virtual std::type_info const& type() const = 0; + }; // class holder_base + + /** @class holder + * @brief Class to hold a copy-constructible object of type T + */ + template + struct holder : holder_base + { + /** @brief Construct by copying data */ + holder(T const& data) : m_data{data} {} + + /** @brief Construct by moving data */ + holder(T&& data) : m_data{std::move(data)} {} + + /** @brief Construct by emplace-constructing the T with the given + * arguments. + */ + template + holder(Args&&... 
args) : m_data{std::forward(args)...} + {} + + /** @brief Destructor */ + ~holder() = default; + + /** @brief Clone the data holder */ + std::unique_ptr clone() const final + { + return make_unique(m_data); + } + + /** @brief Get the type_info for this object */ + std::type_info const& type() const { return typeid(T); } + + /** @brief The data object */ + T m_data; + };// class holder + +private: + + template + friend T const* any_cast(any const*) noexcept; + + template + friend T* any_cast(any*) noexcept; + + std::unique_ptr m_holder = nullptr; + +};// class any + +/** @class bad_any_cast + * @brief Exception class indicating an any_cast has failed. + */ +struct bad_any_cast : std::runtime_error +{ + template + bad_any_cast(T&& what_arg) + : std::runtime_error{std::forward(what_arg)} {} +};// struct bad_any_cast + +/** @brief Swap two any objects */ +inline void swap(any& lhs, any& rhs) +{ + lhs.swap(rhs); +} + +/** @brief Create an any object of type T constructed with args */ +template +any make_any(Ts&&... args) +{ + return any{T(std::forward(args)...)}; +} + +/** @brief Typesafe access to the held object. + * + * @tparam T The type of the held object. + * + * @param obj The any object. + * + * @return If obj is not null and holds a T, a pointer to + * the held object. Otherwise, nullptr. + */ +template +T* any_cast(any* obj) noexcept +{ + return const_cast( + any_cast( + static_cast(obj))); +} + +/** @brief Typesafe access to the held object, const version. + * + * @tparam T The type of the held object. + * + * @param obj The any object. + * + * @return If obj is not null and holds a T, a pointer to + * the held object. Otherwise, nullptr. 
+ */ +template +T const* any_cast(any const* obj) noexcept +{ + static_assert(!std::is_reference::value, + "T must nust be a reference type."); + + if (!obj || !obj->has_value()) + return nullptr; + + if (obj->type() != typeid(T)) + { + return nullptr; + } + + auto T_holder = dynamic_cast const*>(obj->m_holder.get()); + return (T_holder ? &(T_holder->m_data) : nullptr); +} + +/** @brief Typesafe access to the held object. + * + * @tparam T The type of the held object. + * + * @param obj The any object. + * + * @return static_cast(*std::any_cast(&operand)). + * + * @throws bad_any_cast If obj is not nullptr but does not hold a T. + */ +template +T any_cast(any& obj) +{ + using type = + typename std::remove_cv< + typename std::remove_reference::type>::type; + auto* ret = any_cast(&obj); + if (not ret) + throw bad_any_cast("bad any_cast"); + return *ret; +} + +template +T any_cast(any&& obj) +{ + using type = + typename std::remove_cv< + typename std::remove_reference::type>::type; + auto ret = any_cast(&obj); + if (not ret) + throw bad_any_cast("bad any_cast"); + return std::move(*ret); +} + +template +T any_cast(any const& obj) +{ + using type = + typename std::remove_cv::type>::type; + auto ret = any_cast(&obj); + if (not ret) + throw bad_any_cast("bad any_cast"); + return *ret; +} + +// "any" member function implementation + +template +any::any(T&& obj) + : m_holder{make_unique::type>>( + std::forward(obj))} +{} + +inline any::any(any const& other) + : m_holder{other.has_value() ? other.m_holder->clone() : nullptr} {} + +inline any& any::operator=(any const& other) +{ + m_holder = (other.has_value() ? other.m_holder->clone() : nullptr); + return *this; +} + +template +auto any::emplace(Args&&... 
args) + -> typename std::decay::type& +{ + using held_type = typename std::decay::type; + + reset(); + auto tmp_holder = make_unique>( + std::forward(args)...); + auto& ret = tmp_holder->m_data; + m_holder = std::move(tmp_holder); + return ret; +} + +inline void any::reset() noexcept +{ + m_holder.reset(); +} + +inline void any::swap(any& other) noexcept +{ + std::swap(m_holder,other.m_holder); +} + +inline bool any::has_value() const noexcept +{ + return (bool) m_holder; +} + +inline std::type_info const& any::type() const noexcept +{ + return m_holder ? m_holder->type() : typeid(void); +} + +#endif /* End fallback implementation */ + +}// namespace utils +}// namespace lbann +#endif // LBANN_UTILS_ANY_HPP_INCLUDED diff --git a/include/lbann/utils/memory.hpp b/include/lbann/utils/memory.hpp index 12003d1e19b..2545256de8d 100644 --- a/include/lbann/utils/memory.hpp +++ b/include/lbann/utils/memory.hpp @@ -1,11 +1,16 @@ #ifndef LBANN_MEMORY_HPP_ #define LBANN_MEMORY_HPP_ +#include #include namespace lbann { -#if __cplusplus < 201402L +#ifdef LBANN_HAS_STD_MAKE_UNIQUE + +using std::make_unique; + +#else /** \brief Local definition of make_unique for non-C++14 compilers */ template @@ -14,10 +19,6 @@ std::unique_ptr make_unique(Ts&&... params) return std::unique_ptr(new T(std::forward(params)...)); } -#else - -using std::make_unique; - #endif }// namespace lbann From 1f527d922108cb540ec7390586ea1fa8683b82a6 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Wed, 27 Feb 2019 14:57:23 -0800 Subject: [PATCH 126/443] Removed the partitioned field from the sample list class and supporting functions. This feature has been deprecated. 
--- .../lbann/data_readers/sample_list_jag.hpp | 26 +--- .../data_readers/sample_list_jag_impl.hpp | 131 ++---------------- 2 files changed, 10 insertions(+), 147 deletions(-) diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index 8712b079401..a2330ca0dea 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -73,9 +73,6 @@ class sample_list_jag { sample_list_jag(); ~sample_list_jag(); - /// Set the number of partitions and clear internal states - void set_num_partitions(size_t n); - /// Load a sample list file void load(const std::string& samplelist_file, size_t stride=1, size_t offset=0); @@ -99,27 +96,15 @@ class sample_list_jag { /// Check if a sample index is in the valid range bool check_index(size_t idx) const; - /// Serialize sample list for a partition - bool to_string(size_t p, std::string& sstr) const; - - /// Serialize sample list for all partitions + /// Serialize sample list bool to_string(std::string& sstr) const; - /// Write the sample list of partition p - void write(size_t p, const std::string filename) const; - - /// Write the sample list of each partitions + /// Write the sample list void write(const std::string filename) const; /// Allow read-only access to the internal list data const samples_t& get_list() const; - /// Copy the internal list data for partition p - bool get_list(size_t p, samples_t& l_p) const; - - /// Allow read-only access to the internal list data for partition p via iterators - std::pair get_list(size_t p) const; - /// Allow the read-only access to the list header const sample_list_header& get_header() const; @@ -359,9 +344,6 @@ class sample_list_jag { /// Reads a sample list and populates the internal list size_t get_samples_per_file(std::istream& istrm, const std::string& filename, size_t stride=1, size_t offset=0); - /// Compute the sample index range that partition p covers - void 
get_sample_range_per_part(const size_t p, size_t& sid_start, size_t& sid_end) const; - /// Add the header info to the given string void write_header(std::string& sstr, size_t num_files) const; @@ -371,10 +353,6 @@ class sample_list_jag { ((left.second).second < (right.second).second)); } private: - - /// The number of partitions to divide samples into - size_t m_num_partitions; - /// header info of sample list sample_list_header m_header; diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index 3167aeafcdf..20d0859cfed 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -46,8 +46,7 @@ inline const std::string& sample_list_header::get_file_dir() const { return m_file_dir; } -inline sample_list_jag::sample_list_jag() - : m_num_partitions(1u) {} +inline sample_list_jag::sample_list_jag() {} inline sample_list_jag::~sample_list_jag() { // Close the existing open files @@ -61,16 +60,6 @@ inline sample_list_jag::~sample_list_jag() { m_open_fd_pq.clear(); } -inline void sample_list_jag::set_num_partitions(size_t n) { - if (n == 0) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) - + " :: number of partitions must be a positive number (" - + std::to_string(n) + ")"); - } - clear(); - m_num_partitions = n; -} - inline void sample_list_jag::load(const std::string& samplelist_file, size_t stride, size_t offset) { std::ifstream istr(samplelist_file); get_samples_per_file(istr, samplelist_file, stride, offset); @@ -385,23 +374,6 @@ inline size_t sample_list_jag::get_samples_per_file(std::istream& istrm, const s } -inline void sample_list_jag::get_sample_range_per_part(const size_t p, size_t& sid_start, size_t& sid_end) const{ - const size_t total = static_cast(m_sample_list.size()); - const size_t one_more = total % m_num_partitions; - const size_t min_per_partition = total/m_num_partitions; - - if 
(min_per_partition == 0u) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) - + " :: insufficient number of samples for each partition to have at least one."); - } else if (m_num_partitions == 1u) { - sid_start = 0u; - sid_end = total; - } else { - sid_start = min_per_partition * p + ((p >= one_more)? one_more : p); - sid_end = sid_start + min_per_partition + ((p < one_more)? 1u : 0u); - } -} - inline void sample_list_jag::all_gather_archive(const std::string &archive, std::vector& gathered_archive, lbann_comm& comm) { int size_of_list_archive = archive.size(); std::vector packed_sizes(comm.get_procs_per_trainer()); @@ -547,12 +519,11 @@ inline void sample_list_jag::compute_epochs_file_usage(const std::vector& s } inline void sample_list_jag::clear() { - m_num_partitions = 1u; m_sample_list.clear(); } template void sample_list_jag::serialize( Archive & ar ) { - ar(m_num_partitions, m_header, m_sample_list, m_file_id_stats_map); + ar(m_header, m_sample_list, m_file_id_stats_map); } inline void sample_list_jag::write_header(std::string& sstr, size_t num_files) const { @@ -567,23 +538,7 @@ inline void sample_list_jag::write_header(std::string& sstr, size_t num_files) c } -inline bool sample_list_jag::to_string(size_t p, std::string& sstr) const { - if ((m_num_partitions == 0u) || - ((m_num_partitions > 1u) && (p >= m_num_partitions))) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) - + " :: partition id is out of range."); - return false; - } - - size_t i_begin, i_end; - get_sample_range_per_part(p, i_begin, i_end); - - if (i_begin > i_end) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) - + " :: incorrect partition range."); - return false; - } - +inline bool sample_list_jag::to_string(std::string& sstr) const { std::map> tmp_file_map; for (const auto& s : m_sample_list) { std::string filename = std::get<0>(m_file_id_stats_map[s.first]); @@ -592,14 +547,12 @@ 
inline bool sample_list_jag::to_string(size_t p, std::string& sstr) const { samples_t::const_iterator it_begin = m_sample_list.cbegin(); samples_t::const_iterator it_end = m_sample_list.cbegin(); - std::advance(it_begin, i_begin); - std::advance(it_end, i_end); sstr.clear(); // reserve the string to hold the entire sample lit size_t estimated_len = 30 + 42 + m_header.get_file_dir().size() + 1; - if (i_begin < i_end) { + if (it_begin < it_end) { estimated_len += tmp_file_map.size(); sstr.reserve(estimated_len); } @@ -625,37 +578,9 @@ inline bool sample_list_jag::to_string(size_t p, std::string& sstr) const { return true; } - -inline bool sample_list_jag::to_string(std::string& sstr) const { - size_t total_len = 0u; - std::vector strvec(m_num_partitions); - bool ok = true; - - for(size_t p=0u; (p < m_num_partitions) && ok; ++p) { - ok = to_string(p, strvec[p]); - total_len += strvec[p].size(); - } - - if (!ok) { - return false; - } - - sstr.clear(); - sstr.reserve(total_len); - - for(size_t p=0u; p < m_num_partitions; ++p) { - sstr += strvec[p]; - } - - return true; -} - - -inline void sample_list_jag::write(size_t p, const std::string filename) const { - std::string filename_p = modify_file_name(filename, std::string("p") + std::to_string(p)); - +inline void sample_list_jag::write(const std::string filename) const { std::string dir, basename; - parse_path(filename_p, dir, basename); + parse_path(filename, dir, basename); if (!dir.empty() && !check_if_dir_exists(dir)) { // The creation of a shared directory must be done once in a coordinated fashion // among the entities that have access to it. 
Thus, it must be done in advance @@ -663,63 +588,23 @@ inline void sample_list_jag::write(size_t p, const std::string filename) const { return; } - std::fstream ofs(filename_p, std::fstream::out | std::fstream::binary); + std::fstream ofs(filename, std::fstream::out | std::fstream::binary); if (!ofs.good()) { return; } std::string buf; - to_string(p, buf); + to_string(buf); ofs.write(buf.data(), buf.size()*sizeof(std::string::value_type)); ofs.close(); } - -inline void sample_list_jag::write(const std::string filename) const { - for (size_t p = 0u; p < m_num_partitions; ++p) { - write(p, filename); - } -} - - inline const sample_list_jag::samples_t& sample_list_jag::get_list() const { return m_sample_list; } - -inline std::pair -sample_list_jag::get_list(size_t p) const { - if (p >= m_num_partitions) { - return std::make_pair(m_sample_list.cend(), m_sample_list.cend()); - } - - size_t i_begin, i_end; - get_sample_range_per_part(p, i_begin, i_end); - - if (i_begin > i_end) { - return std::make_pair(m_sample_list.cend(), m_sample_list.cend()); - } - - samples_t::const_iterator it_begin = m_sample_list.cbegin(); - samples_t::const_iterator it_end = m_sample_list.cbegin(); - std::advance(it_begin, i_begin); - std::advance(it_end, i_end); - - return std::make_pair(it_begin, it_end); -} - - -inline bool sample_list_jag::get_list(size_t p, sample_list_jag::samples_t& l_p) const { - const auto it = get_list(p); - l_p.clear(); - std::copy(it.first, it.second, l_p.begin()); - - return (it.first != m_sample_list.cend()); -} - - inline const sample_list_header& sample_list_jag::get_header() const { return m_header; } From 20a54ac69df2cdcb3ea1706db8c1dc7a0572c303 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Wed, 27 Feb 2019 16:48:14 -0800 Subject: [PATCH 127/443] add type erased matrix class; work in progress; uncompiled, untested --- include/lbann/utils/type_erased_matrix.hpp | 57 ++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 include/lbann/utils/type_erased_matrix.hpp diff --git a/include/lbann/utils/type_erased_matrix.hpp b/include/lbann/utils/type_erased_matrix.hpp new file mode 100644 index 00000000000..d9c3df45d25 --- /dev/null +++ b/include/lbann/utils/type_erased_matrix.hpp @@ -0,0 +1,57 @@ +#ifndef LBANN_UTILS_TYPE_ERASED_MATRIX_HPP_INCLUDED +#define LBANN_UTILS_TYPE_ERASED_MATRIX_HPP_INCLUDED + +#include +#include + +#include + +namespace lbann +{ +namespace utils +{ + +class type_erased_matrix +{ +public: + + template + type_erased_matrix(El::Matrix const& in_matrix) + : m_matrix{in_matrix} + {} + + template + type_erased_matrix(El::Matrix&& in_matrix) + : m_matrix{std::move(in_matrix)} + {} + + template + El::Matrix& get() + { + return const_cast&>( + static_cast(*this) + .template get()); + } + + template + El::Matrix const& get() + { + return any_cast const&>(m_matrix); + } + +private: + any m_matrix; +};// class type_erased_matrix + +// Helper function for what will probably be the usual construction +// process. +template +std::unique_ptr +create_type_erased_matrx() +{ + return make_unique(El::Matrix{}); +} + +}// namespace utils +}// namespace lbann +#endif // LBANN_UTILS_TYPE_ERASED_MATRIX_HPP_INCLUDED From 829d495308e76a390b0d8a6d74f02a09b326fedd Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Thu, 28 Feb 2019 09:28:20 -0800 Subject: [PATCH 128/443] add type conversion function to type_erased_matrix --- include/lbann/utils/type_erased_matrix.hpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/lbann/utils/type_erased_matrix.hpp b/include/lbann/utils/type_erased_matrix.hpp index d9c3df45d25..9f0d23e0fd5 100644 --- a/include/lbann/utils/type_erased_matrix.hpp +++ b/include/lbann/utils/type_erased_matrix.hpp @@ -39,6 +39,15 @@ class type_erased_matrix return any_cast const&>(m_matrix); } + template + El::Matrix& convert() + { + any new_mat{El::Matrix{ + any_cast const&>(m_matrix)}}; + m_matrix.swap(new_mat); + return this->template get(); + } + private: any m_matrix; };// class type_erased_matrix From 1590f99f717db0606c3e56ca2c8c116f45e6aa90 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama <17844184+oyamay@users.noreply.github.com> Date: Thu, 28 Feb 2019 10:04:37 -0800 Subject: [PATCH 129/443] Skip log files if accuracy/loss information is not available --- scripts/proto/lbann/plot/plot.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/scripts/proto/lbann/plot/plot.py b/scripts/proto/lbann/plot/plot.py index 69c0b6d4721..2df0155d862 100644 --- a/scripts/proto/lbann/plot/plot.py +++ b/scripts/proto/lbann/plot/plot.py @@ -86,18 +86,26 @@ def parse_num(d, key): num_nodes = int(num_procs / num_procs_on_node) else : num_nodes = None - print('WARNING: No process counts are provided.') + print('WARNING: No process counts are provided from {}'.format(stat_path)) # Total epochs of training total_epochs = len(d['val_time']) # Compute accuracy stats if plot_accuracy: + if len(d['train_acc']) == 0: + print('WARNING: No accuracy information is provided from {}'.format(stat_path)) + continue + peak_train_acc = max(d['train_acc']) peak_train_epoch = d['train_acc'].index(peak_train_acc) peak_val_acc = max(d['val_acc']) peak_val_epoch = d['val_acc'].index(peak_val_acc) + if len(d['train_loss']) == 0: + print('WARNING: 
No loss information is provided from {}'.format(stat_path)) + continue + # Compute loss stats min_train_loss = min(d['train_loss']) min_train_epoch = d['train_loss'].index(min_train_loss) From 30862289d3ff5d5e2d1d09a20b6e15f6a53f2d31 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Thu, 28 Feb 2019 11:32:24 -0800 Subject: [PATCH 130/443] add unit test framework and setup in utils directory --- CMakeLists.txt | 26 ++++- include/lbann/utils/CMakeLists.txt | 1 + src/utils/unit_test/CMakeLists.txt | 9 ++ src/utils/unit_test/any_test.cpp | 114 ++++++++++++++++++++ src/utils/unit_test/factory_test.cpp | 155 +++++++++++++++++++++++++++ unit_test/CMakeLists.txt | 9 ++ unit_test/SequentialCatchMain.cpp | 2 + 7 files changed, 315 insertions(+), 1 deletion(-) create mode 100644 src/utils/unit_test/CMakeLists.txt create mode 100644 src/utils/unit_test/any_test.cpp create mode 100644 src/utils/unit_test/factory_test.cpp create mode 100644 unit_test/CMakeLists.txt create mode 100644 unit_test/SequentialCatchMain.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index fbe752f6da6..2cafa495f93 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -112,10 +112,12 @@ option(LBANN_WITH_NVPROF option(LBANN_WITH_TBINF "Include Tensorboard interface" ON) - option(LBANN_WITH_VTUNE "Link the Intel VTune profiling library" OFF) +option(LBANN_WITH_UNIT_TESTING + "Enable the unit testing framework (requires Catch2)" OFF) + # Enable parallel random matrix generation, if possible option(LBANN_DETERMINISTIC "Use deterministic algorithms as much as possible." 
OFF) @@ -405,6 +407,28 @@ if (LBANN_WITH_CONDUIT) set(LBANN_HAS_CONDUIT ${CONDUIT_FOUND}) endif (LBANN_WITH_CONDUIT) +if (LBANN_WITH_UNIT_TESTING) + find_package(Catch2 2.0.0 CONFIG QUIET + HINTS ${CATCH2_DIR} $ENV{CATCH2_DIR} ${CATCH_DIR} $ENV{CATCH_DIR} + PATH_SUFFIXES lib64/cmake/Catch2 lib/cmake/Catch2 + NO_DEFAULT_PATH) + if (NOT Catch2_FOUND) + find_package(Catch2 2.0.0 CONFIG QUIET REQUIRED) + endif () + message(STATUS "Found Catch2: ${Catch2_DIR}") + message(STATUS "CMAKE_MODULE_PATH=${CMAKE_MODULE_PATH}") + + # Now that Catch2 has been found, start adding the unit tests + include(CTest) + include(Catch) + add_subdirectory(src/utils/unit_test) + + message(STATUS "LBANN_CATCH2_TEST_FILES=${LBANN_CATCH2_TEST_FILES}") + + # Add this one last + add_subdirectory(unit_test) +endif (LBANN_WITH_UNIT_TESTING) + # Handle the documentation add_subdirectory(docs) diff --git a/include/lbann/utils/CMakeLists.txt b/include/lbann/utils/CMakeLists.txt index e13b4103948..a07932b662f 100644 --- a/include/lbann/utils/CMakeLists.txt +++ b/include/lbann/utils/CMakeLists.txt @@ -26,6 +26,7 @@ set_full_path(THIS_DIR_HEADERS statistics.hpp summary.hpp timer.hpp + type_erased_matrix.hpp ) # Add the subdirectories diff --git a/src/utils/unit_test/CMakeLists.txt b/src/utils/unit_test/CMakeLists.txt new file mode 100644 index 00000000000..d42e7fd8334 --- /dev/null +++ b/src/utils/unit_test/CMakeLists.txt @@ -0,0 +1,9 @@ +set_full_path(_DIR_LBANN_CATCH2_TEST_FILES + any_test.cpp + factory_test.cpp +# type_erased_matrix_test.cpp + ) + +set(LBANN_CATCH2_TEST_FILES + "${LBANN_CATCH2_TEST_FILES}" "${_DIR_LBANN_CATCH2_TEST_FILES}" PARENT_SCOPE) +message(STATUS "LBANN_CATCH2_TEST_FILES=${LBANN_CATCH2_TEST_FILES}") diff --git a/src/utils/unit_test/any_test.cpp b/src/utils/unit_test/any_test.cpp new file mode 100644 index 00000000000..2b7178727ce --- /dev/null +++ b/src/utils/unit_test/any_test.cpp @@ -0,0 +1,114 @@ +// MUST include this +#include + +// File being tested +#include + +#include 
+#include +#include + +namespace +{ +struct base { virtual ~base() = default; }; +struct derived : base {}; +}// namespace + +TEST_CASE ("Testing the type-erased \"any\" class", "[any][utilities]") +{ + SECTION ("Default-constructing an \"any\" object") + { + lbann::utils::any empty_any; + lbann::utils::any* null_any_ptr = nullptr; + REQUIRE_FALSE(empty_any.has_value()); + REQUIRE(lbann::utils::any_cast(&empty_any) == nullptr); + REQUIRE(lbann::utils::any_cast(null_any_ptr) == nullptr); + } + + SECTION ("Storing a double in an \"any\" object") + { + lbann::utils::any eight_as_double(8.0); + REQUIRE(eight_as_double.has_value()); + REQUIRE_NOTHROW(lbann::utils::any_cast(eight_as_double)); + REQUIRE(lbann::utils::any_cast(eight_as_double) == 8.0); + REQUIRE_THROWS_AS(lbann::utils::any_cast(eight_as_double), + lbann::utils::bad_any_cast); + + REQUIRE(eight_as_double.type() == typeid(double)); + REQUIRE_FALSE(eight_as_double.type() == typeid(int)); + + eight_as_double.reset(); + REQUIRE(eight_as_double.type() == typeid(void)); + REQUIRE_FALSE(eight_as_double.has_value()); + } + + SECTION ("Storing a vector of ints in an \"any\" object") + { + lbann::utils::any int_vec_as_any(std::vector(10)); + + REQUIRE(int_vec_as_any.has_value()); + REQUIRE_NOTHROW(lbann::utils::any_cast&>(int_vec_as_any)); + + auto& vec = lbann::utils::any_cast&>(int_vec_as_any); + std::iota(vec.begin(),vec.end(),0); + REQUIRE(lbann::utils::any_cast&>(int_vec_as_any)[5] == 5); + + REQUIRE_THROWS_AS(lbann::utils::any_cast>(int_vec_as_any), + lbann::utils::bad_any_cast); + + REQUIRE(int_vec_as_any.type() == typeid(std::vector)); + REQUIRE_FALSE(int_vec_as_any.type() == typeid(int[])); + + int_vec_as_any.reset(); + REQUIRE_FALSE(int_vec_as_any.has_value()); + } + + SECTION ("Storing a derived type as pointer-to-base in \"any\" object") + { + lbann::utils::any derived_as_base_any(std::shared_ptr{new derived}); + + REQUIRE(derived_as_base_any.has_value()); + REQUIRE_NOTHROW( + 
lbann::utils::any_cast&>(derived_as_base_any)); + + REQUIRE_THROWS_AS( + lbann::utils::any_cast&>(derived_as_base_any), + lbann::utils::bad_any_cast); + + derived_as_base_any.reset(); + REQUIRE_FALSE(derived_as_base_any.has_value()); + } + + SECTION ("Storing a derived type in \"any\" object") + { + lbann::utils::any derived_as_any(std::make_shared()); + + REQUIRE(derived_as_any.has_value()); + REQUIRE_NOTHROW( + lbann::utils::any_cast&>(derived_as_any)); + + REQUIRE_THROWS_AS( + lbann::utils::any_cast&>(derived_as_any), + lbann::utils::bad_any_cast); + + derived_as_any.reset(); + REQUIRE_FALSE(derived_as_any.has_value()); + } + + SECTION ("Storing a \"shared_ptr\" and change to \"double\"") + { + lbann::utils::any my_any(std::make_shared()); + + REQUIRE(my_any.has_value()); + REQUIRE_NOTHROW( + lbann::utils::any_cast&>(my_any)); + + // Change to double + REQUIRE(my_any.emplace(10.0) == 10.0); + REQUIRE(lbann::utils::any_cast(&my_any) != nullptr); + REQUIRE( + lbann::utils::any_cast>(&my_any) == nullptr); + my_any.reset(); + REQUIRE_FALSE(my_any.has_value()); + } +} diff --git a/src/utils/unit_test/factory_test.cpp b/src/utils/unit_test/factory_test.cpp new file mode 100644 index 00000000000..d118e72030a --- /dev/null +++ b/src/utils/unit_test/factory_test.cpp @@ -0,0 +1,155 @@ +// Be sure to include this! 
+#include + +// The code being tested +#include + +// Other includes +#include + +namespace +{ +struct widget_base { + virtual ~widget_base() = default; +}; +struct widget : widget_base {}; +struct gizmo : widget_base {}; +} + +enum class generic_key +{ + INVALID, + WIDGET, + GIZMO +}; + +template struct Key; + +template <> +struct Key +{ + static std::string get(generic_key key) + { + switch (key) + { + case generic_key::WIDGET: + return "widget"; + case generic_key::GIZMO: + return "gizmo"; + case generic_key::INVALID: + return "invalid"; + } + return ""; + } +}; + +template <> +struct Key +{ + static int get(generic_key key) noexcept + { + return static_cast(key); + } +}; + +// This tests factories keyed with strings and ints. BDD-style +// nomenclature is used inside the test case. +TEMPLATE_TEST_CASE( + "testing the factory class", "[factory][utilities]", std::string, int) +{ + using widget_factory + = lbann::generic_factory; + using key = Key; + + GIVEN("an object factory") + { + widget_factory factory; + + WHEN("Two new builders are registered") + { + factory.register_builder( + key::get(generic_key::WIDGET),[]() + { + return std::unique_ptr( + lbann::make_unique()); + }); + + factory.register_builder( + key::get(generic_key::GIZMO),[]() + { + return std::unique_ptr( + lbann::make_unique()); + }); + + THEN("The factory knows about two builders") + { + auto names = factory.get_registered_keys(); + REQUIRE(std::distance(names.begin(), names.end()) == 2UL); + } + AND_WHEN("A builder is added with an existing key") + { + factory.register_builder( + key::get(generic_key::GIZMO),[]() + { + return std::unique_ptr( + lbann::make_unique()); + }); + + THEN("The factory still knows about only two factories") + { + auto names = factory.get_registered_keys(); + REQUIRE(std::distance(names.begin(), names.end()) == 2UL); + } + } + + AND_WHEN("A new object is requested with a valid key") + { + auto obj = factory.create_object(key::get(generic_key::WIDGET)); + + THEN("The 
returned object is the right type.") + { + widget* obj_ptr = dynamic_cast(obj.get()); + REQUIRE(obj_ptr != nullptr); + } + } + + AND_WHEN("A new object is requested with with an invalid key") + { + THEN("An exception is thrown.") + { + std::unique_ptr obj; + REQUIRE_THROWS_AS( + obj = factory.create_object(key::get(generic_key::INVALID)), + lbann::exception); + } + } + + AND_WHEN("A key is removed") + { + auto success = factory.unregister(key::get(generic_key::WIDGET)); + THEN("The number of known factories has decreased.") + { + REQUIRE(success == true); + auto names = factory.get_registered_keys(); + REQUIRE(std::distance(names.begin(), names.end()) == 1UL); + } + + THEN("The remaining key is still valid.") + { + auto obj = factory.create_object(key::get(generic_key::GIZMO)); + gizmo* obj_ptr = dynamic_cast(obj.get()); + REQUIRE(obj_ptr != nullptr); + } + + THEN("An exception is thrown when trying to create an " + "object with a removed key.") + { + std::unique_ptr obj; + REQUIRE_THROWS_AS( + obj = factory.create_object(key::get(generic_key::WIDGET)), + lbann::exception); + } + } + } + } +} diff --git a/unit_test/CMakeLists.txt b/unit_test/CMakeLists.txt new file mode 100644 index 00000000000..c43b43447af --- /dev/null +++ b/unit_test/CMakeLists.txt @@ -0,0 +1,9 @@ +message(STATUS "LBANN_CATCH2_TEST_FILES=${LBANN_CATCH2_TEST_FILES}") + +# Add the sequential test main() function +add_executable(seq-catch-tests SequentialCatchMain.cpp "${LBANN_CATCH2_TEST_FILES}") +target_link_libraries(seq-catch-tests PRIVATE lbann Catch2::Catch2) + +catch_discover_tests(seq-catch-tests) + +# Add the parallel test main() function -- TODO diff --git a/unit_test/SequentialCatchMain.cpp b/unit_test/SequentialCatchMain.cpp new file mode 100644 index 00000000000..4ed06df1f7b --- /dev/null +++ b/unit_test/SequentialCatchMain.cpp @@ -0,0 +1,2 @@ +#define CATCH_CONFIG_MAIN +#include From 310f4b4e222728c5bb14a8cf9594589ecb7a5db0 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 28 Feb 
2019 11:34:21 -0800 Subject: [PATCH 131/443] Refactoring conv/deconv layers. Less brittle approach to setup kernel dimensions. Miscellaneous stylistic changes. --- .../layers/learning/base_convolution.hpp | 225 +++++++++--------- include/lbann/layers/learning/convolution.hpp | 91 ++++--- .../lbann/layers/learning/deconvolution.hpp | 93 ++++---- 3 files changed, 195 insertions(+), 214 deletions(-) diff --git a/include/lbann/layers/learning/base_convolution.hpp b/include/lbann/layers/learning/base_convolution.hpp index 5630a3aa3d2..f445dcd778a 100644 --- a/include/lbann/layers/learning/base_convolution.hpp +++ b/include/lbann/layers/learning/base_convolution.hpp @@ -24,12 +24,11 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_LAYER_BASE_CONVOLUTION_HPP_INCLUDED -#define LBANN_LAYER_BASE_CONVOLUTION_HPP_INCLUDED +#ifndef LBANN_LAYERS_LEARNING_BASE_CONVOLUTION_HPP_INCLUDED +#define LBANN_LAYERS_LEARNING_BASE_CONVOLUTION_HPP_INCLUDED #include #include -#include "lbann/layers/learning/learning.hpp" #include "lbann/layers/layer.hpp" #include "lbann/weights/initializer.hpp" #include "lbann/weights/variance_scaling_initializers.hpp" @@ -43,15 +42,16 @@ namespace lbann { /** @brief Computation kernels for convolution and deconvolution layers. */ -template -class base_convolution_layer : public learning_layer { +template +class base_convolution_layer : public Layer { protected: - /** Convolution kernel dimensions. */ - std::vector m_kernel_dims; - /** Size of convolutional kernel. */ - int m_kernel_size; + int m_output_channels; + /** @brief Spatial dimensions for convolution kernel. + * @detailed Excludes number of input and output channels. + */ + std::vector m_conv_dims; /** Convolution padding. */ std::vector m_pads; /** Convolution strides. 
*/ @@ -74,21 +74,21 @@ class base_convolution_layer : public learning_layer { * This is this layer's contribution to the objective function * gradient w.r.t. the convolutional kernel weights. */ - StarMat m_kernel_gradient; + StarMat m_kernel_gradient; /** Bias gradient. * This is this layer's contribution to the objective function * gradient w.r.t. the bias weights. */ - StarMat m_bias_gradient; + StarMat m_bias_gradient; #ifdef LBANN_HAS_CUDNN /** Convolution kernel cuDNN descriptor. */ - cudnnFilterDescriptor_t m_kernel_cudnn_desc; + cudnnFilterDescriptor_t m_kernel_cudnn_desc = nullptr; /** Convolution cuDNN descriptor. */ - cudnnConvolutionDescriptor_t m_convolution_cudnn_desc; + cudnnConvolutionDescriptor_t m_convolution_cudnn_desc = nullptr; /** Bias tensor cuDNN descriptor. */ - cudnnTensorDescriptor_t m_bias_cudnn_desc; + cudnnTensorDescriptor_t m_bias_cudnn_desc = nullptr; /** Tensor cuDNN descriptors. */ cudnn::data_parallel_layer_tensor_manager m_tensors_cudnn_desc; @@ -96,79 +96,65 @@ class base_convolution_layer : public learning_layer { public: - base_convolution_layer(lbann_comm *comm, - int num_data_dims, - int num_output_channels, - const std::vector conv_dims, - const std::vector pads, - const std::vector strides, - const std::vector dilations, + base_convolution_layer(lbann_comm* comm, + int num_data_dims, /// @todo Remove + int output_channels, + std::vector conv_dims, + std::vector pads, + std::vector strides, + std::vector dilations, int groups, bool has_bias) - : learning_layer(comm), - m_kernel_dims(conv_dims), - m_kernel_size(0), - m_pads(pads), - m_strides(strides), - m_dilations(dilations), - m_num_groups(groups), - m_bias_scaling_factor(has_bias ? 
DataType(1) : DataType(0)), - m_kernel_gradient(this->m_comm->get_trainer_grid()), - m_bias_gradient(this->m_comm->get_trainer_grid()) + : Layer(comm), + m_output_channels(std::max(output_channels, 1)), + m_conv_dims(std::move(conv_dims)), + m_pads(std::move(pads)), + m_strides(std::move(strides)), + m_dilations(std::move(dilations)), + m_num_groups(std::max(groups, 1)), + m_bias_scaling_factor(has_bias ? 1 : 0), + m_kernel_gradient(this->get_comm()->get_trainer_grid()), + m_bias_gradient(this->get_comm()->get_trainer_grid()) #ifdef LBANN_HAS_CUDNN - , m_kernel_cudnn_desc(nullptr), - m_convolution_cudnn_desc(nullptr), - m_bias_cudnn_desc(nullptr), - m_tensors_cudnn_desc(this) + , m_tensors_cudnn_desc(this) #endif // LBANN_HAS_CUDNN { - - bool nonunit_dilation = false; - for (const auto& d : m_dilations) { - if (d != 1) { - nonunit_dilation = true; - break; - } - } - if (Dev == El::Device::CPU && nonunit_dilation) { - std::stringstream err; - err << "layer \"" << get_name() << "\" " - << "has nonunit dilation which is only supported on GPUs"; + std::ostringstream err; + + // Make sure that configuration is supported + if (Device == El::Device::CPU + && std::any_of(m_dilations.begin(), m_dilations.end(), + [](El::Int d) { return d != 1; })) { + err << "layer \"" << this->get_name() << "\" " + << "has non-unit dilation, which is not yet supported on CPU"; LBANN_ERROR(err.str()); } - if (Dev == El::Device::CPU && m_num_groups > 1) { - std::stringstream err; - err << "layer \"" << get_name() << "\" " - << "has nonunit groups " << m_num_groups - << " which is only supported on GPUs"; + if (Device == El::Device::CPU && m_num_groups != 1) { + err << "layer \"" << this->get_name() << "\" " + << "has " << m_num_groups << ", " + << "but only unit groups are currently supported on CPU"; LBANN_ERROR(err.str()); } // Check dimensions of convolution parameters - if ((int) m_kernel_dims.size() != num_data_dims - || (int) m_pads.size() != num_data_dims - || (int) m_strides.size() 
!= num_data_dims - || (int) m_dilations.size() != num_data_dims) { - std::stringstream err; - err << "layer \"" << get_name() << "\" " + if (m_pads.size() != m_conv_dims.size() + || m_strides.size() != m_conv_dims.size() + || m_dilations.size() != m_conv_dims.size()) { + err << " layer \"" << this->get_name() << "\" " << "has an invalid number of convolution parameters " - << "(expected " << num_data_dims << " parameters, " - << "conv_dims has " << m_kernel_dims.size() << ", " + << "(conv_dims has " << m_conv_dims.size() << " entries, " << "pads has " << m_pads.size() << ", " << "strides has " << m_strides.size() << ", " << "dilations has " << m_dilations.size() << ")"; LBANN_ERROR(err.str()); } - // Record number of output channels - m_kernel_dims.insert(m_kernel_dims.begin(), num_output_channels); - } base_convolution_layer(const base_convolution_layer& other) - : learning_layer(other), - m_kernel_dims(other.m_kernel_dims), - m_kernel_size(other.m_kernel_size), + : Layer(other), + m_output_channels(other.m_output_channels), + m_conv_dims(other.m_conv_dims), m_pads(other.m_pads), m_strides(other.m_strides), m_dilations(other.m_dilations), @@ -177,10 +163,7 @@ class base_convolution_layer : public learning_layer { m_kernel_gradient(other.m_kernel_gradient), m_bias_gradient(other.m_bias_gradient) #ifdef LBANN_HAS_CUDNN - , m_kernel_cudnn_desc(nullptr), - m_convolution_cudnn_desc(nullptr), - m_bias_cudnn_desc(nullptr), - m_tensors_cudnn_desc(other.m_tensors_cudnn_desc) + , m_tensors_cudnn_desc(other.m_tensors_cudnn_desc) #endif // LBANN_HAS_CUDNN { #ifdef LBANN_HAS_CUDNN @@ -195,9 +178,9 @@ class base_convolution_layer : public learning_layer { } base_convolution_layer& operator=(const base_convolution_layer& other) { - learning_layer::operator=(other); - m_kernel_dims = other.m_kernel_dims; - m_kernel_size = other.m_kernel_size; + Layer::operator=(other); + m_output_channels = other.m_output_channels; + m_conv_dims = other.m_conv_dims; m_pads = other.m_pads; 
m_strides = other.m_strides; m_dilations = other.m_dilations; @@ -236,14 +219,14 @@ class base_convolution_layer : public learning_layer { } description get_description() const override { - auto&& desc = learning_layer::get_description(); - std::stringstream ss; + auto&& desc = Layer::get_description(); + std::ostringstream ss; // Convolution dimensions ss.str(std::string{}); ss.clear(); - for (size_t i = 2; i < m_kernel_dims.size(); ++i) { - ss << (i > 2 ? ", " : "" ) << m_kernel_dims[i]; + for (size_t i = 0; i < m_conv_dims.size(); ++i) { + ss << (i > 0 ? ", " : "" ) << m_conv_dims[i]; } desc.add("Convolution dimensions", ss.str()); @@ -290,9 +273,15 @@ class base_convolution_layer : public learning_layer { * The kernel weights are setup in the convolution and * deconvolution classes. */ void setup_data() override { - learning_layer::setup_data(); + Layer::setup_data(); + + // Tensor dimensions const auto& input_dims = get_input_dims(); const auto& output_dims = get_output_dims(); + const auto& kernel_dims = get_kernel_dims(); + const auto& kernel_size = std::accumulate(kernel_dims.begin(), + kernel_dims.end(), + 1, std::multiplies()); // Initialize default weights if none are provided if (this->m_weights.size() > 2) { @@ -329,15 +318,15 @@ class base_convolution_layer : public learning_layer { auto* cast_initializer = dynamic_cast(kernel_weights.get_initializer()); if (cast_initializer != nullptr) { - cast_initializer->set_fan_in(m_kernel_size / output_dims[0]); - cast_initializer->set_fan_out(m_kernel_size / input_dims[0]); + cast_initializer->set_fan_in(kernel_size / output_dims[0]); + cast_initializer->set_fan_out(kernel_size / input_dims[0]); } // Initialize weight matrices auto dist = get_prev_activations().DistData(); dist.colDist = El::STAR; dist.rowDist = El::STAR; - kernel_weights.set_dims(m_kernel_dims); + kernel_weights.set_dims(kernel_dims); kernel_weights.set_matrix_distribution(dist); bias_weights.set_dims(output_dims[0]); 
bias_weights.set_matrix_distribution(dist); @@ -373,20 +362,21 @@ class base_convolution_layer : public learning_layer { /// Initialize GPU objects void setup_gpu() override { - learning_layer::setup_gpu(); + Layer::setup_gpu(); #ifndef LBANN_HAS_CUDNN LBANN_ERROR("cuDNN not detected"); #else const auto& output_dims = get_output_dims(); + const auto& kernel_dims = get_kernel_dims(); // Set kernel descriptor CHECK_CUDNN(cudnnCreateFilterDescriptor(&m_kernel_cudnn_desc)); CHECK_CUDNN(cudnnSetFilterNdDescriptor(m_kernel_cudnn_desc, cudnn::get_data_type(), CUDNN_TENSOR_NCHW, - m_kernel_dims.size(), - m_kernel_dims.data())); + kernel_dims.size(), + kernel_dims.data())); // Set convolution descriptor CHECK_CUDNN(cudnnCreateConvolutionDescriptor(&m_convolution_cudnn_desc)); @@ -410,6 +400,9 @@ class base_convolution_layer : public learning_layer { protected: + /** Dimensions of convolution kernel. */ + virtual std::vector get_kernel_dims() const = 0; + /** Convolution with cuDNN. */ void apply_convolution_cudnn(bool during_forward_prop) { #ifndef LBANN_HAS_CUDNN @@ -752,14 +745,18 @@ class base_convolution_layer : public learning_layer { input_dims = get_output_dims(); output_dims = get_input_dims(); } + const auto& kernel_dims = get_kernel_dims(); + const auto& kernel_size = std::accumulate(kernel_dims.begin(), + kernel_dims.end(), + 1, std::multiplies()); // Initialize matrices const int m = output_size / output_dims[0]; const int n = output_dims[0]; - const int k = m_kernel_size / output_dims[0]; - DMat input_col, output_col; - DMat im2col_matrix(k, m); - const DMat kernel_matrix(k, n, local_kernel.LockedBuffer(), k); + const int k = kernel_size / output_dims[0]; + DMat input_col, output_col; + DMat im2col_matrix(k, m); + const DMat kernel_matrix(k, n, local_kernel.LockedBuffer(), k); // Iterate through input columns for (El::Int col = 0; col < local_width; ++col) { @@ -772,7 +769,7 @@ class base_convolution_layer : public learning_layer { input_dims.size() - 1, 
&input_dims[1], m_pads.data(), - &m_kernel_dims[2], + &kernel_dims[2], m_strides.data()); // Apply convolution to current input column @@ -793,9 +790,9 @@ class base_convolution_layer : public learning_layer { const auto& local_input = (during_forward_prop ? get_local_prev_activations() : get_local_prev_error_signals()); - DMat& local_output = (during_forward_prop ? - get_local_activations() : - get_local_error_signals()); + DMat& local_output = (during_forward_prop ? + get_local_activations() : + get_local_error_signals()); // Matrix parameters const int input_size = local_input.Height(); @@ -809,14 +806,18 @@ class base_convolution_layer : public learning_layer { input_dims = get_output_dims(); output_dims = get_input_dims(); } + const auto& kernel_dims = get_kernel_dims(); + const auto& kernel_size = std::accumulate(kernel_dims.begin(), + kernel_dims.end(), + 1, std::multiplies()); // Initialize matrices - const int m = m_kernel_size / input_dims[0]; + const int m = kernel_size / input_dims[0]; const int n = input_size / input_dims[0]; const int k = input_dims[0]; - DMat input_col, output_col; - DMat im2col_matrix(m, n); - const DMat kernel_matrix(m, k, local_kernel.LockedBuffer(), m); + DMat input_col, output_col; + DMat im2col_matrix(m, n); + const DMat kernel_matrix(m, k, local_kernel.LockedBuffer(), m); // Iterate through input columns for (El::Int col = 0; col < local_width; ++col) { @@ -836,7 +837,7 @@ class base_convolution_layer : public learning_layer { output_dims.size() - 1, &output_dims[1], m_pads.data(), - &m_kernel_dims[2], + &kernel_dims[2], m_strides.data()); } @@ -876,8 +877,8 @@ class base_convolution_layer : public learning_layer { void compute_gradients_im2col(bool using_transposed_convolution) { // Local matrices - const DMat& local_input = get_local_prev_activations(); - const DMat& local_gradient_wrt_output = get_local_prev_error_signals(); + const DMat& local_input = get_local_prev_activations(); + const DMat& local_gradient_wrt_output = 
get_local_prev_error_signals(); auto& local_kernel_gradient = m_kernel_gradient.Matrix(); auto& local_bias_gradient = m_bias_gradient.Matrix(); @@ -889,6 +890,10 @@ class base_convolution_layer : public learning_layer { const int num_output_channels = output_dims[0]; const int num_per_output_channel = get_output_size() / num_output_channels; const int effective_mini_batch_size = this->m_model->get_effective_mini_batch_size(); + const auto& kernel_dims = get_kernel_dims(); + const auto& kernel_size = std::accumulate(kernel_dims.begin(), + kernel_dims.end(), + 1, std::multiplies()); // Compute bias gradient // Note: Sum is computed with Kahan summation @@ -921,23 +926,23 @@ class base_convolution_layer : public learning_layer { // Initialize matrices const int m = (using_transposed_convolution ? - m_kernel_size / num_input_channels : - m_kernel_size / num_output_channels); + kernel_size / num_input_channels : + kernel_size / num_output_channels); const int n = (using_transposed_convolution ? num_input_channels : num_output_channels); const int k = (using_transposed_convolution ? 
get_input_size() / num_input_channels : get_output_size() / num_output_channels); - DMat im2col_matrix(m, k); - DMat kernel_gradient_matrix(m, n, local_kernel_gradient.Buffer(), m); + DMat im2col_matrix(m, k); + DMat kernel_gradient_matrix(m, n, local_kernel_gradient.Buffer(), m); El::Zero(kernel_gradient_matrix); // Compute kernel gradient contributions from each data sample for (El::Int col = 0; col < local_width; ++col) { if (using_transposed_convolution) { - const DMat input_col(k, n, local_input.LockedBuffer(0,col), k); - const DMat gradient_wrt_output_col = + const DMat input_col(k, n, local_input.LockedBuffer(0,col), k); + const DMat gradient_wrt_output_col = El::LockedView(local_gradient_wrt_output, El::ALL, El::IR(col)); im2col(gradient_wrt_output_col, im2col_matrix, @@ -945,23 +950,23 @@ class base_convolution_layer : public learning_layer { output_dims.size() - 1, &output_dims[1], m_pads.data(), - &m_kernel_dims[2], + &kernel_dims[2], m_strides.data()); El::Gemm(El::NORMAL, El::NORMAL, DataType(1), im2col_matrix, input_col, DataType(1), kernel_gradient_matrix); } else { - const DMat input_col + const DMat input_col = El::LockedView(local_input, El::ALL, El::IR(col)); - const DMat gradient_wrt_output_col(k, n, local_gradient_wrt_output.LockedBuffer(0,col), k); + const DMat gradient_wrt_output_col(k, n, local_gradient_wrt_output.LockedBuffer(0,col), k); im2col(input_col, im2col_matrix, num_input_channels, input_dims.size() - 1, &input_dims[1], m_pads.data(), - &m_kernel_dims[2], + &kernel_dims[2], m_strides.data()); El::Gemm(El::NORMAL, El::NORMAL, DataType(1), im2col_matrix, gradient_wrt_output_col, @@ -1079,4 +1084,4 @@ class base_convolution_layer : public learning_layer { } // namespace lbann -#endif // LBANN_LAYER_BASE_CONVOLUTION_HPP_INCLUDED +#endif // LBANN_LAYERS_LEARNING_BASE_CONVOLUTION_HPP_INCLUDED diff --git a/include/lbann/layers/learning/convolution.hpp b/include/lbann/layers/learning/convolution.hpp index b6e5faab24a..76294d6332e 100644 --- 
a/include/lbann/layers/learning/convolution.hpp +++ b/include/lbann/layers/learning/convolution.hpp @@ -24,10 +24,9 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_LAYER_CONVOLUTION_HPP_INCLUDED -#define LBANN_LAYER_CONVOLUTION_HPP_INCLUDED +#ifndef LBANN_LAYERS_LEARNING_CONVOLUTION_HPP_INCLUDED +#define LBANN_LAYERS_LEARNING_CONVOLUTION_HPP_INCLUDED -#include #include "lbann/layers/learning/base_convolution.hpp" #include "lbann/utils/exception.hpp" @@ -39,8 +38,8 @@ namespace lbann { * tensors. This is primarily optimized for image data in NCHW * format. */ -template -class convolution_layer : public base_convolution_layer { +template +class convolution_layer : public base_convolution_layer { private: friend class lbann_callback_imcomm; @@ -75,17 +74,18 @@ class convolution_layer : public base_convolution_layer { std::vector dilations, int groups, bool has_bias = true) - : base_convolution_layer(comm, - num_data_dims, - num_output_channels, - conv_dims, - pads, - strides, - dilations, - groups, - has_bias) { - static_assert(T_layout == data_layout::DATA_PARALLEL, - "convolution only supports DATA_PARALLEL"); + : base_convolution_layer( + comm, + num_data_dims, + num_output_channels, + conv_dims, + pads, + strides, + dilations, + groups, + has_bias) { + static_assert(Layout == data_layout::DATA_PARALLEL, + "convolution layer only supports DATA_PARALLEL"); } @@ -93,19 +93,19 @@ class convolution_layer : public base_convolution_layer { std::string get_type() const override { return "convolution"; } - data_layout get_data_layout() const override { return T_layout; } + data_layout get_data_layout() const override { return Layout; } - El::Device get_device_allocation() const override { return Dev; } + El::Device get_device_allocation() const override { return Device; } void setup_dims() override { - base_convolution_layer::setup_dims(); + 
base_convolution_layer::setup_dims(); std::stringstream err; // Get tensor dimensions const auto& input_dims = this->get_input_dims(); auto output_dims = input_dims; const auto input_channels = input_dims[0]; - const auto output_channels = this->m_kernel_dims[0]; + const auto output_channels = this->m_output_channels; // Check that number of groups is valid if (this->m_num_groups < 1) { @@ -125,32 +125,11 @@ class convolution_layer : public base_convolution_layer { LBANN_ERROR(err.str()); } - // Initialize convolution kernel dimensions - this->m_kernel_dims.insert(this->m_kernel_dims.begin() + 1, - input_channels / this->m_num_groups); - this->m_kernel_size = std::accumulate(this->m_kernel_dims.begin(), - this->m_kernel_dims.end(), - 1, std::multiplies()); - if (this->m_kernel_dims.size() != input_dims.size() + 1) { - err << this->get_type() << " layer " - << "\"" << this->get_name() << "\" " - << "has a "; - for (size_t i = 0; i < input_dims.size(); ++i) { - err << (i > 0 ? " x " : "") << input_dims[i]; - } - err << " input tensor and a "; - for (size_t i = 0; i < this->m_kernel_dims.size(); ++i) { - err << (i > 0 ? 
" x " : "") << this->m_kernel_dims[i]; - } - err << " convolution kernel"; - LBANN_ERROR(err.str()); - } - // Initialize output tensor dimensions output_dims[0] = output_channels; for (size_t i = 0; i < output_dims.size() - 1; ++i) { const auto& input_dim = input_dims[i+1]; - const auto& kernel_dim = this->m_kernel_dims[i+2]; + const auto& kernel_dim = this->m_conv_dims[i]; const auto& stride = this->m_strides[i]; const auto& pad = this->m_pads[i]; const auto& dilation = this->m_dilations[i]; @@ -165,23 +144,33 @@ class convolution_layer : public base_convolution_layer { protected: + std::vector get_kernel_dims() const { + std::vector dims; + dims.push_back(this->m_output_channels); + dims.push_back(this->get_input_dims()[0]); + dims.insert(dims.end(), + this->m_conv_dims.begin(), + this->m_conv_dims.end()); + return dims; + } + void fp_compute() override { if(this->using_gpus()) { - base_convolution_layer::apply_convolution_cudnn(true); - base_convolution_layer::apply_bias_cudnn(); + base_convolution_layer::apply_convolution_cudnn(true); + base_convolution_layer::apply_bias_cudnn(); } else { - base_convolution_layer::apply_convolution_im2col(true); - base_convolution_layer::apply_bias_cpu(); + base_convolution_layer::apply_convolution_im2col(true); + base_convolution_layer::apply_bias_cpu(); } } void bp_compute() override { if(this->using_gpus()) { - base_convolution_layer::compute_gradients_cudnn(false); - base_convolution_layer::apply_transposed_convolution_cudnn(false); + base_convolution_layer::compute_gradients_cudnn(false); + base_convolution_layer::apply_transposed_convolution_cudnn(false); } else { - base_convolution_layer::compute_gradients_im2col(false); - base_convolution_layer::apply_transposed_convolution_im2col(false); + base_convolution_layer::compute_gradients_im2col(false); + base_convolution_layer::apply_transposed_convolution_im2col(false); } } @@ -189,4 +178,4 @@ class convolution_layer : public base_convolution_layer { } // namespace lbann 
-#endif // LBANN_LAYER_CONVOLUTION_HPP_INCLUDED +#endif // LBANN_LAYERS_LEARNING_CONVOLUTION_HPP_INCLUDED diff --git a/include/lbann/layers/learning/deconvolution.hpp b/include/lbann/layers/learning/deconvolution.hpp index 51884ff1c9c..e7bf3ffbdab 100644 --- a/include/lbann/layers/learning/deconvolution.hpp +++ b/include/lbann/layers/learning/deconvolution.hpp @@ -24,10 +24,9 @@ // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// -#ifndef LBANN_LAYER_DECONVOLUTION_HPP_INCLUDED -#define LBANN_LAYER_DECONVOLUTION_HPP_INCLUDED +#ifndef LBANN_LAYERS_LEARNING_DECONVOLUTION_HPP_INCLUDED +#define LBANN_LAYERS_LEARNING_DECONVOLUTION_HPP_INCLUDED -#include #include "lbann/layers/learning/base_convolution.hpp" #include "lbann/utils/exception.hpp" @@ -39,8 +38,8 @@ class lbann_callback_imcomm; /** @brief Transpose of the convolution layer. * @todo Rename to "transposed_convolution_layer". */ -template -class deconvolution_layer : public base_convolution_layer { +template +class deconvolution_layer : public base_convolution_layer { private: friend class lbann_callback_imcomm; @@ -75,17 +74,18 @@ class deconvolution_layer : public base_convolution_layer { std::vector dilations, int groups, bool has_bias = true) - : base_convolution_layer(comm, - num_data_dims, - num_output_channels, - conv_dims, - pads, - strides, - dilations, - groups, - has_bias) { - static_assert(T_layout == data_layout::DATA_PARALLEL, - "convolution only supports DATA_PARALLEL"); + : base_convolution_layer( + comm, + num_data_dims, + num_output_channels, + conv_dims, + pads, + strides, + dilations, + groups, + has_bias) { + static_assert(Layout == data_layout::DATA_PARALLEL, + "deconvolution layer only supports DATA_PARALLEL"); } @@ -93,19 +93,19 @@ class deconvolution_layer : public base_convolution_layer { std::string get_type() const override { return "deconvolution"; } - data_layout get_data_layout() const override { return 
T_layout; } + data_layout get_data_layout() const override { return Layout; } - El::Device get_device_allocation() const override { return Dev; } + El::Device get_device_allocation() const override { return Device; } void setup_dims() override { - base_convolution_layer::setup_dims(); + base_convolution_layer::setup_dims(); std::stringstream err; // Get tensor dimensions const auto& input_dims = this->get_input_dims(); auto output_dims = input_dims; const auto input_channels = input_dims[0]; - const auto output_channels = this->m_kernel_dims[0]; + const auto output_channels = this->m_output_channels; // Check for unsupported features /// @todo Implement dilated and grouped deconvolution @@ -147,35 +147,12 @@ class deconvolution_layer : public base_convolution_layer { LBANN_ERROR(err.str()); } - // Initialize convolution kernel dimensions - // Note: Unlike the convolutional kernel, the previous layer's - // number of channels is now the leading position -- keep in mind - // that deconvolution is the transpose of a convolution. - this->m_kernel_dims.insert(this->m_kernel_dims.begin(), - input_channels / this->m_num_groups); - this->m_kernel_size = std::accumulate(this->m_kernel_dims.begin(), - this->m_kernel_dims.end(), - 1, std::multiplies()); - if (this->m_kernel_dims.size() != input_dims.size() + 1) { - err << this->get_type() << " layer " - << "\"" << this->get_name() << "\" has a "; - for (size_t i = 0; i < input_dims.size(); ++i) { - err << (i > 0 ? " x " : "") << input_dims[i]; - } - err << " input tensor and a "; - for (size_t i = 0; i < this->m_kernel_dims.size(); ++i) { - err << (i > 0 ? 
" x " : "") << this->m_kernel_dims[i]; - } - err << " convolution kernel"; - LBANN_ERROR(err.str()); - } - // Initialize output tensor dimensions /// @todo Dilated deconvolution output_dims[0] = output_channels; for (size_t i = 0; i < output_dims.size() - 1; ++i) { const auto& input_dim = input_dims[i+1]; - const auto& kernel_dim = this->m_kernel_dims[i+2]; + const auto& kernel_dim = this->m_conv_dims[i]; const auto& stride = this->m_strides[i]; const auto& pad = this->m_pads[i]; // const auto& dilation = this->m_dilations[i]; @@ -187,23 +164,33 @@ class deconvolution_layer : public base_convolution_layer { protected: + std::vector get_kernel_dims() const { + std::vector dims; + dims.push_back(this->get_input_dims()[0]); + dims.push_back(this->m_output_channels); + dims.insert(dims.end(), + this->m_conv_dims.begin(), + this->m_conv_dims.end()); + return dims; + } + void fp_compute() override { if(this->using_gpus()) { - base_convolution_layer::apply_transposed_convolution_cudnn(true); - base_convolution_layer::apply_bias_cudnn(); + base_convolution_layer::apply_transposed_convolution_cudnn(true); + base_convolution_layer::apply_bias_cudnn(); } else { - base_convolution_layer::apply_transposed_convolution_im2col(true); - base_convolution_layer::apply_bias_cpu(); + base_convolution_layer::apply_transposed_convolution_im2col(true); + base_convolution_layer::apply_bias_cpu(); } } void bp_compute() override { if(this->using_gpus()) { - base_convolution_layer::compute_gradients_cudnn(true); - base_convolution_layer::apply_convolution_cudnn(false); + base_convolution_layer::compute_gradients_cudnn(true); + base_convolution_layer::apply_convolution_cudnn(false); } else { - base_convolution_layer::compute_gradients_im2col(true); - base_convolution_layer::apply_convolution_im2col(false); + base_convolution_layer::compute_gradients_im2col(true); + base_convolution_layer::apply_convolution_im2col(false); } } @@ -211,4 +198,4 @@ class deconvolution_layer : public 
base_convolution_layer { } // namespace lbann -#endif // LBANN_LAYER_DECONVOLUTION_HPP_INCLUDED +#endif // LBANN_LAYERS_LEARNING_DECONVOLUTION_HPP_INCLUDED From e1b80fd957131df21ab685e177084fecaa827e26 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Thu, 28 Feb 2019 12:19:54 -0800 Subject: [PATCH 132/443] Add unit test for type_erased_matrix --- include/lbann/utils/type_erased_matrix.hpp | 12 ++-- src/utils/unit_test/CMakeLists.txt | 2 +- .../unit_test/type_erased_matrix_test.cpp | 71 +++++++++++++++++++ 3 files changed, 78 insertions(+), 7 deletions(-) create mode 100644 src/utils/unit_test/type_erased_matrix_test.cpp diff --git a/include/lbann/utils/type_erased_matrix.hpp b/include/lbann/utils/type_erased_matrix.hpp index 9f0d23e0fd5..433bda383ae 100644 --- a/include/lbann/utils/type_erased_matrix.hpp +++ b/include/lbann/utils/type_erased_matrix.hpp @@ -29,12 +29,12 @@ class type_erased_matrix El::Matrix& get() { return const_cast&>( - static_cast(*this) + static_cast(*this) .template get()); } template - El::Matrix const& get() + El::Matrix const& get() const { return any_cast const&>(m_matrix); } @@ -42,9 +42,9 @@ class type_erased_matrix template El::Matrix& convert() { - any new_mat{El::Matrix{ - any_cast const&>(m_matrix)}}; - m_matrix.swap(new_mat); + El::Matrix new_mat; + El::Copy(this->template get(), new_mat); + m_matrix.template emplace>(std::move(new_mat)); return this->template get(); } @@ -56,7 +56,7 @@ class type_erased_matrix // process. 
template std::unique_ptr -create_type_erased_matrx() +create_type_erased_matrix() { return make_unique(El::Matrix{}); } diff --git a/src/utils/unit_test/CMakeLists.txt b/src/utils/unit_test/CMakeLists.txt index d42e7fd8334..a42940a60b6 100644 --- a/src/utils/unit_test/CMakeLists.txt +++ b/src/utils/unit_test/CMakeLists.txt @@ -1,7 +1,7 @@ set_full_path(_DIR_LBANN_CATCH2_TEST_FILES any_test.cpp factory_test.cpp -# type_erased_matrix_test.cpp + type_erased_matrix_test.cpp ) set(LBANN_CATCH2_TEST_FILES diff --git a/src/utils/unit_test/type_erased_matrix_test.cpp b/src/utils/unit_test/type_erased_matrix_test.cpp new file mode 100644 index 00000000000..f722c33d2e6 --- /dev/null +++ b/src/utils/unit_test/type_erased_matrix_test.cpp @@ -0,0 +1,71 @@ +// MUST include this +#include + +// File being tested +#include + +// Other includes +#include + +namespace +{ +template +struct TypePair +{ + using src_type = SrcT; + using tgt_type = TgtT; +}; +}// namespace + +TEMPLATE_PRODUCT_TEST_CASE( + "Testing type-erase Matrix","[type-erase][la][utilities]", + (TypePair), + ((int, float), (int, double), + (float, int), (float, double), + (double, int), (double,float))) +{ + using src_type = typename TestType::src_type; + using tgt_type = typename TestType::tgt_type; + + GIVEN("A type-erased matrix") + { + auto x = lbann::utils::create_type_erased_matrix(); + + THEN ("the internal matrix has the correct storage type") + { + REQUIRE_NOTHROW(x->template get()); + REQUIRE_THROWS_AS(x->template get(), + lbann::utils::bad_any_cast); + + auto&& internal_mat = x->template get(); + REQUIRE(internal_mat.Height() == 0); + REQUIRE(internal_mat.Width() == 0); + } + + WHEN ("The matrix is resized") + { + REQUIRE_NOTHROW(x->template get().Resize(10,12)); + + THEN ("The change is reflected in the internal matrix.") + { + auto&& internal_mat = x->template get(); + REQUIRE(internal_mat.Height() == 10); + REQUIRE(internal_mat.Width() == 12); + } + AND_WHEN ("The matrix is converted") + { + 
REQUIRE_NOTHROW(x->template convert()); + + THEN ("The internal matrix has the right type and size") + { + REQUIRE_NOTHROW(x->template get()); + REQUIRE_THROWS_AS(x->template get(), + lbann::utils::bad_any_cast); + + REQUIRE(x->template get().Height() == 10); + REQUIRE(x->template get().Width() == 12); + } + } + } + } +} From 5ea03a2f9af89fd18cdb7e74e226af2a2916842b Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Thu, 28 Feb 2019 12:55:50 -0800 Subject: [PATCH 133/443] fix a bug in the type-erased copying constructor; add doxygen documentation --- include/lbann/utils/type_erased_matrix.hpp | 79 ++++++++++++++++++- .../unit_test/type_erased_matrix_test.cpp | 72 ++++++++++++++--- 2 files changed, 136 insertions(+), 15 deletions(-) diff --git a/include/lbann/utils/type_erased_matrix.hpp b/include/lbann/utils/type_erased_matrix.hpp index 433bda383ae..615b95ac217 100644 --- a/include/lbann/utils/type_erased_matrix.hpp +++ b/include/lbann/utils/type_erased_matrix.hpp @@ -11,20 +11,59 @@ namespace lbann namespace utils { +/** @class type_erased_matrix + * @brief A type-erased wrapper around an @c El::Matrix + * + * @warning This class is an implementation detail of the + * preprocessing pipeline and should not be used in general + * LBANN code. + */ class type_erased_matrix { public: + /** @brief Construct from a copy of a given matrix. + * + * Deep-copy the input matrix into the held matrix. + * + * @tparam Field The data type of the input matrix + * + * @param in_matrix The input matrix. + * + * @warning This performs a deep copy of the matrix. + */ template type_erased_matrix(El::Matrix const& in_matrix) - : m_matrix{in_matrix} - {} + { + El::Matrix held; + El::Copy(in_matrix, held); + m_matrix.emplace>(std::move(held)); + } + /** @brief Construct by moving the given matrix into type-erased + * storage. + * + * Move the input matrix into the held matrix. + * + * @tparam Field The data type of the input matrix + * + * @param in_matrix The input matrix. 
+ */ template type_erased_matrix(El::Matrix&& in_matrix) : m_matrix{std::move(in_matrix)} {} + /** @brief Access the underlying matrix. + * + * Provides read/write access to the underlying matrix if the input + * @c Field matches the data type of the held matrix. + * + * @tparam Field The data type of the held matrix + * + * @throws bad_any_cast If the datatype of the held matrix does not + * match the input @c Field. + */ template El::Matrix& get() { @@ -33,12 +72,37 @@ class type_erased_matrix .template get()); } + /** @brief Access the underlying matrix. + * + * Provides read-only access to the underlying matrix if the input + * @c Field matches the data type of the held matrix. + * + * @tparam Field The data type of the held matrix + * + * @return Reference to the underlying matrix + * + * @throws bad_any_cast If the datatype of the held matrix does not + * match the input @c Field. + */ template El::Matrix const& get() const { return any_cast const&>(m_matrix); } + /** @brief Access the underlying matrix. + * + * Converts (copies) the internal matrix into a matrix of a new + * type, which is then held in place of the original matrix. + * + * @tparam OldField The data type of the originally held matrix + * @tparam NewField The data type of the newly held matrix + * + * @return @c const reference to the underlying matrix + * + * @throws bad_any_cast If the datatype of the held matrix does not + * match the input @c OldField. + */ template El::Matrix& convert() { @@ -49,11 +113,18 @@ class type_erased_matrix } private: + /** @brief Type-erased matrix storage */ any m_matrix; };// class type_erased_matrix -// Helper function for what will probably be the usual construction -// process. +/** @brief Create an empty type-erased matrix with given underlying + * data type. + * + * @tparam Field The type of the underlying matrix. + * + * @return A pointer to an empty type-erased matrix with data type @c + * Field. 
+ */ template std::unique_ptr create_type_erased_matrix() diff --git a/src/utils/unit_test/type_erased_matrix_test.cpp b/src/utils/unit_test/type_erased_matrix_test.cpp index f722c33d2e6..9a77d217686 100644 --- a/src/utils/unit_test/type_erased_matrix_test.cpp +++ b/src/utils/unit_test/type_erased_matrix_test.cpp @@ -29,41 +29,91 @@ TEMPLATE_PRODUCT_TEST_CASE( GIVEN("A type-erased matrix") { - auto x = lbann::utils::create_type_erased_matrix(); + auto mat = lbann::utils::create_type_erased_matrix(); THEN ("the internal matrix has the correct storage type") { - REQUIRE_NOTHROW(x->template get()); - REQUIRE_THROWS_AS(x->template get(), + REQUIRE_NOTHROW(mat->template get()); + REQUIRE_THROWS_AS(mat->template get(), lbann::utils::bad_any_cast); - auto&& internal_mat = x->template get(); + auto&& internal_mat = mat->template get(); REQUIRE(internal_mat.Height() == 0); REQUIRE(internal_mat.Width() == 0); } WHEN ("The matrix is resized") { - REQUIRE_NOTHROW(x->template get().Resize(10,12)); + REQUIRE_NOTHROW(mat->template get().Resize(10,12)); THEN ("The change is reflected in the internal matrix.") { - auto&& internal_mat = x->template get(); + auto&& internal_mat = mat->template get(); REQUIRE(internal_mat.Height() == 10); REQUIRE(internal_mat.Width() == 12); } AND_WHEN ("The matrix is converted") { - REQUIRE_NOTHROW(x->template convert()); + REQUIRE_NOTHROW(mat->template convert()); THEN ("The internal matrix has the right type and size") { - REQUIRE_NOTHROW(x->template get()); - REQUIRE_THROWS_AS(x->template get(), + REQUIRE_NOTHROW(mat->template get()); + REQUIRE_THROWS_AS(mat->template get(), lbann::utils::bad_any_cast); - REQUIRE(x->template get().Height() == 10); - REQUIRE(x->template get().Width() == 12); + REQUIRE(mat->template get().Height() == 10); + REQUIRE(mat->template get().Width() == 12); + } + } + } + } + + GIVEN("A matrix of a given type") + { + El::Matrix mat(10,12); + mat(1,1) = src_type(13); + + WHEN("A type-erased matrix is constructed by copying 
it") + { + lbann::utils::type_erased_matrix erased_mat(mat); + THEN("The type-erased matrix is a copy") + { + REQUIRE(erased_mat.template get().Height() == 10); + REQUIRE(erased_mat.template get().Width() == 12); + REQUIRE( + erased_mat.template get().operator()(1,1) == mat(1,1)); + + AND_WHEN("The original matrix is resized") + { + mat.Resize(5,5); + THEN("The type-erased matrix is unaffected.") + { + REQUIRE(erased_mat.template get().Height() == 10); + REQUIRE(erased_mat.template get().Width() == 12); + } + } + } + } + + WHEN("A type-erased matrix is constructed by moving it") + { + lbann::utils::type_erased_matrix erased_mat(std::move(mat)); + THEN("The type-erased matrix is sized correctly and has good values") + { + REQUIRE(erased_mat.template get().Height() == 10); + REQUIRE(erased_mat.template get().Width() == 12); + REQUIRE( + erased_mat.template get().operator()(1,1) == src_type(13)); + + AND_WHEN("The original matrix is resized") + { + mat.Resize(5,5); + THEN("The type-erased matrix is unaffected.") + { + REQUIRE(erased_mat.template get().Height() == 10); + REQUIRE(erased_mat.template get().Width() == 12); + } } } } From 927b09a68ac568fb68326635697eed17f8ed30b3 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Thu, 28 Feb 2019 14:01:27 -0800 Subject: [PATCH 134/443] remove the comm_test from CTest until it is generalized --- model_zoo/tests/CMakeLists.txt | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/model_zoo/tests/CMakeLists.txt b/model_zoo/tests/CMakeLists.txt index 27b586276c5..9a0f2a8c0ee 100644 --- a/model_zoo/tests/CMakeLists.txt +++ b/model_zoo/tests/CMakeLists.txt @@ -15,14 +15,16 @@ function(add_mpi_ctest TEST_NAME) endif() # ctest with 1, 2, and 4 MPI processes - set(NUM_PROCS 1) - while(8 GREATER ${NUM_PROCS}) - add_test("${TEST_NAME}_${NUM_PROCS}" - ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${NUM_PROCS} ${MPIEXEC_PREFLAGS} - ${CMAKE_CURRENT_BINARY_DIR}/${TEST_EXE}) - math(EXPR NUM_PROCS "${NUM_PROCS} * 2") - endwhile() - + set(COMM_TEST_IS_GENERAL FALSE) + if (COMM_TEST_IS_GENERAL) + set(NUM_PROCS 1) + while(8 GREATER ${NUM_PROCS}) + add_test("${TEST_NAME}_${NUM_PROCS}" + ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${NUM_PROCS} ${MPIEXEC_PREFLAGS} + ${CMAKE_CURRENT_BINARY_DIR}/${TEST_EXE}) + math(EXPR NUM_PROCS "${NUM_PROCS} * 2") + endwhile() + endif (COMM_TEST_IS_GENERAL) endfunction() # Parallel Tests From 1b50523b17cce1b5df1005472687809d1e9f3051 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Thu, 28 Feb 2019 14:01:41 -0800 Subject: [PATCH 135/443] remove extraneous debugging output from CMake --- CMakeLists.txt | 3 --- src/utils/unit_test/CMakeLists.txt | 1 - unit_test/CMakeLists.txt | 5 ++--- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2cafa495f93..1b470f51c88 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -416,15 +416,12 @@ if (LBANN_WITH_UNIT_TESTING) find_package(Catch2 2.0.0 CONFIG QUIET REQUIRED) endif () message(STATUS "Found Catch2: ${Catch2_DIR}") - message(STATUS "CMAKE_MODULE_PATH=${CMAKE_MODULE_PATH}") # Now that Catch2 has been found, start adding the unit tests include(CTest) include(Catch) add_subdirectory(src/utils/unit_test) - message(STATUS "LBANN_CATCH2_TEST_FILES=${LBANN_CATCH2_TEST_FILES}") - # Add this one last add_subdirectory(unit_test) endif (LBANN_WITH_UNIT_TESTING) diff --git a/src/utils/unit_test/CMakeLists.txt b/src/utils/unit_test/CMakeLists.txt index a42940a60b6..3578391b3a4 100644 --- a/src/utils/unit_test/CMakeLists.txt +++ b/src/utils/unit_test/CMakeLists.txt @@ -6,4 +6,3 @@ set_full_path(_DIR_LBANN_CATCH2_TEST_FILES set(LBANN_CATCH2_TEST_FILES "${LBANN_CATCH2_TEST_FILES}" "${_DIR_LBANN_CATCH2_TEST_FILES}" PARENT_SCOPE) -message(STATUS "LBANN_CATCH2_TEST_FILES=${LBANN_CATCH2_TEST_FILES}") diff --git a/unit_test/CMakeLists.txt b/unit_test/CMakeLists.txt index c43b43447af..1368eafc110 100644 --- a/unit_test/CMakeLists.txt +++ b/unit_test/CMakeLists.txt @@ -1,7 +1,6 @@ -message(STATUS "LBANN_CATCH2_TEST_FILES=${LBANN_CATCH2_TEST_FILES}") - # Add the sequential test main() function -add_executable(seq-catch-tests SequentialCatchMain.cpp "${LBANN_CATCH2_TEST_FILES}") +add_executable(seq-catch-tests + SequentialCatchMain.cpp "${LBANN_CATCH2_TEST_FILES}") target_link_libraries(seq-catch-tests PRIVATE lbann Catch2::Catch2) catch_discover_tests(seq-catch-tests) From 81ad6346b3d95e7cac520ea743be5a12c625d891 Mon Sep 17 00:00:00 2001 From: Yosuke Oyama 
<17844184+oyamay@users.noreply.github.com> Date: Thu, 28 Feb 2019 15:22:13 -0800 Subject: [PATCH 136/443] Add --test-loss option --- scripts/proto/lbann/plot/parser.py | 1 + scripts/proto/lbann/plot/plot.py | 9 +++++++-- scripts/proto/scripts/plot/lbplot | 6 +++++- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/scripts/proto/lbann/plot/parser.py b/scripts/proto/lbann/plot/parser.py index 22a0d3228f9..bfe3c0459b0 100644 --- a/scripts/proto/lbann/plot/parser.py +++ b/scripts/proto/lbann/plot/parser.py @@ -7,6 +7,7 @@ ('val_loss', 'validation objective function : ([0-9.]+)', lambda r: float(r.group(1))), ('val_acc', 'validation categorical accuracy : ([0-9.]+)', lambda r: float(r.group(1))/100.0), ('val_time', 'validation run time : ([0-9.]+)', lambda r: float(r.group(1))), + ('test_loss', 'test objective function : ([0-9.]+)', lambda r: float(r.group(1))), ('num_procs', 'Total number of processes\s*:\s*([\d]+)', lambda r: int(r.group(1))), ('num_procs_on_node', 'Processes on node\s*:\s*([\d]+)', lambda r: int(r.group(1))), ] diff --git a/scripts/proto/lbann/plot/plot.py b/scripts/proto/lbann/plot/plot.py index 2df0155d862..fd0f8a6598d 100644 --- a/scripts/proto/lbann/plot/plot.py +++ b/scripts/proto/lbann/plot/plot.py @@ -26,7 +26,8 @@ def _get_time_axis(time_list, units='hours'): return time_axis def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', - plot_accuracy=True, merge_train_val=False, pretty_ylim=True, save_fig=None, save_csv=None, ylim=None): + plot_accuracy=True, merge_train_val=False, pretty_ylim=True, save_fig=None, save_csv=None, ylim=None, + test_loss=False): """Tabulate and plot stats from LBANN or PyTorch training in common format.""" if pretty_ylim and ylim is not None: @@ -52,6 +53,8 @@ def plot(stat_path_list, stat_name_list, ind_var='time', time_units='hours', headings += ['Peak Train Acc', 'Peak Val Acc'] headings += ['Min. Train Loss', 'Min. Val Loss'] + if test_loss: + headings += ['Min. 
Test Loss'] stat_table.header(headings) # Loop through each trial @@ -111,6 +114,7 @@ def parse_num(d, key): min_train_epoch = d['train_loss'].index(min_train_loss) min_val_loss = min(d['val_loss']) min_val_epoch = d['val_loss'].index(min_val_loss) + min_test_loss = d['test_loss'][0] if test_loss else None # Compute time stats avg_train_time = int(sum(d['train_time'])/len(d['train_time'])) @@ -134,7 +138,8 @@ def parse_num(d, key): # Add row to stats table for current trial row = [run_name, num_procs, num_nodes, total_epochs, avg_train_time, avg_val_time] \ + ([peak_train_acc, peak_val_acc] if plot_accuracy else []) \ - + [min_train_loss, min_val_loss] + + [min_train_loss, min_val_loss] \ + + ([min_test_loss] if test_loss else []) rows.append(row) row_names.append(run_name) diff --git a/scripts/proto/scripts/plot/lbplot b/scripts/proto/scripts/plot/lbplot index a5401df9535..538e3daa533 100755 --- a/scripts/proto/scripts/plot/lbplot +++ b/scripts/proto/scripts/plot/lbplot @@ -31,6 +31,9 @@ def main(): help="The minimum y-axis limit of the loss plot.") parser.add_argument('--loss-ymax', type=float, # default=None, # float("inf"), help="The maximum y-axis limit of the loss plot.") + parser.add_argument("--test-loss", dest="test_loss", action="store_const", + const=True, default=False, + help="Show the test loss") args = parser.parse_args() ylim = None @@ -48,7 +51,8 @@ def main(): pretty_ylim=args.pretty_ylim, save_fig=args.save_fig if args.save_fig != '' else None, save_csv=args.save_csv if args.save_csv != '' else None, - ylim=[args.loss_ymin, args.loss_ymax]) + ylim=[args.loss_ymin, args.loss_ymax], + test_loss=args.test_loss) if __name__=='__main__': From aa322abe56dc9714b25cb5afec90e1634feec9dc Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Thu, 28 Feb 2019 15:48:56 -0800 Subject: [PATCH 137/443] Added a command-line option to force the model to serialize the I/O. 
Clarified the help message descriptions about serializing the I/O, and cleaned up the variable names. --- model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext | 2 +- .../models/jag/ae_cycle_gan/3models/ae_cyc.prototext | 2 +- .../models/jag/ae_cycle_gan/3models/ae_cyc2.prototext | 2 +- .../models/jag/ae_cycle_gan/3models/cycle_gan.prototext | 2 +- model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext | 2 +- model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext | 2 +- model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext | 2 +- .../data_reader_jag_conduit_lustre.prototext | 4 ++-- model_zoo/models/jag/ae_cycle_gan/vae1.prototext | 2 +- model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext | 2 +- model_zoo/models/jag/wae.prototext | 2 +- src/proto/lbann.proto | 2 +- src/proto/proto_common.cpp | 9 ++++++++- src/utils/lbann_library.cpp | 4 ++-- 14 files changed, 23 insertions(+), 16 deletions(-) diff --git a/model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext b/model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext index dff2f6b5185..5f0b88111a6 100644 --- a/model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/3models/ae.prototext @@ -4,7 +4,7 @@ model { name: "ae_model" shareable_training_data_reader:false - serialize_background_io: true + serialize_io: true data_layout: "data_parallel" mini_batch_size: 256 block_size: 256 diff --git a/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc.prototext b/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc.prototext index 16008c2b4f5..c7931f84084 100644 --- a/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc.prototext @@ -2,7 +2,7 @@ model { name: "ae_cycgan_model" shareable_training_data_reader:false - serialize_background_io: true + serialize_io: true data_layout: "data_parallel" mini_batch_size: 256 block_size: 256 diff --git a/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc2.prototext 
b/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc2.prototext index e7ae8d72727..e2a6eb6085d 100644 --- a/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc2.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/3models/ae_cyc2.prototext @@ -3,7 +3,7 @@ model { name: "ae_cycgan_model" shareable_training_data_reader:false - serialize_background_io: true + serialize_io: true data_layout: "data_parallel" mini_batch_size: 256 block_size: 256 diff --git a/model_zoo/models/jag/ae_cycle_gan/3models/cycle_gan.prototext b/model_zoo/models/jag/ae_cycle_gan/3models/cycle_gan.prototext index bd3d05794d0..7d38e4ca6bb 100644 --- a/model_zoo/models/jag/ae_cycle_gan/3models/cycle_gan.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/3models/cycle_gan.prototext @@ -1,7 +1,7 @@ model { name: "cycgan_model" shareable_training_data_reader:false - serialize_background_io: true + serialize_io: true procs_per_trainer:0 objective_function { l2_weight_regularization { diff --git a/model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext b/model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext index 9e3195a1c65..41a071fab87 100644 --- a/model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/cycgan_m1.prototext @@ -1,7 +1,7 @@ model { name: "dis_model" shareable_training_data_reader: true - serialize_background_io: true + serialize_io: true objective_function { l2_weight_regularization { scale_factor: 0.0001 diff --git a/model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext b/model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext index e6b73ba7cdc..9a6715a0fc9 100644 --- a/model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/cycgan_m2.prototext @@ -1,7 +1,7 @@ model { name: "fw_model" shareable_training_data_reader: true - serialize_background_io: true + serialize_io: true objective_function { l2_weight_regularization { scale_factor: 0.0001 diff --git a/model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext 
b/model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext index eaf041c41bd..41005af6f15 100644 --- a/model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/cycgan_m3.prototext @@ -1,7 +1,7 @@ model { name: "inv_model" shareable_training_data_reader: true - serialize_background_io: true + serialize_io: true objective_function { l2_weight_regularization { scale_factor: 0.0001 diff --git a/model_zoo/models/jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext b/model_zoo/models/jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext index 81467ae6970..c13b35fedd1 100644 --- a/model_zoo/models/jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext @@ -16,7 +16,7 @@ data_reader { shuffle: true # change to a lustre path data_filedir: "/p/lustre2/brainusr/datasets/10MJAG/1M_A/" - index_list: "index.txt" + index_list: "index_eight.txt" index_list_per_trainer: false index_list_per_model: false @@ -66,7 +66,7 @@ data_reader { shuffle: true # change to a lustre path data_filedir: "/p/lustre2/brainusr/datasets/10MJAG/1M_B/" - index_list: "index.txt" + index_list: "index_eight.txt" index_list_per_trainer: false index_list_per_model: false diff --git a/model_zoo/models/jag/ae_cycle_gan/vae1.prototext b/model_zoo/models/jag/ae_cycle_gan/vae1.prototext index 8e790c22092..1646bdd0298 100644 --- a/model_zoo/models/jag/ae_cycle_gan/vae1.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/vae1.prototext @@ -4,7 +4,7 @@ model { name: "ae_model" shareable_training_data_reader: false - serialize_background_io: true + serialize_io: true data_layout: "model_parallel" mini_batch_size: 256 block_size: 256 diff --git a/model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext b/model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext index 7f271d3a4a6..d5d3deca580 100644 --- a/model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext +++ 
b/model_zoo/models/jag/ae_cycle_gan/vae_cyc.prototext @@ -4,7 +4,7 @@ model { name: "ae_cycgan_model" shareable_training_data_reader: true - serialize_background_io: true + serialize_io: true data_layout: "model_parallel" mini_batch_size: 256 block_size: 256 diff --git a/model_zoo/models/jag/wae.prototext b/model_zoo/models/jag/wae.prototext index 0c09e386d9e..f8edac45647 100644 --- a/model_zoo/models/jag/wae.prototext +++ b/model_zoo/models/jag/wae.prototext @@ -1,6 +1,6 @@ model { random_init_models_differently: true - serialize_background_io: true + serialize_io: true objective_function { l2_weight_regularization { scale_factor: 0.0001 diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 1ceba293ef6..6c0e07eacaa 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -263,7 +263,7 @@ message Model { int64 num_gpus = 53; //has no effect int64 evaluation_frequency = 54; int64 num_parallel_readers = 100; - bool serialize_background_io = 101; + bool serialize_io = 101; bool disable_cuda = 8; diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index d6ee41f077e..7e4ebc069ac 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -742,6 +742,9 @@ void get_cmdline_overrides(lbann_comm *comm, lbann_data::LbannPB& p) if (opts->has_int("random_seed")) { model->set_random_seed(opts->get_int("random_seed")); } + if(opts->has_bool("serialize_io")) { + model->set_serialize_io(opts->get_bool("serialize_io")); + } if (opts->has_string("opt")) { @@ -821,7 +824,7 @@ void print_parameters(lbann_comm *comm, lbann_data::LbannPB& p) << " block_size: " << m.block_size() << std::endl << " procs_per_trainer: " << m.procs_per_trainer() << std::endl << " num_parallel_readers: " << m.num_parallel_readers() << std::endl - << " serialize_background_io: " << m.serialize_background_io() << std::endl + << " serialize_io: " << m.serialize_io() << std::endl << " disable_cuda: " << m.disable_cuda() << std::endl << " random_seed: " << 
m.random_seed() << std::endl << " data_layout: " << m.data_layout() << std::endl @@ -861,7 +864,11 @@ void print_help(lbann_comm *comm) " --num_gpus=\n" " --num_parallel_readers=\n" " --num_io_threads=\n" + " # of threads used for I/O by the data readers\n" + " --serialize_io=\n" + " force data readers to use a single thread for I/O\n" " --disable_background_io_activity=\n" + " prevent the input layers from fetching data in the background\n" " --disable_cuda=\n" " has no effect unless lbann was compiled with: LBANN_HAS_CUDNN\n" " --random_seed=\n" diff --git a/src/utils/lbann_library.cpp b/src/utils/lbann_library.cpp index 7f6a73fd496..78c864b9e40 100644 --- a/src/utils/lbann_library.cpp +++ b/src/utils/lbann_library.cpp @@ -79,9 +79,9 @@ model *build_model_from_prototext(int argc, char **argv, set_num_parallel_readers(comm, pb); // Check to see if the model wants to reduce the I/O parallelism - if(pb_model->serialize_background_io() && io_thread_pool->get_num_threads() != 1) { + if(pb_model->serialize_io() && io_thread_pool->get_num_threads() != 1) { if(master) { - std::cout << "Model " << pb_model->name() << " serialized the background I/O threads" << std::endl; + std::cout << "Model " << pb_model->name() << " serialized the I/O threads" << std::endl; } io_thread_pool->relaunch_pinned_threads(1); } From c90cf446914fb1decc70729034ca5a894f176944 Mon Sep 17 00:00:00 2001 From: "Brian C. 
Van Essen" Date: Thu, 28 Feb 2019 15:52:59 -0800 Subject: [PATCH 138/443] Fixed accidental commit --- .../jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model_zoo/models/jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext b/model_zoo/models/jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext index c13b35fedd1..81467ae6970 100644 --- a/model_zoo/models/jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/data_reader_jag_conduit_lustre.prototext @@ -16,7 +16,7 @@ data_reader { shuffle: true # change to a lustre path data_filedir: "/p/lustre2/brainusr/datasets/10MJAG/1M_A/" - index_list: "index_eight.txt" + index_list: "index.txt" index_list_per_trainer: false index_list_per_model: false @@ -66,7 +66,7 @@ data_reader { shuffle: true # change to a lustre path data_filedir: "/p/lustre2/brainusr/datasets/10MJAG/1M_B/" - index_list: "index_eight.txt" + index_list: "index.txt" index_list_per_trainer: false index_list_per_model: false From 6759a9bc21639d5fb9c5b09e255160cec41c9bd1 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Thu, 28 Feb 2019 17:10:30 -0800 Subject: [PATCH 139/443] remove convert; add emplace --- include/lbann/utils/type_erased_matrix.hpp | 23 +++++++------------ .../unit_test/type_erased_matrix_test.cpp | 8 +++---- 2 files changed, 12 insertions(+), 19 deletions(-) diff --git a/include/lbann/utils/type_erased_matrix.hpp b/include/lbann/utils/type_erased_matrix.hpp index 615b95ac217..4812287a369 100644 --- a/include/lbann/utils/type_erased_matrix.hpp +++ b/include/lbann/utils/type_erased_matrix.hpp @@ -90,26 +90,19 @@ class type_erased_matrix return any_cast const&>(m_matrix); } - /** @brief Access the underlying matrix. - * - * Converts (copies) the internal matrix into a matrix of a new - * type, which is then held in place of the original matrix. 
+ /** @brief Replace the held matrix with a new one constructed + * in-place from the arguments. * - * @tparam OldField The data type of the originally held matrix - * @tparam NewField The data type of the newly held matrix + * @tparam Field The data type of the newly held matrix * - * @return @c const reference to the underlying matrix + * @param args The arguments with which to construct the new matrix. * - * @throws bad_any_cast If the datatype of the held matrix does not - * match the input @c OldField. + * @return Reference to the new underlying matrix */ - template - El::Matrix& convert() + template + El::Matrix& emplace(Args&&... args) { - El::Matrix new_mat; - El::Copy(this->template get(), new_mat); - m_matrix.template emplace>(std::move(new_mat)); - return this->template get(); + return m_matrix.emplace>(std::forward(args)...); } private: diff --git a/src/utils/unit_test/type_erased_matrix_test.cpp b/src/utils/unit_test/type_erased_matrix_test.cpp index 9a77d217686..37fc7e1d1ce 100644 --- a/src/utils/unit_test/type_erased_matrix_test.cpp +++ b/src/utils/unit_test/type_erased_matrix_test.cpp @@ -52,9 +52,9 @@ TEMPLATE_PRODUCT_TEST_CASE( REQUIRE(internal_mat.Height() == 10); REQUIRE(internal_mat.Width() == 12); } - AND_WHEN ("The matrix is converted") + AND_WHEN ("The matrix is changed") { - REQUIRE_NOTHROW(mat->template convert()); + REQUIRE_NOTHROW(mat->template emplace(14,10)); THEN ("The internal matrix has the right type and size") { @@ -62,8 +62,8 @@ TEMPLATE_PRODUCT_TEST_CASE( REQUIRE_THROWS_AS(mat->template get(), lbann::utils::bad_any_cast); - REQUIRE(mat->template get().Height() == 10); - REQUIRE(mat->template get().Width() == 12); + REQUIRE(mat->template get().Height() == 14); + REQUIRE(mat->template get().Width() == 10); } } } From 4a5d381a059ab3dfa4f76a4031a6f395c04037bc Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 28 Feb 2019 17:24:20 -0800 Subject: [PATCH 140/443] Check conv layer parameters during setup. No checking in constructor. 
--- .../layers/learning/base_convolution.hpp | 131 ++++++++++++------ include/lbann/layers/learning/convolution.hpp | 35 +---- .../lbann/layers/learning/deconvolution.hpp | 38 ++--- 3 files changed, 107 insertions(+), 97 deletions(-) diff --git a/include/lbann/layers/learning/base_convolution.hpp b/include/lbann/layers/learning/base_convolution.hpp index f445dcd778a..04d31342212 100644 --- a/include/lbann/layers/learning/base_convolution.hpp +++ b/include/lbann/layers/learning/base_convolution.hpp @@ -63,7 +63,7 @@ class base_convolution_layer : public Layer { * convolution. The default convolution operation has one group, and a * depthwise convolution has as many groups as there are input channels. */ - int m_num_groups; + int m_groups; /** Scaling factor for bias term. * If the scaling factor is zero, bias is not applied. @@ -106,50 +106,19 @@ class base_convolution_layer : public Layer { int groups, bool has_bias) : Layer(comm), - m_output_channels(std::max(output_channels, 1)), + m_output_channels(output_channels), m_conv_dims(std::move(conv_dims)), m_pads(std::move(pads)), m_strides(std::move(strides)), m_dilations(std::move(dilations)), - m_num_groups(std::max(groups, 1)), + m_groups(groups), m_bias_scaling_factor(has_bias ? 
1 : 0), m_kernel_gradient(this->get_comm()->get_trainer_grid()), m_bias_gradient(this->get_comm()->get_trainer_grid()) #ifdef LBANN_HAS_CUDNN , m_tensors_cudnn_desc(this) #endif // LBANN_HAS_CUDNN - { - std::ostringstream err; - - // Make sure that configuration is supported - if (Device == El::Device::CPU - && std::any_of(m_dilations.begin(), m_dilations.end(), - [](El::Int d) { return d != 1; })) { - err << "layer \"" << this->get_name() << "\" " - << "has non-unit dilation, which is not yet supported on CPU"; - LBANN_ERROR(err.str()); - } - if (Device == El::Device::CPU && m_num_groups != 1) { - err << "layer \"" << this->get_name() << "\" " - << "has " << m_num_groups << ", " - << "but only unit groups are currently supported on CPU"; - LBANN_ERROR(err.str()); - } - - // Check dimensions of convolution parameters - if (m_pads.size() != m_conv_dims.size() - || m_strides.size() != m_conv_dims.size() - || m_dilations.size() != m_conv_dims.size()) { - err << " layer \"" << this->get_name() << "\" " - << "has an invalid number of convolution parameters " - << "(conv_dims has " << m_conv_dims.size() << " entries, " - << "pads has " << m_pads.size() << ", " - << "strides has " << m_strides.size() << ", " - << "dilations has " << m_dilations.size() << ")"; - LBANN_ERROR(err.str()); - } - - } + {} base_convolution_layer(const base_convolution_layer& other) : Layer(other), @@ -158,7 +127,7 @@ class base_convolution_layer : public Layer { m_pads(other.m_pads), m_strides(other.m_strides), m_dilations(other.m_dilations), - m_num_groups(other.m_num_groups), + m_groups(other.m_groups), m_bias_scaling_factor(other.m_bias_scaling_factor), m_kernel_gradient(other.m_kernel_gradient), m_bias_gradient(other.m_bias_gradient) @@ -184,7 +153,7 @@ class base_convolution_layer : public Layer { m_pads = other.m_pads; m_strides = other.m_strides; m_dilations = other.m_dilations; - m_num_groups = other.m_num_groups; + m_groups = other.m_groups; m_bias_scaling_factor = 
other.m_bias_scaling_factor; m_kernel_gradient = other.m_kernel_gradient; m_bias_gradient = other.m_bias_gradient; @@ -255,7 +224,7 @@ class base_convolution_layer : public Layer { desc.add("Dilations", ss.str()); // Groups - desc.add("Groups", m_num_groups); + desc.add("Groups", m_groups); // Bias ss.str(std::string{}); @@ -269,6 +238,90 @@ class base_convolution_layer : public Layer { } + void setup_dims() override { + Layer::setup_dims(); + std::ostringstream err; + + // Check number of channels and channel groups + const auto& input_dims = get_input_dims(); + if (m_output_channels < 1) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has an invalid number of output channels " + << "(" << m_output_channels << ")"; + LBANN_ERROR(err.str()); + } else if (m_groups < 1) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has an invalid number of groups (" << m_groups << ")"; + LBANN_ERROR(err.str()); + } else if (input_dims[0] % m_groups != 0 + || m_output_channels % m_groups != 0) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has " << m_groups << " groups, which does not divide " + << "the input channels (" << input_dims[0] << ") or " + << "the output channels (" << m_output_channels << ")"; + LBANN_ERROR(err.str()); + } + + // Check kernel dims, pads, stride, dilations + const auto& num_spatial_dims = input_dims.size() - 1; + if (m_conv_dims.size() != num_spatial_dims + || std::any_of(m_conv_dims.begin(), m_conv_dims.end(), + [](El::Int d) { return d < 1; })) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has invalid spatial dimensions for convolution kernel ("; + if (m_conv_dims.empty()) { err << "no dimensions"; } + for (size_t i = 0; i < m_conv_dims.size(); ++i) { + err << (i > 0 ? 
"x" : "") << m_conv_dims[i]; + } + err << ", expected " << num_spatial_dims << " spatial dimensions)"; + LBANN_ERROR(err.str()); + } else if (m_pads.size() != num_spatial_dims) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has invalid convolution pads (("; + for (size_t i = 0; i < m_pads.size(); ++i) { + err << (i > 0 ? "," : "") << m_pads[i]; + } + err << "), expected " << num_spatial_dims << " spatial dimensions)"; + LBANN_ERROR(err.str()); + } else if (m_strides.size() != num_spatial_dims + || std::any_of(m_strides.begin(), m_strides.end(), + [](El::Int d) { return d < 1; })) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has invalid convolution strides (("; + for (size_t i = 0; i < m_strides.size(); ++i) { + err << (i > 0 ? "," : "") << m_strides[i]; + } + err << "), expected " << num_spatial_dims << " spatial dimensions)"; + LBANN_ERROR(err.str()); + } else if (m_dilations.size() != num_spatial_dims + || std::any_of(m_dilations.begin(), m_dilations.end(), + [](El::Int d) { return d < 1; })) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has invalid convolution dilations (("; + for (size_t i = 0; i < m_dilations.size(); ++i) { + err << (i > 0 ? "," : "") << m_dilations[i]; + } + err << "), expected " << num_spatial_dims << " spatial dimensions)"; + LBANN_ERROR(err.str()); + } + + // Make sure that configuration is supported + if (Device == El::Device::CPU + && std::any_of(m_dilations.begin(), m_dilations.end(), + [](El::Int d) { return d != 1; })) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has non-unit dilation, which is not yet supported on CPU"; + LBANN_ERROR(err.str()); + } + if (Device == El::Device::CPU && m_groups != 1) { + err << get_type() << " layer \"" << get_name() << "\" " + << "has " << m_groups << " groups, " + << "but only one group is currently supported on CPU"; + LBANN_ERROR(err.str()); + } + + } + /** Setup layer data. 
* The kernel weights are setup in the convolution and * deconvolution classes. */ @@ -388,7 +441,7 @@ class base_convolution_layer : public Layer { CUDNN_CROSS_CORRELATION, cudnn::get_data_type())); CHECK_CUDNN(cudnnSetConvolutionGroupCount(m_convolution_cudnn_desc, - m_num_groups)); + m_groups)); // Set bias tensor descriptor std::vector bias_dims(output_dims.size() + 1, 1); diff --git a/include/lbann/layers/learning/convolution.hpp b/include/lbann/layers/learning/convolution.hpp index 76294d6332e..adc8bd02676 100644 --- a/include/lbann/layers/learning/convolution.hpp +++ b/include/lbann/layers/learning/convolution.hpp @@ -78,10 +78,10 @@ class convolution_layer : public base_convolution_layer { comm, num_data_dims, num_output_channels, - conv_dims, - pads, - strides, - dilations, + std::move(conv_dims), + std::move(pads), + std::move(strides), + std::move(dilations), groups, has_bias) { static_assert(Layout == data_layout::DATA_PARALLEL, @@ -97,36 +97,17 @@ class convolution_layer : public base_convolution_layer { El::Device get_device_allocation() const override { return Device; } +protected: + void setup_dims() override { base_convolution_layer::setup_dims(); - std::stringstream err; // Get tensor dimensions const auto& input_dims = this->get_input_dims(); auto output_dims = input_dims; - const auto input_channels = input_dims[0]; - const auto output_channels = this->m_output_channels; - - // Check that number of groups is valid - if (this->m_num_groups < 1) { - err << this->get_type() << " layer " - << "\"" << this->get_name() << "\" " - << "has " << this->m_num_groups << " groups"; - LBANN_ERROR(err.str()); - } else if (input_channels % this->m_num_groups != 0 - || output_channels % this->m_num_groups != 0) { - err << this->get_type() << " layer " - << "\"" << this->get_name() << "\" has " - << input_channels << " input channels, " - << output_channels << " output channels, and " - << this->m_num_groups << " groups " - << "(groups must evenly divide " - << 
"the input channels and output channels)"; - LBANN_ERROR(err.str()); - } // Initialize output tensor dimensions - output_dims[0] = output_channels; + output_dims[0] = this->m_output_channels; for (size_t i = 0; i < output_dims.size() - 1; ++i) { const auto& input_dim = input_dims[i+1]; const auto& kernel_dim = this->m_conv_dims[i]; @@ -142,8 +123,6 @@ class convolution_layer : public base_convolution_layer { } -protected: - std::vector get_kernel_dims() const { std::vector dims; dims.push_back(this->m_output_channels); diff --git a/include/lbann/layers/learning/deconvolution.hpp b/include/lbann/layers/learning/deconvolution.hpp index e7bf3ffbdab..922353cdb31 100644 --- a/include/lbann/layers/learning/deconvolution.hpp +++ b/include/lbann/layers/learning/deconvolution.hpp @@ -35,9 +35,7 @@ namespace lbann { // Forward declaration. class lbann_callback_imcomm; -/** @brief Transpose of the convolution layer. - * @todo Rename to "transposed_convolution_layer". - */ +/** @brief Transpose of the convolution layer. 
*/ template class deconvolution_layer : public base_convolution_layer { private: @@ -78,10 +76,10 @@ class deconvolution_layer : public base_convolution_layer { comm, num_data_dims, num_output_channels, - conv_dims, - pads, - strides, - dilations, + std::move(conv_dims), + std::move(pads), + std::move(strides), + std::move(dilations), groups, has_bias) { static_assert(Layout == data_layout::DATA_PARALLEL, @@ -104,8 +102,6 @@ class deconvolution_layer : public base_convolution_layer { // Get tensor dimensions const auto& input_dims = this->get_input_dims(); auto output_dims = input_dims; - const auto input_channels = input_dims[0]; - const auto output_channels = this->m_output_channels; // Check for unsupported features /// @todo Implement dilated and grouped deconvolution @@ -121,35 +117,17 @@ class deconvolution_layer : public base_convolution_layer { err << ")"; LBANN_ERROR(err.str()); } - if (this->m_num_groups != 1) { + if (this->m_groups != 1) { err << this->get_type() << " layer " << "\"" << this->get_name() << "\" " << "has non-unit groups " - << "(" << this->m_num_groups << ")"; - LBANN_ERROR(err.str()); - } - - // Check that number of groups is valid - if (this->m_num_groups < 1) { - err << this->get_type() << " layer " - << "\"" << this->get_name() << "\" " - << "has " << this->m_num_groups << " groups"; - LBANN_ERROR(err.str()); - } else if (input_channels % this->m_num_groups != 0 - || output_channels % this->m_num_groups != 0) { - err << this->get_type() << " layer " - << "\"" << this->get_name() << "\" has " - << input_channels << " input channels, " - << output_channels << " output channels, and " - << this->m_num_groups << " groups " - << "(groups must evenly divide " - << "the input channels and output channels)"; + << "(" << this->m_groups << ")"; LBANN_ERROR(err.str()); } // Initialize output tensor dimensions /// @todo Dilated deconvolution - output_dims[0] = output_channels; + output_dims[0] = this->m_output_channels; for (size_t i = 0; i < 
output_dims.size() - 1; ++i) { const auto& input_dim = input_dims[i+1]; const auto& kernel_dim = this->m_conv_dims[i]; From bc3a676b87a8dcbc38f9a6f30ee3265f17e419cc Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Thu, 28 Feb 2019 22:11:54 -0800 Subject: [PATCH 141/443] Fixed a bug in how the open file descriptors were being removed from the deque-based head (i.e. accidently, not farthest from current time first). Fixed a bug where the number of open files in the loading stage were not managed and could lead up to file descriptor contention. Refactored the code to perform all heap management in a single function. Added try-catch blocks around hdf5 file access with a retry to handle transient file system errors. --- .../lbann/data_readers/sample_list_jag.hpp | 224 +++++++----------- .../data_readers/sample_list_jag_impl.hpp | 33 ++- src/data_readers/data_reader_jag_conduit.cpp | 7 +- 3 files changed, 126 insertions(+), 138 deletions(-) diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index a2330ca0dea..aecfa726136 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -21,7 +21,9 @@ #include #include "conduit/conduit_relay_io_hdf5.hpp" -#define LBANN_MAX_OPEN_DATA_FILES 768 +/// Number of system and other files that may be open during execution +#define LBANN_MAX_OPEN_FILE_MARGIN 128 +#define LBANN_MAX_OPEN_FILE_RETRY 3 namespace lbann { @@ -128,35 +130,22 @@ class sample_list_jag { std::get<0>(m_file_id_stats_map[id]) = filename; } - void set_samples_hdf5_handle(sample_file_id_t id, hid_t h) { - auto&& e = m_file_id_stats_map[id]; - std::get<1>(e) = h; - // std::cout << "Attempt to set the hdf5 handle " << h << " for filename " << std::get<0>(e) << std::endl; - - // std::cout << "set_files_hdf5_handle existing list for " << id << " {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; - // for (auto&& v : std::get<2>(e)) { - // 
std::cout << "{" << v.first << "," << v.second << "}, "; - // } - // std::cout << std::endl; - - // if(!m_open_fd_pq.empty()) { - // // std::cout << "set_files_hdf5_handle Priotirty QUeue "; - // std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // // auto& q = m_open_fd_pq.front(); - // // std::cout << q.first << " {" << q.second.first << "," << q.second.second << "}, "; - // // std::cout << std::endl; - // } - - if(!m_open_fd_pq.empty()) { - /// Before we can enqueue the any new access times for this descriptor, remove any - /// earlier descriptor - std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - if(m_open_fd_pq.front().first == id) { - //LBANN_ERROR("We have weirdness here, the head of the queue is not " + std::to_string(id)); - m_open_fd_pq.pop_front(); + void set_files_hdf5_handle(const std::string& filename, hid_t h) { + sample_file_id_t id = 0; + for (auto&& e : m_file_id_stats_map) { + if(std::get<0>(e) == filename) { + std::get<1>(e) = h; + break; } + id++; + } + manage_open_hdf5_handles(id, true); + } - if(m_open_fd_pq.size() > LBANN_MAX_OPEN_DATA_FILES) { + void manage_open_hdf5_handles(sample_file_id_t id, bool pre_open_fd = false) { + /// When we enter this function the priority queue is either empty or a heap + if(!m_open_fd_pq.empty()) { + if(m_open_fd_pq.size() > m_max_open_files) { // std::cout << "PQ is too big the queue looks like "; // for(auto&& p: m_open_fd_pq) { // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; @@ -166,12 +155,16 @@ class sample_list_jag { // { auto& f = m_open_fd_pq.front(); auto& victim = m_file_id_stats_map[f.first]; - // std::cout << "{" << f.second.first << ", " << f.second.second << "}" << std::endl; - // // std::cout << q.top() << " "; - // } - m_open_fd_pq.pop_front(); - conduit::relay::io::hdf5_close_file(std::get<1>(victim)); - std::get<1>(victim) = 0; + hid_t victim_fd = std::get<1>(victim); + // std::cout << "Removing [" << 
f.first << ", {" << f.second.first << ", " << f.second.second << "}]" << std::endl; + std::pop_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + m_open_fd_pq.pop_back(); + if(victim_fd > 0) { + conduit::relay::io::hdf5_close_file(victim_fd); + std::get<1>(victim) = 0; + // }else { + // std::cout << "Closing id " << id << " {" << f.second.first << ", " << f.second.second << "}" << " but the hid = " << victim_fd << std::endl; + } // std::cout << '\n'; // std::cout << "Now the queue looks like "; // for(auto&& p: m_open_fd_pq) { @@ -180,61 +173,65 @@ class sample_list_jag { // std::cout << std::endl; } - std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); } - auto& file_access_queue = std::get<2>(e); - if(!file_access_queue.empty()) { - file_access_queue.pop_front(); - if(!file_access_queue.empty()) { - m_open_fd_pq.emplace_back(std::make_pair(id,file_access_queue.front())); - std::push_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // std::cout << "set_files_hdf5_handle New priotirty queue top "; - // auto& q = m_open_fd_pq.front(); - // for(auto&& q: m_open_fd_pq) { - // std::cout << q.first << " {" << q.second.first << "," << q.second.second << "}, "; - // } - // std::cout << std::endl; - } - // std::cout << "set_files_hdf5_handle updated list for " << id << " {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; - // for (auto&& v : std::get<2>(e)) { - // std::cout << "{" << v.first << "," << v.second << "}, "; - // } - // std::cout << std::endl; + /// Before we can enqueue the any new access times for this descriptor, remove any + /// earlier descriptor + std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + if(m_open_fd_pq.front().first == id) { + // LBANN_ERROR("We have weirdness here, the head of the queue is not " + std::to_string(id)); + m_open_fd_pq.pop_front(); } + std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // 
std::get<1>(m_file_id_stats_map[id]) = h; - // std::cout << "I am setting the hdf5 handle " << h << " for filename " << filename << std::endl; - - // m_open_fd_map.emplace(std::make_tuple(filename, h, access_count)); - // for (auto&& e : m_file_id_stats_map) { - // std::cout << "set_files_hdf5_handle {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; - // if(std::get<2>(e).empty()) { - // std::cout << "empty" << std::endl; - // }else { - // for (auto&& v : std::get<2>(e)) { - // std::cout << "{" << v.first << "," << v.second << "}, "; - // } - // std::cout << std::endl; - // } + // std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // if(!m_open_fd_pq.empty() && m_open_fd_pq.front().first != id) { + // LBANN_WARNING("We have weirdness here, the head of the queue is not " + std::to_string(id)); // } - // for (auto&& e : m_file_id_stats_map) - // std::cout << "{" << std::get<0)>(e) << ", " << std::get<1>(e) << ", " << std::get<2>(e) << "}" << std::endl; - } + auto& e = m_file_id_stats_map[id]; - void set_files_hdf5_handle(const std::string& filename, hid_t h) { - sample_file_id_t id = 0; - for (auto&& e : m_file_id_stats_map) { - if(std::get<0>(e) == filename) { - break; + // std::cout << "manage_open_files_hdf5_handle updated list {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; + // for (auto&& v : std::get<2>(e)) { + // std::cout << "{" << v.first << "," << v.second << "}, "; + // } + // std::cout << std::endl; + + // std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // if(!m_open_fd_pq.empty()) { + // std::cout << "manage_open_files_hdf5_handle priority queue :"; + // auto& p = m_open_fd_pq.front(); + // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; + // std::cout << std::endl; + // } + + auto& file_access_queue = std::get<2>(e); + if(!file_access_queue.empty()) { + if(!pre_open_fd) { + file_access_queue.pop_front(); } - id++; } - set_samples_hdf5_handle(id, h); + 
if(!file_access_queue.empty()) { + m_open_fd_pq.emplace_back(std::make_pair(id,file_access_queue.front())); + }else { + /// If there are no future access of the file place a terminator entry to track + /// the open file, but is always sorted to the top of the heap + m_open_fd_pq.emplace_back(std::make_pair(id,std::make_pair(INT_MAX,id))); + } + std::push_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); + // if(!m_open_fd_pq.empty()) { + // std::cout << "manage_open_files_hdf5_handle new priority queue after inserting eleement :"; + // // auto& p = m_open_fd_pq.front(); + // for(auto&& p: m_open_fd_pq) { + // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; + // } + // std::cout << std::endl; + // } + return; } - hid_t open_samples_hdf5_handle(const size_t i) { + hid_t open_samples_hdf5_handle(const size_t i, bool pre_open_fd = false) { const sample_t& s = m_sample_list[i]; sample_file_id_t id = s.first; hid_t h = get_samples_hdf5_handle(id); @@ -244,63 +241,25 @@ class sample_list_jag { if (file_name.empty() || !check_if_file_exists(conduit_file_path)) { LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' does not exist."); } - h = conduit::relay::io::hdf5_open_file_for_read( conduit_file_path ); + bool retry = false; + int retry_cnt = 0; + do { + try { + h = conduit::relay::io::hdf5_open_file_for_read( conduit_file_path ); + }catch (conduit::Error const& e) { + LBANN_WARNING(" :: trying to open the file " + conduit_file_path + " and got " + e.what()); + retry = true; + retry_cnt++; + } + }while(retry && retry_cnt < 3); + if (h <= static_cast(0)) { LBANN_ERROR(std::string{} + " :: data file '" + conduit_file_path + "' could not be opened."); } - set_samples_hdf5_handle(id, h); - }else { - - if(!m_open_fd_pq.empty()) { - /// Before we can enqueue the any new access times for this descriptor, remove any - /// earlier descriptor - std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), 
pq_cmp); - if(m_open_fd_pq.front().first == id) { - // LBANN_ERROR("We have weirdness here, the head of the queue is not " + std::to_string(id)); - m_open_fd_pq.pop_front(); - } - std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - } - - // std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // if(!m_open_fd_pq.empty() && m_open_fd_pq.front().first != id) { - // LBANN_WARNING("We have weirdness here, the head of the queue is not " + std::to_string(id)); - // } - auto& e = m_file_id_stats_map[id]; - - // std::cout << "open_files_hdf5_handle updated list {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; - // for (auto&& v : std::get<2>(e)) { - // std::cout << "{" << v.first << "," << v.second << "}, "; - // } - // std::cout << std::endl; - - // std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // if(!m_open_fd_pq.empty()) { - // // std::cout << "open_files_hdf5_handle priority queue :"; - // auto& p = m_open_fd_pq.front(); - // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; - // std::cout << std::endl; - // } - - auto& file_access_queue = std::get<2>(e); - if(!file_access_queue.empty()) { - file_access_queue.pop_front(); - if(!file_access_queue.empty()) { - m_open_fd_pq.emplace_back(std::make_pair(id,file_access_queue.front())); - std::push_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // if(!m_open_fd_pq.empty()) { - // std::cout << "open_files_hdf5_handle new priority queue :"; - // // auto& p = m_open_fd_pq.front(); - // for(auto&& p: m_open_fd_pq) { - // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; - // } - // std::cout << std::endl; - // } - } - } + std::get<1>(e) = h; } - + manage_open_hdf5_handles(id, pre_open_fd); return h; } @@ -312,8 +271,8 @@ class sample_list_jag { auto& e = m_file_id_stats_map[id]; auto& file_access_queue = std::get<2>(e); if(file_access_queue.empty()) { - 
conduit::relay::io::hdf5_close_file(std::get<1>(e)); - std::get<1>(e) = 0; + conduit::relay::io::hdf5_close_file(std::get<1>(e)); + std::get<1>(e) = 0; } } } @@ -374,6 +333,7 @@ class sample_list_jag { // std::priority_queue, std::function> m_open_fd_pq; std::deque m_open_fd_pq; + size_t m_max_open_files; }; void handle_mpi_error(int ierr); diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index 20d0859cfed..57dbc69eb63 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -19,6 +19,7 @@ #include #include +#include namespace lbann { @@ -46,7 +47,9 @@ inline const std::string& sample_list_header::get_file_dir() const { return m_file_dir; } -inline sample_list_jag::sample_list_jag() {} +inline sample_list_jag::sample_list_jag() { + m_max_open_files = getdtablesize() - LBANN_MAX_OPEN_FILE_MARGIN; +} inline sample_list_jag::~sample_list_jag() { // Close the existing open files @@ -146,7 +149,18 @@ inline sample_list_header sample_list_jag::read_header(std::istream& istrm, cons } inline hid_t sample_list_jag::get_conduit_bundle_samples(std::string conduit_file_path, std::vector& sample_names, size_t included_samples, size_t excluded_samples) { - hid_t hdf5_file_hnd = conduit::relay::io::hdf5_open_file_for_read( conduit_file_path ); + hid_t hdf5_file_hnd = 0; + bool retry = false; + int retry_cnt = 0; + do { + try { + hdf5_file_hnd = conduit::relay::io::hdf5_open_file_for_read( conduit_file_path ); + }catch (conduit::Error const& e) { + LBANN_WARNING(" :: trying to open the file " + conduit_file_path + " and got " + e.what()); + retry = true; + retry_cnt++; + } + }while(retry && retry_cnt < LBANN_MAX_OPEN_FILE_RETRY); if (hdf5_file_hnd <= static_cast(0)) { std::cout << "Opening the file didn't work" << std::endl; @@ -451,8 +465,10 @@ inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { // Close the existing open 
files for(auto&& e : m_file_id_stats_map) { - conduit::relay::io::hdf5_close_file(std::get<1>(e)); - std::get<1>(e) = 0; + if(std::get<1>(e) > 0) { + conduit::relay::io::hdf5_close_file(std::get<1>(e)); + std::get<1>(e) = 0; + } std::get<2>(e).clear(); } m_open_fd_pq.clear(); @@ -500,7 +516,10 @@ inline void sample_list_jag::all_gather_packed_lists(lbann_comm& comm) { inline void sample_list_jag::compute_epochs_file_usage(const std::vector& shuffled_indices, int mini_batch_size, const lbann_comm& comm) { for (auto&& e : m_file_id_stats_map) { - std::get<1>(e) = 0; + if(std::get<1>(e) > 0) { + conduit::relay::io::hdf5_close_file(std::get<1>(e)); + std::get<1>(e) = 0; + } std::get<2>(e).clear(); } @@ -515,6 +534,10 @@ inline void sample_list_jag::compute_epochs_file_usage(const std::vector& s int substep = (i % mini_batch_size) / comm.get_procs_per_trainer(); std::get<2>(m_file_id_stats_map[index]).emplace_back(std::make_pair(step, substep)); } + // hid_t hdf5_file_hnd = get_samples_hdf5_handle(index); + // if(hdf5_file_hnd <= static_cast(0)) { + // open_samples_hdf5_handle(idx, true); + // } } } diff --git a/src/data_readers/data_reader_jag_conduit.cpp b/src/data_readers/data_reader_jag_conduit.cpp index e9381cb847c..e4dcfd1fcc9 100644 --- a/src/data_readers/data_reader_jag_conduit.cpp +++ b/src/data_readers/data_reader_jag_conduit.cpp @@ -758,9 +758,12 @@ void data_reader_jag_conduit::load() { /// Check the data that each rank loaded if (!m_is_data_loaded) { - std::cout << "Checking local data" << std::endl; m_is_data_loaded = true; + /// Open the first sample to make sure that all of the fields are correct + size_t data_id = (m_sample_list[0]).first; + m_sample_list.open_samples_hdf5_handle(data_id, true); + if (m_scalar_keys.size() == 0u) { set_all_scalar_choices(); // use all by default if none is specified } @@ -772,6 +775,8 @@ void data_reader_jag_conduit::load() { check_input_keys(); check_image_data(); + + 
m_sample_list.close_if_done_samples_hdf5_handle(data_id); } /// Merge all of the sample lists From def53974eb8cd74a33ebb33e105e282be3c07031 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Fri, 1 Mar 2019 07:21:32 -0800 Subject: [PATCH 142/443] Removed debugging code. --- .../lbann/data_readers/sample_list_jag.hpp | 55 +------------------ .../data_readers/sample_list_jag_impl.hpp | 6 -- 2 files changed, 1 insertion(+), 60 deletions(-) diff --git a/include/lbann/data_readers/sample_list_jag.hpp b/include/lbann/data_readers/sample_list_jag.hpp index aecfa726136..49695c835ba 100644 --- a/include/lbann/data_readers/sample_list_jag.hpp +++ b/include/lbann/data_readers/sample_list_jag.hpp @@ -146,66 +146,27 @@ class sample_list_jag { /// When we enter this function the priority queue is either empty or a heap if(!m_open_fd_pq.empty()) { if(m_open_fd_pq.size() > m_max_open_files) { - // std::cout << "PQ is too big the queue looks like "; - // for(auto&& p: m_open_fd_pq) { - // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; - // } - // std::cout << std::endl; - // std::cout << "The file descriptors are over the limit, lets close " << m_open_fd_pq.front().first << std::endl; - // { auto& f = m_open_fd_pq.front(); auto& victim = m_file_id_stats_map[f.first]; hid_t victim_fd = std::get<1>(victim); - // std::cout << "Removing [" << f.first << ", {" << f.second.first << ", " << f.second.second << "}]" << std::endl; std::pop_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); m_open_fd_pq.pop_back(); if(victim_fd > 0) { conduit::relay::io::hdf5_close_file(victim_fd); std::get<1>(victim) = 0; - // }else { - // std::cout << "Closing id " << id << " {" << f.second.first << ", " << f.second.second << "}" << " but the hid = " << victim_fd << std::endl; } - // std::cout << '\n'; - // std::cout << "Now the queue looks like "; - // for(auto&& p: m_open_fd_pq) { - // std::cout << "[" << p.first << ", " << "{" << 
p.second.first << "," << p.second.second << "}], "; - // } - // std::cout << std::endl; } - - // std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); } /// Before we can enqueue the any new access times for this descriptor, remove any /// earlier descriptor std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); if(m_open_fd_pq.front().first == id) { - // LBANN_ERROR("We have weirdness here, the head of the queue is not " + std::to_string(id)); m_open_fd_pq.pop_front(); } std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // std::sort_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // if(!m_open_fd_pq.empty() && m_open_fd_pq.front().first != id) { - // LBANN_WARNING("We have weirdness here, the head of the queue is not " + std::to_string(id)); - // } - auto& e = m_file_id_stats_map[id]; - - // std::cout << "manage_open_files_hdf5_handle updated list {" << std::get<0>(e) << ", " << std::get<1>(e) << ": "; - // for (auto&& v : std::get<2>(e)) { - // std::cout << "{" << v.first << "," << v.second << "}, "; - // } - // std::cout << std::endl; - - // std::make_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // if(!m_open_fd_pq.empty()) { - // std::cout << "manage_open_files_hdf5_handle priority queue :"; - // auto& p = m_open_fd_pq.front(); - // std::cout << "[" << p.first << ", " << "{" << p.second.first << "," << p.second.second << "}], "; - // std::cout << std::endl; - // } - auto& file_access_queue = std::get<2>(e); if(!file_access_queue.empty()) { if(!pre_open_fd) { @@ -220,14 +181,6 @@ class sample_list_jag { m_open_fd_pq.emplace_back(std::make_pair(id,std::make_pair(INT_MAX,id))); } std::push_heap(m_open_fd_pq.begin(), m_open_fd_pq.end(), pq_cmp); - // if(!m_open_fd_pq.empty()) { - // std::cout << "manage_open_files_hdf5_handle new priority queue after inserting eleement :"; - // // auto& p = m_open_fd_pq.front(); - // for(auto&& p: m_open_fd_pq) { - // std::cout << "[" << p.first << ", " << "{" << 
p.second.first << "," << p.second.second << "}], "; - // } - // std::cout << std::endl; - // } return; } @@ -324,13 +277,7 @@ class sample_list_jag { /// Track the number of samples per file std::unordered_map m_file_map; - /// Track the number of open file descriptors and how many times - /// each file descriptor will be used - // std::unordered_map m_open_fd_map; - // std::set> m_open_fd_map; - // Using lambda to compare elements. - // auto cmp = [](std::pair left,std::pair right) { return (left.second) > (right.second);}; - // std::priority_queue, std::function> m_open_fd_pq; + /// Track the number of open file descriptors and when they will be used next std::deque m_open_fd_pq; size_t m_max_open_files; diff --git a/include/lbann/data_readers/sample_list_jag_impl.hpp b/include/lbann/data_readers/sample_list_jag_impl.hpp index 57dbc69eb63..6a81d216008 100644 --- a/include/lbann/data_readers/sample_list_jag_impl.hpp +++ b/include/lbann/data_readers/sample_list_jag_impl.hpp @@ -127,10 +127,8 @@ inline sample_list_header sample_list_jag::read_header(std::istream& istrm, cons size_t found = sample_list_type.find(type_exclusive); if (found != std::string::npos) { - std::cout << "Exclusive (" + sample_list_type + ") sample list" << std::endl; hdr.m_is_exclusive = true; } else { - std::cout << "Inclusive (" + sample_list_type + ") sample list" << std::endl; hdr.m_is_exclusive = false; } @@ -534,10 +532,6 @@ inline void sample_list_jag::compute_epochs_file_usage(const std::vector& s int substep = (i % mini_batch_size) / comm.get_procs_per_trainer(); std::get<2>(m_file_id_stats_map[index]).emplace_back(std::make_pair(step, substep)); } - // hid_t hdf5_file_hnd = get_samples_hdf5_handle(index); - // if(hdf5_file_hnd <= static_cast(0)) { - // open_samples_hdf5_handle(idx, true); - // } } } From 5fc54ff894fb76871a4cae50ab63c882a92c7053 Mon Sep 17 00:00:00 2001 From: Sam Ade Jacobs Date: Fri, 1 Mar 2019 08:46:08 -0800 Subject: [PATCH 143/443] jag metadata prototext with 
repeated scalar values commented out --- .../ae_cycle_gan/jag_100M_metadata.prototext | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/model_zoo/models/jag/ae_cycle_gan/jag_100M_metadata.prototext b/model_zoo/models/jag/ae_cycle_gan/jag_100M_metadata.prototext index 371e812d976..1643b6db51a 100644 --- a/model_zoo/models/jag/ae_cycle_gan/jag_100M_metadata.prototext +++ b/model_zoo/models/jag/ae_cycle_gan/jag_100M_metadata.prototext @@ -32,20 +32,20 @@ data_set_metadata { "tMAXt", # absent in Jim's list "BWn", "MAXpressure", - "BAte", - "MAXtion", + #"BAte", + #"MAXtion", "tMAXpressure", "BAt", # absent in Jim's list "Yn", "Ye", "Yx", - "tMAXte", # absent in Jim's list - "BAtion", - "MAXte", - "tMAXtion", # absent in Jim's list + #"tMAXte", # absent in Jim's list + #"BAtion", + #"MAXte", + #"tMAXtion", # absent in Jim's list "BTx", "MAXt", # absent in Jim's list - "BTn", + #"BTn", "BApressure", "tMINradius", "MINradius" # absent in Jim's list @@ -78,20 +78,20 @@ data_set_metadata { { scale: 1.490713e+00 bias: -3.495498e+00 }, #tMAXt { scale: 4.375123e+01 bias: -1.593477e+00 }, #BWn { scale: 1.685576e-06 bias: -5.330971e-01 }, #MAXpressure - { scale: 2.636422e-01 bias: -9.762907e-01 }, #BAte - { scale: 2.419509e-01 bias: -9.853402e-01 }, #MAXtion + #{ scale: 2.636422e-01 bias: -9.762907e-01 }, #BAte + #{ scale: 2.419509e-01 bias: -9.853402e-01 }, #MAXtion { scale: 1.430615e+00 bias: -3.351173e+00 }, #tMAXpressure { scale: 2.636422e-01 bias: -9.762907e-01 }, #BAt { scale: 7.154074e-18 bias: -1.864709e-02 }, #Yn { scale: 3.166824e-03 bias: -1.864709e-02 }, #Ye { scale: 2.102178e-02 bias: -3.071955e-01 }, #Yx - { scale: 1.490713e+00 bias: -3.495498e+00 }, #tMAXte - { scale: 2.636422e-01 bias: -9.762907e-01 }, #BAtion - { scale: 2.419509e-01 bias: -9.853402e-01 }, #MAXte - { scale: 1.490713e+00 bias: -3.495498e+00 }, #tMAXtion + #{ scale: 1.490713e+00 bias: -3.495498e+00 }, #tMAXte + #{ scale: 2.636422e-01 bias: -9.762907e-01 }, 
#BAtion + #{ scale: 2.419509e-01 bias: -9.853402e-01 }, #MAXte + #{ scale: 1.490713e+00 bias: -3.495498e+00 }, #tMAXtion { scale: 1.346439e+00 bias: -3.118446e+00 }, #BTx { scale: 2.419509e-01 bias: -9.853402e-01 }, #MAXt - { scale: 1.459875e+00 bias: -3.427656e+00 }, #BTn + #{ scale: 1.459875e+00 bias: -3.427656e+00 }, #BTn { scale: 2.061877e-06 bias: -5.213394e-01 }, #BApressure { scale: 1.392544e+00 bias: -3.239921e+00 }, #tMINradius { scale: 6.266253e-02 bias: -1.384504e+00 } #MINradius From 4a7ef5d392c891f80a53c252d3bda316fb231944 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Fri, 1 Mar 2019 13:29:49 -0800 Subject: [PATCH 144/443] Add a driver that just prints help and exits This patches around the fact that the help logic requires MPI and CUDA to be initialized. --- include/lbann/proto/proto_common.hpp | 3 +++ model_zoo/CMakeLists.txt | 3 +++ model_zoo/lbann_help.cpp | 38 ++++++++++++++++++++++++++++ src/proto/proto_common.cpp | 9 ++++--- 4 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 model_zoo/lbann_help.cpp diff --git a/include/lbann/proto/proto_common.hpp b/include/lbann/proto/proto_common.hpp index e0cffaa61bf..4c39a96ad97 100644 --- a/include/lbann/proto/proto_common.hpp +++ b/include/lbann/proto/proto_common.hpp @@ -41,6 +41,9 @@ void print_parameters(const lbann_comm& comm, lbann_data::LbannPB& p); /// prints usage information void print_help(const lbann_comm& comm); +/// prints usage information +void print_help(std::ostream& os); + /// prints prototext file, cmd line, etc to file void save_session(const lbann_comm& comm, const int argc, char * const* argv, lbann_data::LbannPB& p); diff --git a/model_zoo/CMakeLists.txt b/model_zoo/CMakeLists.txt index c9f2d0ec882..10734803520 100644 --- a/model_zoo/CMakeLists.txt +++ b/model_zoo/CMakeLists.txt @@ -3,6 +3,8 @@ add_executable( lbann-bin lbann.cpp ) target_link_libraries(lbann-bin lbann ) set_target_properties(lbann-bin PROPERTIES OUTPUT_NAME lbann) +add_executable( 
lbann-help lbann_help.cpp ) +target_link_libraries(lbann-help lbann ) #this can be done simler - quick copy/paste hack //d hysom add_executable( lbann-bin2 lbann2.cpp ) @@ -28,6 +30,7 @@ set_target_properties(lbann-inf-bin PROPERTIES OUTPUT_NAME lbann_inf) # Install the binaries install( TARGETS lbann-bin lbann-bin2 lbann-gan-bin lbann-cycgan-bin lbann-aecycgan-bin + lbann-help EXPORT LBANNTargets RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} diff --git a/model_zoo/lbann_help.cpp b/model_zoo/lbann_help.cpp new file mode 100644 index 00000000000..ae520085f52 --- /dev/null +++ b/model_zoo/lbann_help.cpp @@ -0,0 +1,38 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. 
+// +// lbann_proto.cpp - prototext application +//////////////////////////////////////////////////////////////////////////////// + +#include + +#include + +using namespace lbann; + +int main(int, char **) { + print_help(std::cerr); + return EXIT_SUCCESS; +} diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 7aaf5d57aee..dd0f7420c79 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -850,11 +850,14 @@ void print_parameters(const lbann_comm& comm, lbann_data::LbannPB& p) void print_help(const lbann_comm& comm) { - if (!comm.am_world_master()) { - return; + if (comm.am_world_master()) { + print_help(std::cerr); } +} - std::cerr << +void print_help(std::ostream& os) +{ + os << "General usage: you need to specify three prototext files, e.g:\n" " srun -n# proto --model= --optimizer= --reader= --metadata=\n" "\n" From f07ced54fef030fb7df902b0e9d18019e5d8022e Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Mon, 4 Mar 2019 16:15:52 -0800 Subject: [PATCH 145/443] LSTM cell in Python interface. Renaming "concatenation_axis" and "slice_axis" to "axis". 
--- ..._alexnet_batchnorm_dag_frozen_bn.prototext | 2 +- .../tests/model_mnist_conv_graph.prototext | 4 +- .../lbann/contrib/objective_functions.py | 2 +- scripts/proto/lbann/modules.py | 155 ++++++++++++++++-- .../proto/lbann/onnx/l2o/layers/transforms.py | 4 +- scripts/proto/lbann/onnx/o2l/layers/math.py | 2 +- scripts/proto/lbann/proto.py | 66 ++++---- src/proto/factories/layer_factory.cpp | 4 +- src/proto/lbann.proto | 6 +- 9 files changed, 190 insertions(+), 55 deletions(-) diff --git a/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext b/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext index c1c959f41af..d5e6f1953cb 100644 --- a/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext +++ b/model_zoo/models/siamese/triplet/model_triplet_alexnet_batchnorm_dag_frozen_bn.prototext @@ -580,7 +580,7 @@ model { children: "conv1_head0 conv1_head1 conv1_head2" data_layout: "data_parallel" slice { - slice_axis: 0 + axis: 0 slice_points: "0 3 6 9" } } diff --git a/model_zoo/tests/model_mnist_conv_graph.prototext b/model_zoo/tests/model_mnist_conv_graph.prototext index 78a9dbafc4d..cd270d838eb 100644 --- a/model_zoo/tests/model_mnist_conv_graph.prototext +++ b/model_zoo/tests/model_mnist_conv_graph.prototext @@ -139,7 +139,7 @@ model { children: "branch3_conv1 branch3_conv2" data_layout: "data_parallel" slice { - slice_axis: 1 + axis: 1 slice_points: "0 4 6" } } @@ -184,7 +184,7 @@ model { name: "branch3_concat" data_layout: "data_parallel" concatenation { - concatenation_axis: 1 + axis: 1 } } diff --git a/scripts/proto/lbann/contrib/objective_functions.py b/scripts/proto/lbann/contrib/objective_functions.py index ec5458fddc9..37ffe9b3541 100644 --- a/scripts/proto/lbann/contrib/objective_functions.py +++ b/scripts/proto/lbann/contrib/objective_functions.py @@ -68,7 +68,7 @@ def __init__(self, weights, height, width): self.width = width def forward(self, _): w = 
lp.WeightsLayer(weights=self.weights, dims='%d %d'.format(self.width, self.height)) - slice = lp.Slice(w, slice_axis=0, slice_points=' '.join(range(self.width+1))) + slice = lp.Slice(w, axis=0, slice_points=' '.join(range(self.width+1))) cols = [] for _ in range(self.width): cols.append(lp.Sqrt(lp.L2Norm2(slice))) diff --git a/scripts/proto/lbann/modules.py b/scripts/proto/lbann/modules.py index 71f1ae4c77e..60741adc7ff 100644 --- a/scripts/proto/lbann/modules.py +++ b/scripts/proto/lbann/modules.py @@ -8,6 +8,7 @@ import lbann.proto as lp from collections.abc import Iterable import warnings +from math import sqrt def _make_iterable(obj): """Convert to an iterable object. @@ -21,6 +22,10 @@ def _make_iterable(obj): else: return (obj,) +def _str_list(l): + """Convert an iterable object to a space-separated string.""" + return ' '.join(str(i) for i in _make_iterable(l)) + class Module: """Base class for neural network modules. @@ -69,9 +74,10 @@ def __init__(self, size, bias=True, weights=[], activation=None, activation (type): Layer class for activation function. bias (bool): Whether to apply bias after linearity. weights (`Weights` or iterator of `Weights`): Weights in - fully-connected layer. There are at most two: the matrix - and the bias. If weights are not provided, LBANN will - initialize them with default settings. + fully-connected layer. There are at most two: the + matrix and the bias. If weights are not provided, the + matrix will be initialized with He normal + initialization and the bias with zeros. name (str): Default name is in the form 'fcmodule'. data_layout (str): Data layout. @@ -130,8 +136,8 @@ def forward(self, x): else: return y -class ConvolutionNdModule(Module): - """Basic block for ND convolutional neural networks. +class ConvolutionModule(Module): + """Basic block for convolutional neural networks. Applies a convolution and a nonlinear activation function. @@ -158,13 +164,14 @@ def __init__(self, num_dims, convolution. 
weights (`Weights` or iterator of `Weights`): Weights in convolution layer. There are at most two: the kernel - and the bias. If weights are not provided, LBANN will - initialize them with default settings. + and the bias. If weights are not provided, the kernel + will be initialized with He normal initialization and + the bias with zeros. name (str): Default name is in the form 'convmodule'. """ super().__init__() - ConvolutionNdModule.global_count += 1 + ConvolutionModule.global_count += 1 self.instance = 0 self.num_dims = num_dims self.out_channels = out_channels @@ -177,7 +184,7 @@ def __init__(self, num_dims, self.weights = list(_make_iterable(weights)) self.name = (name if name - else 'convmodule{0}'.format(ConvolutionNdModule.global_count)) + else 'convmodule{0}'.format(ConvolutionModule.global_count)) # Initialize weights # Note: If weights are not provided, kernel weights are @@ -185,7 +192,7 @@ def __init__(self, num_dims, # initialized with zeros. self.weights = list(_make_iterable(weights)) if len(self.weights) > 2: - raise ValueError('`ConvolutionNdModule` has ' + raise ValueError('`ConvolutionModule` has ' 'at most two weights, ' 'but got {0}'.format(len(self.weights))) if len(self.weights) == 0: @@ -227,22 +234,142 @@ def forward(self, x): else: return y -class Convolution2dModule(ConvolutionNdModule): +class Convolution2dModule(ConvolutionModule): """Basic block for 2D convolutional neural networks. Applies a convolution and a nonlinear activation function. - This is a wrapper class for ConvolutionNdModule. + This is a wrapper class for ConvolutionModule. """ def __init__(self, *args, **kwargs): super().__init__(2, *args, **kwargs) -class Convolution3dModule(ConvolutionNdModule): +class Convolution3dModule(ConvolutionModule): """Basic block for 3D convolutional neural networks. Applies a convolution and a nonlinear activation function. - This is a wrapper class for ConvolutionNdModule. + This is a wrapper class for ConvolutionModule. 
""" def __init__(self, *args, **kwargs): super().__init__(3, *args, **kwargs) + +class LSTMCell(Module): + """Long short-term memory cell.""" + + global_count = 0 # Static counter, used for default names + + def __init__(self, size, bias = True, + weights=[], name=None, data_layout='data_parallel'): + """Initialize LSTM cell. + + Args: + size (int): Size of output tensor. + bias (bool): Whether to apply biases after linearity. + weights (`Weights` or iterator of `Weights`): Weights in + fully-connected layer. There are at most two - a + matrix ((4*size) x (input_size+size) dimensions) and a + bias (4*size entries). If weights are not provided, + the matrix and bias will be initialized in a similar + manner as PyTorch (uniform random values from + [-1/sqrt(size), 1/sqrt(size)]). + name (str): Default name is in the form 'lstmcell'. + data_layout (str): Data layout. + + """ + super().__init__() + LSTMCell.global_count += 1 + self.step = 0 + self.size = size + self.name = (name + if name + else 'lstmcell{0}'.format(LSTMCell.global_count)) + self.data_layout = data_layout + + # Initial state + self.last_output = lp.Constant(value=0.0, num_neurons=str(size), + name=self.name + '_init_output', + data_layout=self.data_layout) + self.last_cell = lp.Constant(value=0.0, num_neurons=str(size), + name=self.name + '_init_cell', + data_layout=self.data_layout) + + # Weights + self.weights = list(_make_iterable(weights)) + if len(self.weights) > 2: + raise ValueError('`LSTMCell` has at most two weights, ' + 'but got {0}'.format(len(self.weights))) + if len(self.weights) == 0: + self.weights.append( + lp.Weights(initializer=lp.UniformInitializer(min=-1/sqrt(self.size), + max=-1/sqrt(self.size)), + name=self.name+'_matrix')) + if len(self.weights) == 1: + self.weights.append( + lp.Weights(initializer=lp.UniformInitializer(min=-1/sqrt(self.size), + max=-1/sqrt(self.size)), + name=self.name+'_bias')) + + # Linearity + self.fc = FullyConnectedModule(4*size, bias=bias, + 
weights=self.weights, + name=self.name + '_fc', + data_layout=self.data_layout) + + def forward(self, x): + """Perform LSTM step. + + State from previous steps is used to compute output. + + """ + self.step += 1 + name = '{0}_step{1}'.format(self.name, self.step) + + # Apply linearity + input_concat = lp.Concatenation([x, self.last_output], + name=name + '_input', + data_layout=self.data_layout) + fc = self.fc(input_concat) + + # Get gates and cell update + slice = lp.Slice(fc, + slice_points=_str_list([0, self.size, 4*self.size]), + name=name + '_fc_slice', + data_layout=self.data_layout) + cell_update = lp.Tanh(slice, + name=name + '_cell_update', + data_layout=self.data_layout) + sigmoid = lp.Sigmoid(slice, + name=name + '_sigmoid', + data_layout=self.data_layout) + slice = lp.Slice(sigmoid, + slice_points=_str_list([0, self.size, 2*self.size, 3*self.size]), + name=name + '_sigmoid_slice', + data_layout=self.data_layout) + f = lp.Identity(slice, name=name + '_forget_gate', + data_layout=self.data_layout) + i = lp.Identity(slice, name=name + '_input_gate', + data_layout=self.data_layout) + o = lp.Identity(slice, name=name + '_output_gate', + data_layout=self.data_layout) + + # Cell state + cell_forget = lp.Multiply([f, self.last_cell], + name=name + '_cell_forget', + data_layout=self.data_layout) + cell_input = lp.Multiply([i, cell_update], + name=name + '_cell_input', + data_layout=self.data_layout) + cell = lp.Add([cell_forget, cell_input], name=name + '_cell', + data_layout=self.data_layout) + + # Output + cell_act = lp.Tanh(cell, name=name + '_cell_activation', + data_layout=self.data_layout) + output = lp.Multiply([o, cell_act], name=name, + data_layout=self.data_layout) + + # Update state and return output + self.last_cell = cell + self.last_output = output + return output diff --git a/scripts/proto/lbann/onnx/l2o/layers/transforms.py b/scripts/proto/lbann/onnx/l2o/layers/transforms.py index 8a426d14d40..e698ff6e9b0 100644 --- 
a/scripts/proto/lbann/onnx/l2o/layers/transforms.py +++ b/scripts/proto/lbann/onnx/l2o/layers/transforms.py @@ -31,14 +31,14 @@ def parse(self): offsets = list(map(int, params.slice_points.split(" "))) sizes = list(map(lambda x: offsets[x+1]-offsets[x], range(len(offsets)-1))) self.appendOperator("Split", - attrs={"axis": params.slice_axis, + attrs={"axis": params.axis, "split": sizes}) @parserDescriptor(["Concat"]) class LbannLayerParser_concatenation(LbannLayerParser): def parse(self): self.appendOperator("Concat", - attrs={"axis": self.l.concatenation.concatenation_axis}) + attrs={"axis": self.l.concatenation.axis}) @parserDescriptor(["RandomNormal"]) class LbannLayerParser_gaussian(LbannLayerParser): diff --git a/scripts/proto/lbann/onnx/o2l/layers/math.py b/scripts/proto/lbann/onnx/o2l/layers/math.py index f54bd0d45c8..db815b66d17 100644 --- a/scripts/proto/lbann/onnx/o2l/layers/math.py +++ b/scripts/proto/lbann/onnx/o2l/layers/math.py @@ -15,7 +15,7 @@ def parse(self): @parserDescriptor(["concatenation"]) class parse_Concat(OnnxLayerParser): def parse(self): - return {"concatenation": lbann_pb2.Concatenation(concatenation_axis = self.getNodeAttribute("axis"))} + return {"concatenation": lbann_pb2.Concatenation(axis = self.getNodeAttribute("axis"))} @parserDescriptor(["sum"]) class parse_Sum(OnnxLayerParser): diff --git a/scripts/proto/lbann/proto.py b/scripts/proto/lbann/proto.py index 1d99d8b883b..e198b67caa0 100644 --- a/scripts/proto/lbann/proto.py +++ b/scripts/proto/lbann/proto.py @@ -295,10 +295,10 @@ def traverse_layer_graph(layers): l = stack.pop() if l not in visited: visited.add(l) + stack.extend(l.parents) + stack.extend(l.children) if not l.parents: roots.append(l) - else: - stack.extend(l.parents) # DFS to traverse layer graph in topological order visited = set() @@ -502,18 +502,38 @@ def export_proto(self): # ============================================== class Model: - """Base class for models.""" + """Neural network model.""" def 
__init__(self, mini_batch_size, epochs, - layers, weights=[], objective_function=None, + layers=[], weights=[], objective_function=None, metrics=[], callbacks=[]): + + # Scalar fields self.mini_batch_size = mini_batch_size self.epochs = epochs - self.layers = layers - self.weights = weights - self.objective_function = objective_function - self.metrics = metrics - self.callbacks = callbacks + self.block_size = 256 # TODO: Make configurable + self.num_parallel_readers = 0 # TODO: Make configurable + self.procs_per_trainer = 0 # TODO: Make configurable + + # Get connected layers + self.layers = list(traverse_layer_graph(layers)) + + # Get weights associated with layers + self.weights = set(_make_iterable(weights)) + for l in self.layers: + self.weights.update(l.weights) + + # Construct objective function if needed + if isinstance(objective_function, ObjectiveFunction): + self.objective_function = objective_function + elif objective_function is None: + self.objective_function = ObjectiveFunction() + else: + self.objective_function = ObjectiveFunction(objective_function) + + # Metrics and callbacks + self.metrics = _make_iterable(metrics) + self.callbacks = _make_iterable(callbacks) def export_proto(self): """Construct and return a protobuf message.""" @@ -521,26 +541,14 @@ def export_proto(self): model = lbann_pb2.Model() model.mini_batch_size = self.mini_batch_size model.num_epochs = self.epochs - model.block_size = 256 # TODO: Make configurable. 
- model.num_parallel_readers = 0 # TODO: Make configurable - model.procs_per_trainer = 0 # TODO: Make configurable - - # Add layers - layers = list(traverse_layer_graph(self.layers)) - model.layer.extend([l.export_proto() for l in layers]) - - # Add weights - weights = set(self.weights) - for l in layers: - weights.update(l.weights) - model.weights.extend([w.export_proto() for w in weights]) - - # Add objective function - objective_function = self.objective_function \ - if self.objective_function else ObjectiveFunction() - model.objective_function.CopyFrom(objective_function.export_proto()) - - # Add metrics and callbacks + model.block_size = self.block_size + model.num_parallel_readers = self.num_parallel_readers + model.procs_per_trainer = self.procs_per_trainer + + # Add model components + model.layer.extend([l.export_proto() for l in self.layers]) + model.weights.extend([w.export_proto() for w in self.weights]) + model.objective_function.CopyFrom(self.objective_function.export_proto()) model.metric.extend([m.export_proto() for m in self.metrics]) model.callback.extend([c.export_proto() for c in self.callbacks]) diff --git a/src/proto/factories/layer_factory.cpp b/src/proto/factories/layer_factory.cpp index 80b5be52918..7cd2b2782fe 100644 --- a/src/proto/factories/layer_factory.cpp +++ b/src/proto/factories/layer_factory.cpp @@ -229,7 +229,7 @@ std::unique_ptr construct_layer( return lbann::make_unique>(comm); } if (proto_layer.has_concatenation()) { - const auto& axis = proto_layer.concatenation().concatenation_axis(); + const auto& axis = proto_layer.concatenation().axis(); return lbann::make_unique>(comm, axis); } if (proto_layer.has_slice()) { @@ -260,7 +260,7 @@ std::unique_ptr construct_layer( return nullptr; } return lbann::make_unique>( - comm, params.slice_axis(), slice_points); + comm, params.axis(), slice_points); } if (proto_layer.has_hadamard()) { return lbann::make_unique>(comm); diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 
2511e2d69f2..7070303bd24 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -1059,12 +1059,12 @@ message Unpooling { message Concatenation { - int64 concatenation_axis = 2; + int64 axis = 1; } message Slice { - int64 slice_axis = 2; - string slice_points = 3; //should be space-separated list of ints, e.g, "2 6 7" + int64 axis = 1; + string slice_points = 2; //should be space-separated list of ints, e.g, "2 6 7" //the following is for jag_conduit_hdf5; string get_slice_points_from_reader = 4; bool get_slice_points_from_reader_bool = 5; From 1ac763d73ae3159e6a7fdc92f2d67ae49708c2bf Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Mon, 4 Mar 2019 16:37:43 -0800 Subject: [PATCH 146/443] Python model class has function to save to prototext. --- scripts/proto/lbann/models/alexnet.py | 12 ++++++++---- scripts/proto/lbann/models/resnet.py | 7 ++++--- scripts/proto/lbann/proto.py | 13 ++++--------- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/scripts/proto/lbann/models/alexnet.py b/scripts/proto/lbann/models/alexnet.py index c89df25c2b9..f56d7390178 100644 --- a/scripts/proto/lbann/models/alexnet.py +++ b/scripts/proto/lbann/models/alexnet.py @@ -131,15 +131,19 @@ def forward(self, x): l2_reg = lp.L2WeightRegularization(weights=weights, scale=5e-4) obj = lp.ObjectiveFunction([ce, l2_reg]) - # Set up metrics and callbacks + # Setup model + mini_batch_size = 256 + num_epochs = 100 metrics = [lp.Metric(top1, name='categorical accuracy', unit='%'), lp.Metric(top5, name='top-5 categorical accuracy', unit='%')] callbacks = [lp.CallbackPrint(), lp.CallbackTimer(), lp.CallbackDropFixedLearningRate( drop_epoch=[20,40,60], amt=0.1)] + model = lp.Model(mini_batch_size, num_epochs, + layers=layers, weights=weights, + objective_function=obj, + metrics=metrics, callbacks=callbacks) # Export model to file - lp.save_model(args.file, 256, 100, - layers=layers, objective_function=obj, - metrics=metrics, callbacks=callbacks) + model.save_proto(args.file) diff 
--git a/scripts/proto/lbann/models/resnet.py b/scripts/proto/lbann/models/resnet.py index d274766f559..fa38e4c3b0f 100644 --- a/scripts/proto/lbann/models/resnet.py +++ b/scripts/proto/lbann/models/resnet.py @@ -543,6 +543,7 @@ def __init__(self, output_size, target=0.1*args.mbsize / 256, num_epochs=5)) # Export model to file - lp.save_model(args.file, args.mbsize, args.epochs, - layers=layers, objective_function=obj, - metrics=metrics, callbacks=callbacks) + model = lp.Model(args.mbsize, args.epochs, + layers=layers, objective_function=obj, + metrics=metrics, callbacks=callbacks) + model.save_proto(args.file) diff --git a/scripts/proto/lbann/proto.py b/scripts/proto/lbann/proto.py index e198b67caa0..1671b3b2502 100644 --- a/scripts/proto/lbann/proto.py +++ b/scripts/proto/lbann/proto.py @@ -554,6 +554,10 @@ def export_proto(self): return model + def save_proto(self, filename): + """Export model to prototext file.""" + save_prototext(filename, model=self.export_proto()) + def render(self, filename, format="pdf", **kwargs): """ Save a vizualized graph of the network to `filename`.`format`. @@ -567,15 +571,6 @@ def render(self, filename, format="pdf", **kwargs): # Export models # ============================================== -def save_model(filename, *args, **kwargs): - """Create a model and save to a file. - This function delegates all the arguments to `lp.Model` except - for `filename`. - """ - - save_prototext(filename, - model=Model(*args, **kwargs).export_proto()) - def save_prototext(filename, **kwargs): """Save a prototext. 
This function accepts the LbannPB objects via `kwargs`, such as From d5ec4916abb1e9cd4fb848236dd5e895ae109db8 Mon Sep 17 00:00:00 2001 From: Jae-Seung Yeom Date: Mon, 4 Mar 2019 16:49:39 -0800 Subject: [PATCH 147/443] add command line options to override index_list specified in data reader prototext --- src/proto/proto_common.cpp | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index dd0f7420c79..38278537216 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -641,6 +641,29 @@ void set_data_readers_filenames( } } +void set_data_readers_index_list( + const std::string& which, lbann_data::LbannPB& p) +{ + options *opts = options::get(); + lbann_data::DataReader *readers = p.mutable_data_reader(); + int size = readers->reader_size(); + const std::string key = "index_list"; + const std::string key_role = "index_list_" + which; + + for (int j=0; jmutable_reader(j); + if (r->role() == which) { + if (opts->has_string(key_role)) { + r->set_index_list(opts->get_string(key_role)); + }else { + if (opts->has_string(key)) { + r->set_index_list(opts->get_string(key)); + } + } + } + } +} + void set_data_readers_percent(lbann_data::LbannPB& p) { options *opts = options::get(); @@ -726,6 +749,14 @@ void get_cmdline_overrides(const lbann_comm& comm, lbann_data::LbannPB& p) or opts->has_string("label_filename_test")) { set_data_readers_filenames("test", p); } + if (opts->has_string("index_list") + or opts->has_string("index_list_train")) { + set_data_readers_index_list("train", p); + } + if (opts->has_string("index_list") + or opts->has_string("index_list_test")) { + set_data_readers_index_list("test", p); + } if (opts->has_string("data_reader_percent")) { set_data_readers_percent(p); } From c7980765b2a99540ffdbfdc9b9b5710a8308b0e6 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Tue, 5 Mar 2019 01:39:15 -0800 Subject: [PATCH 148/443] fix preprocessor use in any header --- include/lbann/utils/any.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/lbann/utils/any.hpp b/include/lbann/utils/any.hpp index 00fa8d6e60c..c2474a773ca 100644 --- a/include/lbann/utils/any.hpp +++ b/include/lbann/utils/any.hpp @@ -3,7 +3,7 @@ #include -#ifdef LBANN_HAVE_STD_ANY +#ifdef LBANN_HAS_STD_ANY #include @@ -14,14 +14,14 @@ #include #include #include -#endif // LBANN_HAVE_STD_ANY +#endif // LBANN_HAS_STD_ANY namespace lbann { namespace utils { -#ifdef LBANN_HAVE_STD_ANY +#ifdef LBANN_HAS_STD_ANY // This case is simple symbol injection; don't feel great about this, // but it's not my fault they couldn't get this into C++11... From ef7b1784f1012015391cd7e61da1b35ebaadb366 Mon Sep 17 00:00:00 2001 From: Jae-Seung Yeom Date: Tue, 5 Mar 2019 11:27:25 -0800 Subject: [PATCH 149/443] drop --index_list and only leave --index_list_train and --index_list_test --- src/proto/proto_common.cpp | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 38278537216..992b60651f2 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -647,19 +647,12 @@ void set_data_readers_index_list( options *opts = options::get(); lbann_data::DataReader *readers = p.mutable_data_reader(); int size = readers->reader_size(); - const std::string key = "index_list"; const std::string key_role = "index_list_" + which; for (int j=0; jmutable_reader(j); if (r->role() == which) { - if (opts->has_string(key_role)) { - r->set_index_list(opts->get_string(key_role)); - }else { - if (opts->has_string(key)) { - r->set_index_list(opts->get_string(key)); - } - } + r->set_index_list(opts->get_string(key_role)); } } } @@ -749,12 +742,10 @@ void get_cmdline_overrides(const lbann_comm& comm, lbann_data::LbannPB& p) or opts->has_string("label_filename_test")) { 
set_data_readers_filenames("test", p); } - if (opts->has_string("index_list") - or opts->has_string("index_list_train")) { + if (opts->has_string("index_list_train")) { set_data_readers_index_list("train", p); } - if (opts->has_string("index_list") - or opts->has_string("index_list_test")) { + if (opts->has_string("index_list_test")) { set_data_readers_index_list("test", p); } if (opts->has_string("data_reader_percent")) { @@ -941,6 +932,7 @@ void print_help(std::ostream& os) " sets the file directory for train and test data\n" " --data_filedir_train= --data_filedir_test=\n" " --data_filename_train= --data_filename_test=\n" + " --index_list_train= --index_list_test=\n" " --label_filename_train= --label_filename_test=\n" " --data_reader_percent=\n" " --share_testing_data_readers=\n" From 078fdd659cfc54b8d8012404a57c76c0e18c634f Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Tue, 5 Mar 2019 16:01:02 -0800 Subject: [PATCH 150/443] Refactoring model visualization script. Layers with multiple children are now displayed properly. Layers with no children are now displayed the same as other layers. Removing custom parsers for color schemes, prototext messages, and command-line arguments. Expanding documentation. 
--- scripts/proto/examples/viz/README.md | 40 ----- scripts/proto/examples/viz/rnn_1.jpg | Bin 173026 -> 0 bytes scripts/proto/examples/viz/rnn_1.pdf | Bin 51956 -> 0 bytes scripts/proto/examples/viz/rnn_1a.pdf | Bin 51937 -> 0 bytes scripts/proto/examples/viz/rnn_2.pdf | Bin 53587 -> 0 bytes scripts/proto/examples/viz/rnn_3.jpg | Bin 316285 -> 0 bytes scripts/proto/examples/viz/rnn_3.pdf | Bin 54714 -> 0 bytes scripts/proto/examples/viz/rnn_4.pdf | Bin 53794 -> 0 bytes scripts/proto/lbann/proto.py | 11 -- scripts/proto/lbann/viz.py | 85 ++++++++++ scripts/proto/lbann/viz/__init__.py | 155 ------------------ scripts/proto/lbann/viz/layer.py | 96 ----------- .../proto/lbann/viz/properties/__init__.py | 67 -------- .../proto/lbann/viz/properties/properties.txt | 62 ------- .../lbann/viz/properties/properties_rect.txt | 49 ------ scripts/proto/scripts/viz.py | 39 +++++ scripts/proto/scripts/viz/lbviz | 98 ----------- 17 files changed, 124 insertions(+), 578 deletions(-) delete mode 100644 scripts/proto/examples/viz/README.md delete mode 100644 scripts/proto/examples/viz/rnn_1.jpg delete mode 100644 scripts/proto/examples/viz/rnn_1.pdf delete mode 100644 scripts/proto/examples/viz/rnn_1a.pdf delete mode 100644 scripts/proto/examples/viz/rnn_2.pdf delete mode 100644 scripts/proto/examples/viz/rnn_3.jpg delete mode 100644 scripts/proto/examples/viz/rnn_3.pdf delete mode 100644 scripts/proto/examples/viz/rnn_4.pdf create mode 100644 scripts/proto/lbann/viz.py delete mode 100644 scripts/proto/lbann/viz/__init__.py delete mode 100644 scripts/proto/lbann/viz/layer.py delete mode 100644 scripts/proto/lbann/viz/properties/__init__.py delete mode 100644 scripts/proto/lbann/viz/properties/properties.txt delete mode 100644 scripts/proto/lbann/viz/properties/properties_rect.txt create mode 100755 scripts/proto/scripts/viz.py delete mode 100755 scripts/proto/scripts/viz/lbviz diff --git a/scripts/proto/examples/viz/README.md b/scripts/proto/examples/viz/README.md deleted file mode 
100644 index f66e52aa20e..00000000000 --- a/scripts/proto/examples/viz/README.md +++ /dev/null @@ -1,40 +0,0 @@ -The following cmds were run in the LBANN root directory. - -```shell -$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt brief=1 ranksep=.7 output=rnn_1 -$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt brief=1 ranksep=.7 output=rnn_1 output=jpg -``` - -* Output: `rnn_1.pdf`, `rnn_1.jpg` -* Notes: - * linked layers are enclosed by dotted rectangles - * `ranksep=.7` increases readability (IMO) - -```shell -$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt brief=1 output=rnn_1a -``` - -* Output: `rnn_1a.pdf` -* Notes: didn't specify `nodesep=.7`; harder to interpret (IMO) - -```shell -$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt ranksep=.7 output=rnn_2 -``` - -* Output: `rnn_2.pdf` -* Notes: same as above, but print layer names as well as types - -```shell -$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt full=1 ranksep=.7 output=rnn_3 -$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext prop=scripts/proto/scripts/viz/properties_rect.txt full=1 ranksep=.7 output=rnn_3 format=jpg -``` - -* Output: `rnn_3.pdf`, `rnn_3.jpg` -* Notes: `full=1` prints all layer attributes - -```shell -$ lbviz model_zoo/models/char_rnn/model_char_rnn.prototext ranksep=.7 output=rnn_4 -``` - -* Output: `rnn_3.pdf` -* Notes: didn't specify properties file, so uses the default `properties.txt` diff --git a/scripts/proto/examples/viz/rnn_1.jpg b/scripts/proto/examples/viz/rnn_1.jpg deleted file mode 100644 index e38df945bc8f7f6ce22f6515b79bf7b2b730b65a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 173026 
zcmeFa2RNMTx<5QZf)GOVHWGv=k%-tegEz~b{;Pedly@F6;&m6)h7x{?B1e6 zclg<@ZJt?pIeD_Xd09Bwd-`3ym;lHFNUmHVzH*s_n3$N9l;kQo%{6i|GIB=h8~xA-|&Sa`(l@ZY;HEG!J1f-;-eHutl3VdI3Q5TmW8YyIlYth2>rVLPReBSoi80KFUp|I@mnU zR{uzWVteNT!14Ltz76{8h4_*)p@01^i}M(HBo@@Z3YxVyf%IkMUjSy*Q5OJ~EF4`t z`ur+WI*w@K0#{_nUQp z`?t$e*q2UeYvqfCZ>*~2I^4f;JL_8QuA*j#9=F7r(qp}bn4YR9 z=1mrKcsH{Im}2&s?lpND60sbpPWxrd*Ph3EIKH|)& zWqVMqdy@X64}w^65nhh%nNZ!nZLXND`JORL30}-0AyF`E&F}@_+7Gnf-1hY!Ent|i zp8IV@ia|Q1{p=0P%*FcjFBgFO=aw1{%%(Ux$bzSb1D)O(_UxN)Z7uOMZ}E?V9x!7g zdLRQ4t0F?Sd!9kwk4-bV|9X;`h!tJ0+}@k*kz@w5@R|qG$jhV%{UY1_W{g1d180i= za-DHMz?{YR69CW(C&X1JX(XwP9e}qhPohO^xS5NX9k-S)05=0=ik}H6JB&x(%pA(D zc%5Sn<;{3b{P^(PG1zg!*zZ>R-?@{BOB&4gX?AdN-F zNjctHHcBU3k+i7ifxwxi8g9tZXO_xiG1+OX5AElCMXkZpzE0X@62A593jnj@aq8?4 z|Es-+E;IX?@*L=;Yn@s79C>Vq`bwitUPy@Oh+PNQF5523q13e1|AjB;O=0cJUHMQU z;b6%2;j@(+VAq}a>$)#>dB`bk zWjuQkX+pkqlNW|9OA6{9uKPmh=0QVHr5Dq*znQf7X}}4=N~~T7Gpuzqeje9c{s_rH z@uJYL$G6S@Y`92vzIS)4lx2Cjvhua_hoeVA3y_94y?r`0YQ{1Vx}P__>Zd&*8G^69 z$ci4x=x^$?LB!W= z8O|_K;F#1-CuR52Jjm!-l#RpGz1?5bU8$l1qrT`ob%{)9I7W*EcZwo|1To*C&SHF= zm5w{871y=6^o^X@3C+qf+i|PvI7fCyqC;^BM(o-`<@M&W?ga7U65Xi=7w%7NIkk)5 zJ9d=mCD;L^2lNcUY*$PVLpH8qnyF&bh)9A}XPIMFXQg0oy1?A)jX~Jl5=<_BoZcz;QQ|`>EK<*ZRkNjXE-N9MdDEvk_9RZ=G4F z3hhoGKGR_!di$J0Hbn~q3YohOTDgx0rC*0?>=h6TCJxmq3dfFHoI5$Sm1(Sli$n04 zGp~-gt6O=!ZyCr1**?xyF4t5d06)Mqay5)BcOjDdlMmEN(r5X41!~QnhuQyvRGeAc ztR#j%<@4{+JkQVOeA8Pjnow^a)2=+o;l_ca+mMlrs436tzKtxCq+RZyy_@g2ewuWe zR&A4GH{bvjQn>_gw|W}b{QZ)Qx=AIn68YwvtF$(|xBzn);M7__#T0L9UJaCIMArJELdHdtc}Qp1tNHh-YPk z*+ar-OA|+LUUHRjJeL>bAk0I{4!0YQ8>`Qv7{75+)rRivlYfi7_bD=1dLWlU90gmF zEK9LYhcEbFwskb$T_!$<7(rHl@l0cV1|eU4+>#&~Lk)(!*INJAzj+Dk7AW|^yK459 znpKY9VWIw+5qvAX@BirD`;WT+-=L0|_ew<$zWX>;F8FyU@C25=WKFea3Odz0rHbvb7g@azADHY;3)U&n#l; zC~j)m_G_-AJG^SaYeG&iB2X6ftTuM!y9QSbwcc1Yz&ynxCAzx3IBB45svQ)=ct!EY zR+nMQL5HOZD9Ru+<%@)ZQYBVALq$=;^a9X1oq@N=BspPvUXuPasg;s+&eiSGI3v`! 
zG#i-jZPGrV4jkwFzJccEL$aJ&(Jb|qw+$(rAT-^|Z!cOB5PssNppa>`>sOLSLdWF4 z@)+BVi2CeL%dxz3bAyU>^-SzGv~#m~XIBjH7m1L)Te0%0u0kg*=}Yb06Cy>^`bU`T zZCy>FYwwYyNU}+7A+HahPo&n@d7qa3o;6oyPa+?F@J&FYYuwDZu;lq8_j^i@soJr; zlQbTk$5jF1h6OIR#w(IGMMFMM#@W+^_eLJ6-DBj=mreuA*gJX3h^hNA4GbwQQ5(2e zUh$u~5t8nt_nPL%!tvAmMyHq<``IA6w*5`L5i0cE&E_S`FF}vuUId5c*7qH3sxC<- zCkOU2GIU>B+|tY7ROk*6mcdm=*PcjIg}Vj z4@UT;7D9TXptvib3jkBmRi34KxaOX*GEy)mElDANNM9$G&iDeruh>;q4e;W<@{mKe zdG!}bu2=Lv`kG3-y`F$X?1>PD0od9`{20I6>ct;2DN>BUoyb+t7;wz|8Kh)c7My-t z@~5@A$x z!WR9~JkEO`+cNaZ+!R{R-Po^W8|9Tt6!1@H=}68l)sBU@K9f3IA#l_c*hAKJ;`l33plUu2h9{8`wPN{g=E+5gLfF&fbm zf;Ia~qWze@e~R8e_rLUfS}q;-X*D`D7pgQkH#pT`%x5e<-f>f~Dj%t-D*7o!W zf+uTS9r;5wUbrx`evV8^oL_(gQK4sGX_{BKSRzFptFAkOn(v`J#g$(G2HvbImLcGn z{g*gY5$GUOvWDU?bK2`9aZXF;Hq7>f?NEV6e*nlvqp1M+^ZM`SfnP3E7<}E-w`o*! zpxjV|%;M=&@8~(6PE;Z?tE87L{`GmxJn-~J@daQFglvX< z3BYu+ z_|I(RZ|>oCnb5o^8^H08Y532cy8w)_-M;{2NN(dvE`nedbq1|=@4~GG`2+9x@Lyht z1dVUPmXvs!uRQd%;n;zD6l>3-v2SB80HcS;xcKgWrD+EB8}|HP!7ku$*cYHc6Z9zj z>3_FqhAlS--$R{AUz;_4L%;b%d9QA<^lCn@ggEt$ez5A%+8y*|MIkLUOWTig-1Kw@ z!E$&bY1s6qef;mYkH44aw4H(C7l0Tc&?&AS@Aaw+3Vx$`4_QrVG#_66^ZH}+z&)J; zwD1^;L%~r8#SUxhA29gH=ax0(@EjijWP3az)>oi;Lh$GH$L4`aZZq`c2FC>ezP^3V zmsE|4s)4_i2F(`8*fyVjsCY+?(;djyV?Rre zNZ4Cqe;Z0hC%xI*%@MheReT|voEgCb`1ATh^57C;DZ?gL@vFI+@pu*sADH(OcOSJw zI+ha$ya0UKO2lI`yd8*oiWwN6+Hb*8gszb+zuqnuQ{ug%{mxLiUD}HlpMWMAd3gbN zs4jK^Ah?0$<&xF6oM(oCX1VdT?sH<#UMv|DYllzhr8i#y4p;|4J9k05Um!bCcAx{I zLC6jjs0oTuEC-%mvPWS^(r2|d1H(5u&G?Z!%Wnhkzg)q%5N(g{8ULp#i0!<94_K{_`B=@HE^`z5u*mHt*iPs|DiSESR=F9G%>zb@u2Huf<>RC>pP=3VEL0s z>TejuN31;{+zmpTktcl9D6e=w`6J)=sSamQf|(Gp#$^$$t)}N+tEc-FXfyt_yMHX- zL(DOUI$^G*zW|tz0#8*0JVkna;W)+Z%Rj>vG%i-n^qPgfheN<+3k4C@X;p{w@=WJhfNRJ5-HJpi>nQ@jzv*?;9(b5v%w9sCnoA z>-NnXg+pYoIwqe#8lM<9d=z)Us_uRaq46YH+;gJa*7~?ZJ$|DD?`*6Nu9U?a$j{lD zl2hESFVC+)PJmHX6`56n7-g(mfm=4lQi`D$i$KN2Y*#LVGO}<~KzfJTbyB; zqLKdA&gXJ@N28cq)3Yb3z_BmsyK8N-`b!W`*ArNv{w*M*uaL}ot=SLi>Sdq}4B_du zJcyA}-$6&#atS2=P?J7}k#UxFRF%+uO@GMO=U}~nBojT=C>c>>v%7Q|*(cX&El>SBmVU&q 
z)>d4Ke4z|j&LtEX(Nt(^G(o-jtXyj|gN_-dG5cZjamK@uhjvJxd0t&;)M%IZ)xpH@ zvx=(P6mjjBt>uyT0^Bp-Or6qvU{-bcR)o(DaiD%aF@60cS6Se$Y?d|gEk^Li^!E+t z*MN2yXx0Uw2r-EG7_Jw2;tIw3oG_srxpoz>2+80K)!ZGv))T2#oMbR}fWmY8T~TH+ zs`VPS>>P=C52b35Eak#APbQzpK&dY?Mh-`<&|hL|D?l%w4l5!g>=NUhD(O``RFf;c zzMKvBS1SNQg(&1Q5#o;`cs^bF^ZFmegKS8_YK@T#3RO|T3JGA>3Vv}|Y_>fY9l}zt z5IVW0op#CiksnAK+Q-zRYeD%oRp*{~iemYv@oi_>^XQR?8OSXA?g+dsakk2H1&T$~ zLp}y<&fvnJ82LcWFZ;#56*lt6pGr+5lyZE3+JkN^mFo_+Cw=4l;03_9fZ@bE@w^|k z~X%fgm|w+&`trsSbcgNHW0M2QBJ`mSyP1Z@fI2!95&K16E!wB#==FJ&R%2Jh>`hWD1+3|CaBRt$8xew}ZrLwp;*Q zefRY*0PlUmpAK5Op4bQ7HV9?`Vma-b$ajaiMP7Aa*QFpt}~ z{|flNfTqJHnk32av+}%Lg3r@SP%U+Prsv^Jh3+G*bN4Ba%iIs0f^>$w)L3uz7bD2> zHfPP}JR;8shSDue@UcELXUJtjedqN{2YvalYKu2k$Gwc$=d$ISVzVbmq^Vjfth>PM z@Os=Et0O|x0x{y*TD85pd2hxjOTDmGnyAUU6Q8&f&qIjEJvi3Ux!Ctxv)HZ4P2qQr z9Y@oqKmi*MXP$;$!hLK3-b}oL{Y2Fqux%Y#209P*j8mJ-3&P2M*WC@?8L5s4w&)rgaD`w zic#?do?cr;Vd&E5^+B@?KO+%9c`ud-ipBTnzOwomiCn@*BGjOr+>P39RMP#X2IK zCMRoZSzXe_*KY*XG*`3^*^~U$c9gK3HI(O94yLhW;N61sok(8@?uC&p?vfIY2GVr@ zXD~uwpN=5~Vsy_0A0&^{;6o5DdZnuNZPCZ4Wv+puA)k( zm?`qS4H6z`^Vv7X+xW315*R$q(-Dt+>d65wW9chc>gdZ1xT4rE#2BlAvE&$<&0wOi8 zV|Abh&_R(t`>6*zO9dg01rx3^)QWo(drmiuBQrh}0CaZbSRDy9cR*o>kCUTr{gA+k zD}5L{?8I-G1z}YN9>o!#5F@^=VLca#&quwE)y%OUr-`nUNDybxAn?20Tm89PrM%Vi z7N@`MHqEvr2)Nyv>-Z#k?H6~e3jix4_8QxW+T6Pn2jG(|Z4B)kHrp442DfbI6!5UdhdzB(r+517UZ}91Jv_JvYA8t5@M8Lp5!cjv@Oym#E;O@i&)v!+LGg))$eiw8=c4#u}Z+A$GHqx zjkbe(!v#pKm$1z0p?(ueEVtxQ5jq^p?R5)?l$qTdJf+0?LpMNd>(Xmx?3Dwvt|vB@ zi58e0$nLyhfG3V<`f$k`V$%cMrsbg2_c>PF-Y%UmogQ1=^{a$EF-D)SekK7KEy8i! zzApXP0>33x@+UQech=_`1lR|fMuCETR)Xb`EYdN>y%-#Ol1(E z06%!nn%SDx7)b5~gvwMs`*m76eY~yX$SXo0|I98_4tYX6eV z2;QEje@R1@1j^4}12)^g`xbS~8JN%?I3SkGTAxeX6$ERo7hl>;mg_gh8RVXx=ue}T zLpB`m=&J0Y_55jsPI}lz3KqhBM*CfG=6>*E%mn~E=mIJ`_&%*W%&=)3;QEmeH%N|? 
z=Uz2*^Hnn0)hY0qI~$}ek&1haaKbwlUns8nn09!@jBy}P!gbNUqJWtyJia;xS9C6^ zR|o$3;V=(p3Ytl(_2L&phDN3DquwnJfCK_NE10~}V0IN1qWu#Mb<`par|H_;dU=@@ zDgsnXsv7E@8k?_aG#|hJNnY`dSs?Z;=;)Apk9KmDbO(z2CH>4l?&NA4q!pTVPN@-& zeYbuAhyb4XP2!VBrAvRQ&D;cplC;anWoNz+ud|vxEQi@@?VNvGIsLdGEUk!ZRRT9d zt#kzf-SX||?^w+9U8&^URDX1Nk|u2#IJieUJtQVtaa^(O>P7DHX#N^Y1S5FHkzTRq zU^0Ei|9O|+T>jjAFB^1IQ0Y|a69QRTU!nHSa6XNm(U9Y5Cxl8{pTKLzSxK%s-~w=E z8c}wmdV|}!UuEOLIpn_H0rZXNwzqKa^$A&MKE>7U7Vs9s0oM0en0LfGukZo@NHg8M zwPwN9%T}-{sC!zFRi63&dc$=&;`a&yPJSWyk_4eC-7h?Aj9{XB?IOJnW`sLE`EjCg zb*-Jk}Ey)_h#jrxhK&9M}yHC30k0gpAP z=2*E?30q;jFSi=~rCWhejy)q#CDtd)Eg$2}s>*c%NT{IP2L{!qV?8xDlQ46m^C!ed zo(>S?aAa-rBE++>eyQg)Vy+O!V@LgMVVbN+)U8o14@|uF;B>C0fK?kjG(T+>;Obu2 zrF8{8%F3ra{0c|sq2Dm~`~uMWQmwB1>_`AFQnoyQI*)(0^U$wFl?~4AySjYBM-qd| z*MrN&oY4%`QVOOCJjFt1b3Wlk3wuuE^IBEXB%7Ap+-q*>%_7=oK4||@re`4(Ffz}i zlfC9?SG=sTioAngOQ=sui*!#>ul~Yyc}iVr$N#*2yMS4H_gTniYqPbA{;GAR`eVFY z>ff%0V;&-+jF)zS5iZMmXCp`HNH88QqaAG@?@)$$)-se=Wxt28qPM}LOEbZjU?PUj z%>MX}7{2dM??9*vk~e8>jytDg-d_NyM^3uT*!sL!)f?N+ldlI$wsV3;UO zh!nADt~s2|`uW3`U_;K}D>cqy&62lcURq>NJZjE*h1cQjtlo8_a^1E_jxUO|`z*R% z-?UsNE6m-$;&W`};-S9^`XlC+$kcqQs*}VmyJ-t9fc~(B&z~^$cyV0osTI9iaeIqQ0qM`R)`QX;Dc_{%`?03)Hi4dSTZDdMRXNqC z8NUF$#oDzAQW%TEvg5*rTohT+XN1@XhqM7Im>|mWw%j?jiV+cx-fhS*cZu2jI=ZBI zScmuZlS?5lh#jJh;$;Qs=e*8LZr{MDjf0V9YX_!5W@$)hbUjL5qF7vJ}{Qrv&dN&HI@-*)YkExCA{ z*;#c!Vjf4W$VmAmEo%qglP8>1c9)h*L-o10;}_|Tm0+G7O(8q_YX$ad9VJzuS~*-J ze(bP)1P`WbT|Z+&>U)YuVDsVSRVOBFGmwCjv#YC)>ARFKJ)zWh&+?=TN{{bmoLHM{ z1mHxVWgkFc2OAdv`N7jga=nl3ng=t!4M zxnvx8n!C_##Pge+(ACkTpoA=XSb+WIPKdW%Gjycdcsq=ZwK}e+JbE6S;?9Zq5DCm^ z2Zr&LWoGKkg5I(U->-@U?c|JR4Q_qMXCqj5+oF2n=adggl(3~J4RFO4c4CmN)*v>0Fe`ijVgoQjPI8zk?P2Ev6vv)_;i<EuTBGJ0ZnNU;RDn{)L7GLQn&MUlm#D_;b+kf`c$ z^N`7YLnGU_o~Bm7r$d{$R{ezdz;!#Wchv~-CDvhOe@!b@!o6DUOGT>3XQ$r9;}w z*_+u|S+CSCvHP~nv+pIB2XB2sj+7UW)+TprbdH8`7bJRls&mMttu5Xiaeqd$^wd zI3jf0XrS=+o~wuypS@dZoJQXF!$x5O|A^3mqX8UHvS*3;V_DQrJFCZP%oEdfBi&b1 z>Pxvc&UYVtwI60?tGfL{Q1gkA*|>T0vf+|VV&3UWS0_Ifw`#FA3u%d2`ia&?^iXOaMQ 
ze=OGODj%9k8jIFO%6+|WIrKR0$##~oV93tKP4L=0~?TQ%gljfRb_SKtLD=(`B%x_+9 z!9v}YiB}`WYeqa%Wi>BPW!pHj1KupAUZ1ZdI><^Wtn@y(0KCM>Z9b<7zUtyyn+ls` zh3r|q$8}&riuNIeMk_mF28E*@P&AgQ&=4!wH(*}wpf$M$BTfiNj3zy$fCxFlf2ZK) zz7a3DL&7eqr^f}&v-IZBJ)$BZYq-DoU44$(%HzgG@N^0_jlPgJ!6 z2YhUPzeHNY{$Lsmw}sf|wG51zHhM5s4D7VC5U$2dCpGVKM+>`N zqb)!x%QN+(47Sva(gsA?d#R3%E)e{D-_$fRn0_Y`Vi7hEE^#NY;9vU+Lefcyz=YQV zCGVaGZ|}=cTD@+9FrKMg0CGh>|ztf0RX+??{r+iFtc2f8-nOULsOC9}s_3t-rj1s;z5PV*zf_-g{mx+x7W~E5;Ss1$E0^wxrPUemkkIw zAKb}kNSIA6&W^YGg~&_d)PGb!wL~{Fc^mWnxa1@GFMFK`4p!!Olg@08=jgBfzgHVa zHD~^4n)UmAqyEj}Ku`&)8rMIN6U42fU2R)&q6A!5&wMHx{P z29vpChzl;T-4|~Uv?}s7vo70>dTU2Io|O7!OA(*BqQsRR70c^1wAwm+I68*SUjT@cZk6JUd^6Twx-FIm%E|^bGl`tu~N*1n>Vt; zE8m~p(^Nyd`$oz#7<;^*f*SryCV{%|kd=Un^~{WDR3` zN^Cho))4sO1fu#4ABa3?83*Bu@jBfMXBEfmb#AL^kcWJgzjN(E)2q&td>{eJM``vE zul%^~+8xDzMls`v@pU;;aHOZ12#P$Y_~ytw*lBmYGJTXbD+jB*Gm_aPRwwG=d4CcP zgQB-^h{1mg_xPAi-!=INeH3XMcs1eiE|H3paEMy|s!2S@PA3kvWPg}KYd){F7&LPM zQ1Ut!c9ECXr{XVBjcuD`Ww-}=lh4;{iGgH4W;$+4W3MjdocAh1C(|jpP=Mzgil&hG zxf0Uug=q(-yLk~}PO-B$lU=Ps^-o0DMl><=$BuaUl+AhsKdZCGW^bWJKyOz+=%=qv z)0I2E$kIeqmpl`n((i)q*eu|52b<@nDSDaOla^8u)daDGbBLU>U#mu9xmJxCR69u4w8Wve~vb^ z3X@=VXxai$eD=~bFx<60r>LC95aQu~TR~6TvnnUfVacBEf(;9ODvh|sm^B^!gyq)s z1IcN~%(Yzn{ArdfKYl)IR*^jU_0@&xz^$?X8q>%`N|U+QsU>|LnZpcc2w7{Afu7GZ-hLblUs^XM23_ z&qe=pJY7hNAAPrRfJEkD?S0J{%oMI!Ifg!peQ<_yIOx8*WZX-+TB%Q2Z(n2?tJuJ3 z`64a^t!;{t4kEX7c~=*eulL%x3bvA&=U6E;hJk3+R0hS#JMza=5Hu5Uvam@;Y>Ei> zo2leX`quBOmDj|z!9tc^b3UyQts=_f1|p9OB>w)17PP{yKeZfphj>V^iAD_k1448` z>lB11=|7s+hXT^F75rQR4e>d^qGKFj9J2f6!Cdbtt1D{f3chP6N)uNy14=yalC4v! 
z{zu{o{Bza+8V7Wh49jEj)22iGvlC0b&YUV$GcGplw)@3a4NPf8es1t_hw(3ux zTCi}>yaO=hcl+ImMnd^Z{O_*5)|!22)vz2hl7sB`EI6rM?-uG@GO-Hm&%gT2JT3@F|_Myq|8#TC?YQgx}Qf6Y5p( z1iggoC!*ec6ACFCVfWspn_LauRv3UU?Zbt?@g=%N?LRF3e$YX??DzKy_y27|v;QeX z|Gh53>}~8VDC2CMuaz72v|Eu>A2#IOJuv0Pxk~Oo{#p%Vm=?ae34I)6=JPxK!Jj<* zKf=T6bVi3X`hLqZJ$UYznYrW~Pa2IMi}%9I*TxYV6w)mL|h1@F4H zOYAl7WfC1WMJvrX=7l$zv4)C73Cl>4H3&FGLWD>~$`k@hvPm3bmPgTZZe$6cCrt4r z!@kgg&VeT2?VfRFyRy{xZB!oXAx}(KYILzvf9dkOd%xa}c7iSORy7p3=8s5i3$g#a z=9-H*9E$nI{~XQXY+%|zFckfTkL=f2$<6A4i72&0s_nDkBk9hn2+wqWs9O2g@{;h5 zvWl>A{$=a?uS`y7@VWQ9@er|8zZ^r8Z;^lLC(KcXOZ^HUDgMfmEL;E#o&&pZOSXq= zGQIzlB|#%B#QUJ?@X@?unY+J}9ktuPrkC+o*6CmN2sF$3ae!3gQzHpXma=os?D3Xo zQ0OhbEa#KCs=x0ynq(8-Rn>O#N#DueI%2=}R|wMfS4i~Sq5<;Y>|>BW)MH?izli_l zp3`PV#XE^HatnIdDtu++UAB4mvYCB?zR*1LQiy5kYAj#R{;#0KudEaFFq{C}yJhhQ zW}=%pl~>UX1;nIivV~uvMzY2JCnX-#lvDJ}k}U{R$FF3_zz7zaJ-oGmtQ)<%Q1fAN zac_`&rwwq#qNiIGPBQP{KH8sp;w^PI$}G@0uprYIkIlC{f*FSo83dzKoeNk+uPG|1 z_PG=eIAxZCDSsyW*zU<3FC0G+;qS`|m(kz?Q^uMWq}WFpcu} z(ALb`l^%cp9Q4EkmkGi1@zYr{{PS;jzE?Vl%+lAtHAieN_9NN;8O}y`^lzx~Wcxqm zvJ}DW8@9SXWoB~#BGn)J$O`|u-|;@JlwpUvcLJ6Nye(P`;^geMbf&L3si&8%j+<$*SO;lpyds z<-4nV0!}|=?+w%P9oUiX`y5HvHVTL*nm-OcwEeYq`hJ}wCpXprZ`pFR^&6S|Q&ckl z|86ChgfL(n2DTgNM?^)RH&_DgBE+q(iov-7J{%mo_*M+TuuZQ!f`J{P%`V7yNkOaFS_oqsBF&xXAgKes7@Xdt_R!03!%Kc&Q}K>qWf5 z(2~F3gYV6|*CY+SOt^5ji&jVX%mZZ1{u#!)0Pq6ckJE0sT6yeIJ8*tA-Bx^zQf}5U)wO-H72@(FBSM? 
z6jfc<$BBA%p{^3PklMl#+Wp$g3R|7Mb&2aXCUQ!ongM!?;9z4@^Q_UzgwH_oKu0zh zd*9)uw~h~%7)!38Wj>w)&89WdD*rtYva5jc0)8|-e3#p6MeYFF9i|RStIWO| zweh?yLsPcH1w%O5SBh2Vmxa6zT}XlNJxgqJjGxOrj1-yYu5sMet2sRD)%a&%y_J74 zvA(E3fz*El&i_QCA{zfNmYSeUeY!rRlix9UB9!qiTY@Jyf937x)zYzK@q;rR1ov&) z*N<+jS27ga>S8Qri_|>OPDfhla|q`w+=&G6J?)sLS;}e~gt~gAuXVH;*`Ex}l3p8a z!rk9Vk<|;yH=H&MHQuPaPsDj11etk+KO=$BH(^el*FXeHnjHFXGrt&Th_h%ayDa4QN52$UP- z4wJckIyIqD&As4jWiXdm*U+E`iaF=2|MxOBzLi+M@1T{N+~;S&dDCkM)LZ@{O9W!q z>)>9?y@DepfU`qk6$Mvs57J+t!AE&_BU`>RkZ- znsDl+|Ct~CX8__~W#j+MzWg7_zWkX5{96{#Cg0SzzE=%d18sx7o_)RWWJI)z^9~0H zc?5slS8P+egG=^$ezue}a);B2Ac2QeoW)CI`X#!r?e+@t`*^&aF7mDAzs0rMqjzT0 zPgt<{`mOoM^r^}=ADbK zN)X)&8IJNsYz*Wxl|(6T43BoLMSh5lU8B)7GtXZO=BnL=zv17MR&%qkwGzJHOs-cV z&ipB=gR3v@8{1(5A1#|YlN{w#R@kE-{k<~Rg%mF>dA zH@2{9ZV=lb;r?#&iV}9Ir!kF}uTynKA65V*8%OicLLC`;>EV@cvc4 z-dPC#>=K(fRw>drvkdwR4dDoH&*$m zB_*^>gkq7#ZoP!nwDZGFs!3B@9d(6q2m~s%Lc&HJeY)jL4d@W3p(fTg^O;xI`__UY zb@3{&v-!ocUOxrk)zTT&7t=!-91lU4pi#{?HrZT{CJO_Qn^MuBdwj}IjTg1NojXLF zQ?!DxojR`8XNY6gBHAd(oVf6LFj_gGXX9HCvLo@4pLJdbQ35f0fX}>$vj|s^gXukE za6OOSv*g9S2Fr)Q00}AMN{2hCw;@d(SA+ro?a&{Eai%6W7A9H< ze9+n*0D?7TVCpZH3!Bthu9k!Bic|55 zx}-yATpvdS%eF~KBd1TVjJWiGv2U41&CdBqNxu90@v`g|b^7gF)@O|$Y~M?)gV%-& zvpY97F`LKGvCh#_q=LtL%pf?y^d7VE%BP_Ufx52uA&lOPBpM!RllQlc^!;lZrAv&* zO{Ti9h9yWnO?>EY>%r53mW$hL&*m2~K&lv@RxWXq3iM|wSQ}%KRjYR}BLmBHN|=b$ z#__Ru5{`ZD*CurEfTx`qc}Xk@(9c?d`UT$A-l$pBwRj}``J}`h@gp&N0;2aMoL)zTXLUvlu3_mcH%upm zgB|55etfZIv}_@WYgpz^mp7Sic8_wNyFo3CKmX17hm#@$Z+`-(qO?Fw?5 z9RGD9`B`}Vi3FVj;9q9VXNL~W^H&z9$)k)qcg3&~N$7LmZv0sfOv8on8<6(=Lda<3 z^eF+nK!zi<4_~P=AB3!r2%ZCUPct&`YoYLGB8(QAp8|>l&^7VHpCuIvMfc0~{Znfn z2ni9~qE>mAK+J8o20#;yF1lr|Te1otJDm(u`0ZttWLAfGWMgzUQ$3Rlz?<)eUbMZw zispMRl+pggEgdF{9eFbO?c|H$DzliDA}o8oXY>uJcXMb*)=HQ1C-xtc_s$|yjD8sLO1*pIEU-{j6=M~~b$1I* z@NiAtN-U!2R&4VoU1h?KdHb!3s(2cXvY`GK6EK-d0u7p6&}$PKBU5k zDq#9wjplpL#7%B;+zfuk=;RR+H#nqg3};xSt7l@u_~n$Ye$3(u>A5G@M&Y|mtJ$m@ zU&Q)Tdk(flNM&MB3YKzn?O^AZf*BCiW4tNqOv1A_f2$G(;J^&9F<-5T5$Rx>psW!&6) zW1u70v!x+Qu>M}@Q=`lk`-# 
z1wa6|mAQ~mTR8337#ywCRC@s>yMfv^Bn$Ff_iu1&%w{PpSJRN&sRBq+; zU{y6&U6lJncIqp>YzK%&htW&(bLOv73-1a%yS46LDula$?JDtjSf`LdmE^?yEp& zqZu@B>6WeqUN-jUe#<^;S?|lgHS2gMiPS3Hvf^+JKn{5jqTs~;Y5HzH556w`mizQO zmwHhx>V`u^ZuT14m(};?N<@8V$#8{_cQllz_SLL`BaIKq?|1Z-dz*&p{X;Ez!tbGuT*2)2 z;OvRD(1|Cw-XyR6Eos_Z!-9q9TuQ)Idoo@>MmD8!O`K~a(Pl?sR!-AE4xNU^5@EpfRtqf zczA8kV{i56q<>oILOk5cGNLKDRWD({FuA}O8DQK&Cw_N9VQ(@yp7>g|2W}t&=~d9| z8dyQ4ATm4)?Y`GQ&zsn`#Q%^`M#w$D^Ye<1UJb71d{Q1yLw@|Lq|Bx5`h*qoV7W?HHD&O53;G^rZz1 zI2S5WGh!NQ?&mje%5lCR=1lrFC~{`1v9;IxEopqO3o1J`1L!1am(Y?U@*x@p?%vR^ z7!C$b^`*3?H{HlhwCSUAqr<(p*l+r{EQ7c#m6UkZ z!nE`qT_k3FZq@o%sc0Kdj)@^+y(=pBE71J;RTlu$?#zdb+?PTht~vH#?eX982AdV= zK{%7=Me29%zo4i})U2VFXxtuvV~!Y(F_uAD-M9$^)F&v{4ZU>Po}4W3i$-_#?V_>> zFe*_sY9rmdejbro3dbs4Nr!)5r5>A1bglGzzys!wXJn6USfF&lJ}b)k3@=j4A+tYj zz1lSxSZ_eCOt@{!?dbjgu=n0kO}*RJaI9ER6hx$>G^I-K#6}koq_?P`l+b$*f`U?| zOO1eZlF&ns(mT?n_fCKSp(gR$-us^Oo}Zq3&bi-r-}{aGj^7_RhGT@y&fd>@)?9PW zwH~YyGPF+~1=?`9!uImdy!mDr8wJhx_fB&6<2zXOU;)V}o!ZF>HA+cW zR$VtqFG6^$XOF`y&xgiES$-6A7zdF*3k~?7$nyGoBGf%~=xm9YE86vg$z4T0PPucP zzRTh-v{J|X5V8oJy#*4&M`{I`a$(PL-R5WNxKmO#G^KETxWoj389nXrFlFt7I9vGP zQZ`wlirsa~VH4)6A!$8dMS&}}jqOQJzpOr>!OhOrKJEMc%GW`T5M zrnB7c$T|h}xkeY)gszOb!phfh^L)3{SeDG1@;}Ha*D?e1X)L*6I)ug<_9|hEf)E&$ zrLHV9-KfzlP;y*-S|RFywlf}?V?2IJ@&2Z&@+HaH=IzOiMX{(6_VdTs4EFov_r%w~ zXRP9FhxvvHV#if`mz7*abCBlsvg~lF& zD1!HMiI@)Bbq^iIV{hCVvQRC?b(!9AiI_>PSw9%2^t5cWe}g`oos+2<%fb?M;&gc3 zAlxng;iawqIe%z%Rz#P@^Rd{+b3A%}g6<6SJIm0d16vJ5&4U;6;b3fa*t>=^x)=Qv ztS=AD1=@z@)$-b;R4Lj3FLkQc$>=tI09~tg5<~TKMbCS3}DuV zAVkox7eEM63&W0ZLBJqlYP=CQK{7QTFT+qh6Pd4X!toQ8>H(>X7PEWGpe%OQ?vPp} z;s}=nd{_i;{@LDunGkYhNAzq0pyvgQmN58`W_Ric59G0X)IIL|npLGiie-V5uECp6 z(Sfhz@3B2l5^(%7k7c}fV>*$56gIYgn3al!OQ!Y6YPnw1z02o}xlmhzjCQdk7I}WS z@hwen!S<|{#D&X3+e=f4y>ttub!^cMeTHgi=9gkpANk{O8zWyYB`+17OxN?UW7OH- zoyz~NM&VL~_~6u|e1a?u$@wL&P|0T2OCePFlu>2G^OaH?kZ&y6YZrmfgxe(p?C6RL z#YcULg=hFnmFM9fL+4NilUWzzlVT^bsnuGLsgGIO%J&cPqA%6a8eM;| zxHvh~x4NfdP8GmO3Z@9gnr)O}!%N*ma;f9A03gICcp=)M^63yqtzxFW9 
z=80Psmu1ufp+ib(PU9>VTP{$P)-dO*ydJeT{6W<|bZ=Yr`am*2r=7&KQ!6!j>fI8W zs<4>r24P)Yf&IcyzP6E{Z*5}?@-}Cs%_=NQLi4K-PA1PZ8V!6F-$V27 zv&z)=UcB)3aH6gGi;UyB-5Z>rx#MfW8eR)h@4^`{C<92@Lu_TMMlY8HIi8I#D} zqZZkBom#+53>ZZXtA=@UUtVjv9{0_}sKv&`1!|@|u6{E25%+!&&qdR^Zw~l8=B+qY zaEMLmG4~zYf`xwBan#j&FUi}f0~d`zVwXMIR+^!TGu%pIHkkW)n&!H4=EkdQr?C~N$JesWIU^mzV#`f@kqNTD6o?8>+w}a<&?4iRaF)YUI^oe-`zVt za-(5G7a}^v)qp|IdKVhzYF7}1^Qh0_eesXX)Z9Rhw z_8x+R?lR1@$0vaQ2pMaT6f>KIz#3zKYyAdW5!Rm6UD1afr|@sJY%A10JkZ#^Z(;zO zhv8sEA4Av*kM^qeLiW$p2aM|-9z>I(nv578l4kg|4?oVq{5y7abl=-;2=`pJqKPYP zW9v0+U43fT`hfDr1}hN#a8sknK%Rmc5%XEE6b`H{!g(Re@Qw3?eWsYy73Rfd^`3wp z>aBsTZf9cV(TELZGUAr-CX;Dp2CI0FNPxp^wG@<*4u?gC(v^N_ImMOT+83JRP#rEg zCld9cg`!z>dw@S7ErFi#dyhZt{-FjrPok}XzSzF)>!-H+QjbIDqNIClxV6ic^VgpGaV9SSR#pr0qzvJaMtMyZm)^*j}!J4U0-ezU-`y5DCbb3gc zaIX)pf*w3mo<@WVry3gH04avgek$sDpkR%s{R#w|a-XY)513IGs7wYWY9j}G^^V~0 zc}fvFJ*ufvaTPU}pI?2TCpy^1Y!VQmAs;)d!mEp2Tj;SA33#!{oq4X;U+~6sr2h$t z9xHhM;QZ1;Kj{)i=1d#6NplnR+kcoqT-S61Cl9S}1w->2m=mA-@o5W%b?W(@^)#E` zTT%L!gK#0b99IPa-|?C*eeS`6?^jdN1gLO?Xg(8MP+Z#`V$bu2%D?gCiEfECV}`1{ zuh*Dt3f7$L_2ewKY3Ug~MZXDjyO~Cg&m!Fo3YFUIU|(#L;}(t{Jn&ZnT~-o_&w}n% zrA#cq<3-9A5}15yznqA<`oZzD|GACxNboEN$|jEhu1&qD5`^aSzgVAN6!Gy>WcT#B zalw0JINGWk5(5Z2%|0ym5q52=>+-eatn?qC{L~OFGN4?#^0Qom6WQZX{1eQ|>ELq~a*<-Y~^#lvd*gDs~CP=pdNk%;pp@l?lPE3v6yR9xEgf(NLz1 zW3{=6&I^4vt{-ZpBsr=YnqPy%1Y_w(WU)RiP&re`LHH#wq^OU*q*`#wcKg+nRDCYN z`|PhTJY76#BcVExd1I<;A4Qmb`TAY-1?0r zsn-|2kEIuG)I2qZBI?3JUV`AYfB?KzgM+x8mG%L?}|#6LbebaY#UH^k0~jYZx# z@Bvlw68f>%Q0yeKP@U)cUc6YzgS5=>?;KB<62{vF9ln_zJKj;bq{G*zLqZ(BuUB!l z=+(bY;s8hnS&`DprbyDQY6I*)_cx7U46rdb`m2E# zkTjt1?~J)EVOaFNkn^^|aHVAUzEQ3Z&|tRjGj6oM_EE%p`yXbN~3z}o*LDQ+U{{-hfJq#FOE z8viw_alsx&0h=%0MV0<-2RP0ys!=Q>-~WTL}{XU!2tb%A=M!;yK!98v&{SF zJyD%4^n~k>^x_&7m_wGk)03LAKqZdgNd?U$-v?Jdv^X%FlrlS}Tig|R`En*7Of|kD z26cgE^F)oaX05%wQ~BNR2T1IJuQ!uBDlj|HDI3c_)L}!~^|+etzQfo>OWW*GKp#Op zGN!jY9AV@9Z18iSIIXsP zwPC8AFC@8Dj+&X%E(Xumh)^1O-^0b&x4hAKrA;#_%1Mi7Be|f{-J>qKaxeLJZ(J`< 
z_!K9x$8Bfw3DmM9eod#o{nGYoR0!rE3Wo648XfDviRR>antZB4CY;%|S0#7Kan|?W z81-a2&k*i8*uEHJWEe6*TW0?1nhxV|FfZ??n#%Ce)zzWa*oxFMn(uTsFY8@z@%c2j zro#*=9y8s839hOv6UN-rPB~?rnAo`j`pT%d7G8!LT;1#-+HLo1R<+foz4YQel15T9_=!C1`DGo z^{p^=5I!%Z0(u$&|E!=w-X-hlw zKYNtv?H)$f=hq*^Y>9+qf`VrDizcULCOX;HhBn@2oz824t4`X{wXvdmhFmyy(%Dx zT@~@N(VZc#Zy6ZLR61WVp*9ZnZaN+A29lqN0)gLkef!#-Y0(!KZwVQ;Ga*^ne_jxw zAsX!(eEI_d^zdCi(!Tm{k@5z-wz>pF(@SJ>Jdp%PEqC3;G{0Zf(vb6w@oFK%T_im; z0X~TWXw}_B1OM#blxWC#V9xwzBrwyIFfzBu+++e@bYP;dEsLI25Aio@Y=#tjxZAtg zybEkWQW(4pOtZ`IR>bkRHO+b9%e>^SM!mX7t26ext*yyTOE(4=X{#F^&L#B&#Lrg^ zb6#|LYNy%Pf@^@Traentqw>d#Z+&*zsl(o9&i6zdCjv47Sd{$u^iv#Ab zTP#WWUvUi$7xZ8lDi~V8O26aqG6GT0T#PyO?JnzB=5wyNG&)^FVn@m0+7Hm>hw&BZ z>PGzUHj^4sWs8PUrm>|7JONNQ7!<>=dRCFz!n*Q-TTXqzi-Ty|<$7vssva zOjDQ6Kg;#WPyH0_O7!cgEACO-6xRtlVJ5LF<gBRvW}Yre7aKovvmJk-*NMx* zaB54Fn{oW>MeH4y%}g)e_A^cClouL1C2{0rxT`CT6G)L&QqcC2+TYB=yyi=HC{g%$ zU`Pbd$PEV}MlfZjfs1!oMjx`$yr8ZopTF#5)%+SZ!@YLVf^`deO4w-kngQEK`6zZm$CAV zOPP(pV3cZ2TA^Y%lAG&eMR!sJA41OL z2dE>xZ=$Bnk8l~S0|YdS-XMeM&F*T4BZr_)hbV80S#-MA)N8C z$Y`RZ&v^i|8SkiQ6)40u_{glPzwH6!{mbh&$HR5I^r=|p1i`1hg0Wnwxx@@2oPSTL zjHbo|1|>IL>vrAg7tt0_MhjD-g+glgilOL_3r`bQ$I1>E+}`dUSMyrIEdS1Y-xy`__F{-R)*f-Y8W zbxcp-)VQDA{q%$W$HOns{-d;TjYIFThZj$sx}5nj6eO`E+mOob+Sfiis=2)y@20Fv z&!Oi>HassUL2&3Y`kenjxHcwseN0u#Hp;DEI~}{qXO_{97e`$l&kn+)u6(!Z%S4az zPaaL`Cs}odlU-0hjRf7QL^$>WR-(7QH$A@kSxF`t8$RT0uNO@a4P`nonwzY(3Z6`T z-ln0ouy!3k6~B;4dYQKuFH-GGMls91CV%jDx|!x~EHhN%vOr+y8A8vgp$H0-QT|3| z+G?YED&mz5K7qII0tpci<&E0~B!FvPKlJV4`vIC-ted*!lee&dR`C^+wo?PdZ*pld zk3H=HUgx#_sJ#J}=>3=Tr(-w$MwW&}#+PZBXzFT$?Jiron)!KmAf=Rf7Y!$9Cjv@1-SUOZ&L8 zKWy%O=rnYkW>^BZBgP?e+1?t()tHBt)XnebXs6F_}+g$82-6yAto>Gg?n*VC#efxExhwMmhEadoM?rf)r<8O zc<%FgAt;3p6r;4kEN9!Uy@LH5&RbzN+$q_{1rq&y?WtWwIQ ze}JA0l@iTcQm<6>+sY1-L!g4vij5r01LU}`J+>2T-)^T@<`0F*oomvUGxWmS6>peJ z+|g$*`h_(`4%x6_zLi}RcRef_Q=+sLr%zC%C9Fjrc>Oex(ZGgxnU7SdUD0TV?|S1V zs@3iO9I<<$DS3hphriORkLW}`$I)Gfn~P3~UbL9?$!2xTA!Cm97y$}nU@AADhTv@g zb+AY`bHum>W@_hgd6UaMPHX(GYhi28l8MFNa!5fr#NJ6;S0Z;ba`ShT#Rc&#P 
z?hQS?wISYd?HOoaB-;E3h>GG3^h^R(`zU`5rBKD@wgWo+aE|(WFXW>7)i%}pyjmzC ztReM5N`{?O>Qk5eQHZm-f7{EIVR7MSJc1T+be7 zd{~1NVI6xkm+XQB8p%^7d4<#Z08HS9{f5?;O#%Mw`>lzI8Vsek32Q2c;6>FWBgU~8 zH;dK+hSG+t`Gq%W1;1zb#5dX#Qyr5%G;YVB>0!79%&M`G54i@VyRG$v zTFV*anyoEW%^i~Puqn@YG{?yL?F$w-B<02uu7&!Vfz7yMMtLRdR{lwiKJvS};_n!I z&WY52O013|z*?I-DjN@^8g=Rz`{^4bXVouf+6V{d&Px$L3tb>h`Sk4Op7+rWKs72#*yX;)J5qrJvX%@4HMH6H4l9PMMXQjQXztTr~*^^6$oDZf=BORQu+G05WLK`S;nEK zuQoim$|Hz1Y|dAg46{9@S~c!~f?Ae9UN=C|LlHQNui@ys#n@`8Hvu;+{2-mX_<|8u z(jbnaQHB0a1~e(j(D}V~v(Y<67GG)R$HSSuPsu&sXok9I0G^turMjt`TtfzYaRghB z?thY#v*9}G=Lru6ZOh(9$w+AjQrud;X(k`$8Y50d%LZ?+KDzDxU7{UlE2v}HVcg$4 zrXpMgzPp#Vsmh%2#l7Q6;F4mmi{f;Y)H#uAk4f`Tq$z#LO-;eGYJ#35eY+5(wOUcy z8afV|W5HGYRZ0ev7`f3rA{yD+x+)_ZyDY4sPJhF3R0@Jp*q-XyIyb01Y6EMWON&q( z*!9u&=FS?EThdpdocLM8ke^zc;r2MEQTB{6A6tpD6!tysdwt{6A6tpD6!-0LuTb^+vS& z-c7viO<#~;_UO_}LB6@rB6$#nlfEbnclSWPP(M-{k$!leRVw@kh?CxZGgZJijC!jS z#}G3>ISBZIhY1q%cSc2bDi_2IYBTrW5U9`O#_98vUA*a-OT~OU{mK{Ia6ro^WB@j< z3m17W6{vJxc({)XVY<7gkPmuERK`dfWKQJ0ZEn@Mbsm4%8xCV|(2&cTXyKBA#&2^K zvNN(YP&|D1y5(Dd5!d}pp7c?ZfrcDEX5Q*C(M@4asJ88D zkT}*^Hyvgma(`dW&;IBrJ}I_f50rttyJLtJy1nuwkpl+NY9GMXc7zr&$jveuRpRFwtVR45T!)O$D{{^1tg=c&#}KsE}M{HEG<#V*rBjO?)DxglZBR@ zvvm%m(F#A$S{Pu6PFZwsUlgboq-JF?o8v?+#2r*EHm=bzReQXu3iQwCp(+_W%w<}) ziMC`24bSLbF$s{nhARr<%_uwCh4B)AVWMGLh^%3Pn&ge$Fm;7N-TEqd>1%1G zRBftOQi38=Cd!UK%;|G_S#MUfCM#W#TmtBOe~Ni4;Mf?TYOZI-#B=Wz}VC(%lW z3%vKIoFZ|6##j%TMi}r?-eIGZ50nVLw$ontBJbSkErHYoaYh{3dwv$2T|j#O?*65m zNom$LX5Wc`T*tXQ?1>WiDtA>nGF9N|!a_v-VO(9(3bLbc(8N&RdLx|8*f1hAlpHs2 z;j7;fQM+W$Y~&Q&)Q_@J4Yy|#T$I0C{O+v8c%!LIp%6S<1S99v zLq*#^ALF81g7H0q9#F1w(hGY;yv6jBt{J;~P9z=ch}*u1y7XC4!1bnKVfVeIp#qD4 zyq9qI<{*|n-l?q9sf3G0t?sYn9m5mU$~Ln$j+42pG~C^0`ARqn0n=yMK$_pEks?L> zaoNpinG#uGg|ezuQo{~?8C)K^(*3W3 z%z43^P%(v~%2IzNyM$2fe9tN51EaG&E;wc$N>5-`i)o`oyA6 zxL@QuV!Axge83kX`mO%UmxkO|tf0#tJ%vlb6Q#u!I2y6-kI1xqYKX(*7?@9T^N?7nn6}aBfgI^NJj%HTT5HQv#Hi?;m{J6 zU{#yhWjpn{;UM^^x9+C9z}rrCh0*SZq0ci;z=XcwGa81=LvUoxw!?Sd`~W?Y3JHDw 
z;wFZ5xUV{x8;ocf@sfpN{;XMV@#Hk-TxsgneRr^YdLkz8oBvxRL&vY<7yx+aVe?#S&L4~ zbK?bS!RC5x_NUSMoIaj)qn0SuN2}SK+;)|VB^~LOvLZud!Om-L|0Wu4EcSPuw!ZL` zUFo!G_79K&;vkM4Laqm~l)R5FEE&96hH5zKscjYI$HDgDPG z6>~fqey7HVRN;ijvR4bPo2heK)ICw`p2M^?Pf9I?>YB=V^GYkQi<~ag#NYu{%;SP< zSj~oq>JZA(X9j%czSYY(72XG}MtP4~+Hb$PEA29kTju_B{IbN`>ZT`K!7gq+eLm zUt`wsB5V(GwPDYr-C;pFBLTcQx!I`e>J$)aUJj*FDjG7`!B!E5_=S~w$C}KLSMRg( zxG}_upZab-&>wimsj8Y!n_jkDi7%z^S)SFxf?WgYL>V2AMuA0|#yxF~Zzo1mv^Lg} zd@m`k=ek*?ITTaB^Y`&}Hifq^XlYoY9ooZ37YM-x=M@z1Hhi09aoA-UzTj`Qp@IYK z4)lCWRR35PDn*$X3$-M^LVH|A_iqT#nXNb4l=~bv&x+Gdn`e0qL*-n0XaQ|V?$S%I zwW;z%w^gfuUz@E4=|=9`z^)(sh|XHlYP9q?u#Ii1;)Cav3=sTjguyJSpNt?h9J{2j zXggM7-|ahCeFNVDR^F53T3KWW7YhSUDg<<1AJH|3=2}EnmoM2RY5y!fX;Hx~=Sz$<98=F3MU0TPREUyo?e1uHm`AuNPTg~3Q^Bb|`qG@Z zi`rE~*A;iMIya<4KD~{k6B&In&0rP<q zH9s-O`+z=$Rc?^gUvZsf2)pEwDp(n6+xb?pl~%lM?eJwCUuBVGa}aV;ADQ+kujBOk z9?43fn%-9gYe1*z^EQ@NSE~ocZ_ci#orR>CaNUh zH0I%5(|X`A0?u3wIqD;gO$O#hexSisfrJ6Jojyye3(GZ6XP(b$JX^~d*HbqYQn^-m zOQ7TCgLC!@@-q1}B6U5 z9VG{}3%&Kqg?(EBW?N3P16C0Fj9-7f297tSCG;Lo9WqRf;Jr|5^SbE;u=?5o%(BzK z56~7R@bbfY0ZYz4U|>z_c?t|tRk0(9DI67rk}Qaugn(mNlnc>2cvm~(K({KMc-a$9 zI%x>QRe@(rRw26*tFVLffVS7X`~##T0PkY11!hga%m+6xs7RCGqTjDe;GR$+1<3%- zf+u(}2NTSz%|eG6`RMY*!Z^> zc6r#y#60L+?H=mc)Gt@ctrq!+-tG8a8pAP2CLd1q;YH!rUX&$@d*y)sNjp?0p3Mxk zICV(XYgA=uW&IU4b8GNCCxe^~oJeKtZE|Wb<54_`GN}ztdsp2K^ms1qwjU0wLKdan zRIQoID0Wc#1Jw4AI{HOnQBB`=$+&-Td)-z`FyfVY^wWXI5^I)>)qXOs-=UXOOZ#6` z*hC%7>6AsMDXk;onIqzZcj0Fjuh+b!<>#1M_HeMbi*wZQ$#gl)_vtM9k1fiPUpS(E(0>t=nU~SVfl5tC5Uu20n?FM>dEU>(02X&&pi`Z$V}x?X~%lm)57v=@3vG z)X`@zJxRV8<59PD{XdLJKo}r$^s~~kCiE3Ma~&==cGli|Ca;Tp}&G{?Stf`aEmPux_^3y)|!vWLRFWb+smbe|2_>Q~-bP&KdZ;D5Iy^D)s;Gdg;?iYj4 zJZzBbCPr6(fOxXYUu)E5K4EShy7U7S=>61EO1dgF{aKHpB6j@5I{71#OTYRXhcfCp zaY)%HEV9KQbh*9Xz)h*_fSYs{K^<$JCFeZG++u(zO}U`{(z=4+UE^C;#_Wg6x`%V0 z(r}icwXsnIA6`z+K5x2Gt=a?}6*F(Ca5c*v@|cQni5$N*+pbX|pl`#qB@_M4;+dA$lS^86D(L9<0TNnVzP3;CUNq zG<;s#gtj-U&vqrVJfw*Qx& z`9I4#|HFbpW7YTk&^k}*>{#Q`+c6fY45*ijIQ~XNlngjUOnEDI2=xLjN)j2^a(5Oj 
zuY9-+b2EVDxlgKFcF}ucwgrxAYEeZtcMOOwyZzakR{spBxxoK|JHs&lr9kbUzdBY{ zBOv+wM{Qi(;0CX?BoCIPX2~Quu4^vGyoIf-ryUqwNB5)z3jxR)aPVxIO5q7rc#g{WWT7H0XoL3AUzQL8KDQgr1MTw~{1(#ze zy>46w1Y;Bb^ufPS7@Pk!owM5gcYob~fmZnErSWfjOz&U6(fOy~vb+s5tzVxp4odHc73)dJ8IUkRe1uLJf7Sb(Q+Oc;ow6DVg;r@&&RXH^7m|bN0Tn(1i9oZZ?^Rz6$X= zT*;MGi}5dA0YzTcRpfR)*$nA(V-YU!)0>?lUYmAQVW-*Ln!+y}$cqEWb$HWHLMC#c zklLW`?LVhUFn;*{x0hnF&7Z0ztk?j&D ziVn4lzEW}5WPi2#z)ujybwnP|oBuj=P$vZo5Tjb*Gdn#%J$AGVc9f#M$+Ez`?zI+S-6(I9*jEJ zI{Et`HxC&G(zyCHvXIpyW1E(c1;^jBhalv{|Fz8E|GyGX z{I{{b54>f5jx`R!tQ}DhIfAR(muh%tp3kKDVspkJJwAdRJSFLO+CSX`-6NA@%uhp= z*7rGEJV-_dt86IbtMT2{_&fTVg8|&nta=#FXfWL9Hgxtdav}KkbC)x$T7Mx=$o@i} z`~W@fp;8*mnhh4#<_6Y1!}~gxsW|4ZjEE9=pDdWJCqi!6F8LC_{xD}Kpw%eZAWR`W z?W}lIN)XvALERe7jWHFR!ds086!9ryBgU9Y4%=S6(X4o+Wj(zY!o)q38`JsrIZs2@ zWmDGeYgmpBoti2Y_cHpVK$*wyuYf#La>P-r)UjG=hTol5gk3{~4kwB;wp%)@XQV{CM+Dr2U>jpht~^$UhxZmknxwu{BM^PR)G-#k$>a(8p8aN%GI+DP8!O<)CZ9SIUnC4 zzRhcbUoY?(h=xXKoLFwI79<2v1vb`~F3|IJ0arqhQ!Be%OEUO3V#UPsx!koUC;rjT zqH{D;}!&Nlg*|@W-X({qbc+^dSnI=GSVDXX=Z;o-V;%D1&Pl^Tt*V3p<3Pnh% zYaU0pOkHEReA99)z$ON4@Yw#2abIk>ar(?;vYuxNIPwCeH;`~z42!7nb`HY z*G}hF@XRDsGg7>&!tEGux5-=Qqr0&9sIW9YUvu~v1>1(fKd4T3fEEpDH>@+)UwoLu zFX6L%0-8jbaoAkL$!1BOwLtJsj}3Z#6M>r9v3aFWQ%AwNDy1EaPYX+)O|nwy?!igF zUx<0m*_zB84c!{sl887^bP16axpeXcnJ!u?XrX5(_Y*EFg4HH=vD|i6uZKAfnwFDY zhORU&`Fbs*@6jc~YAxct_Y-u+50F$e-djud0-(lGs^fH6$_Jf~YkX4o&Yxc|CMOwN zExa%~Px+=)j9RxDJfVg*SzQ3n5k71?-X(5KB+7wS1Y3F zdY!@}fotYls$lsLVa=1kXi}zuvV=knxoe=GKe|-2V2Z~_1&$*Mm9|ObWOBaq$4rZP^sq zn)Tc96#!o0EL+7NAm*Ve18z_E;L^biUNA+)0wSPc$Ss-5=ZLuW^y4u&gapWjf)SCYgjG_y&$a@Qi!Cv^CQ1Qwg|z9IJ%*Cz%BQwFW;B!MAh zpbde>2XKsKd4?sy?m!*4jZe;jt>e|4)>yNkC)d=sprQ*kiu6P0-vcUiuSTX!Onf^a zWSH`(-HLCzt+E9CN6R%9__tnVR{iJ-Ad+4Nl*PHS(0x%?7;bKMEVJcN6sH4;Ia5AW zaL{JTqN==>X{$K;@PtnBXw(qJrmC+++fob*R@<<*84`_|_LOnNY(*VJlivK)bx$F{ z2UzWLT-i>uwW1F!_gQ{hNE0lfs@vYErrY%pye&ny z0GnaQaT3n~0yCKb3^$rC%c69%(bL+Lc>DqUk@^)s6;up@!)A|&5$k}68OibRaVIG8 z>tH%rlV>;ogc9~G=%=9WlKlh3nCsnhgZa=jE20t5$>|Bte}JS*H-Rt;W7Z)N 
ze7BB{fDI6^>3&g*tArdVc`E_Ok~0Cn_j(9UaLwGQc?Mn>Qw9J0V>!p>A@Rrr07Gxo zfs-`wx_~JE&Bwa(I zOpF=9aw-BL`wU{+1)0kB2kE`N>Q|Mszp@eOIFMY&nuGY;dnVqJm>5@%y?)kZ@9|)) zMNVk4@$7mR=ecg_VqFCuZ<*W`wzjlYpw)iQFd!EE@CRu1Xi#dC0kU$Na=&4i!Xeov zSo_K3bmb{#;8>Jvds`C^UR1ni55?;m1+N^{r^aT$WZ(qmga-T_&EZS35Y7T$>~I_5 zZJ;~ymj1Iywy-Ke(fdgA-`*$3RzaL&S@T;j_T->wD#l*);eMAWn96vin1_xS@-X_f z)0Ocsk`IPUwjdip1LK8H49{O=Sg{O_;7`RJA9=64Q2!n}eFhU_ll2_s3G4vcqhuHu z?Ce+T+zgG0?XSA<>$+aer5*I$Ie`}r{Qa#vhq+n9J!fy?9W+F9vr&_nGG_gFf>jLL z-XqCE!&1ZF-!P&9Vfhf54Xkb0Fu^j&q>qfyh=6S_=j~~OV~LH@a@2dnlVQIF*9d8< z54_9{h&C{}+ry;+KR^Of>c<4koI+790kuIhWC3KBe#=mC8R%DIQL{*34BE)iN!OiG1`ua92@wB4=8AeS4C6`{Bq22E`Av5emuEr!X z2OU8?U4`TKW1Wc)rk<9lyKZDx$l`l`-OEZlj+F@f-lJK`Z+8zVjCDs0HRA22%WdywS#L2N z9MXO6@~ZohZkn#pG~|#5hAn|9uVj-p!Fr_ZGj{v!Cec4YV`_{W%o&#In}D$Bb73|^`-FR| zaR{?|8P`&-PT{MUMfX-=)DY36XA8pLBaT$RkidHihroot7E8ohxlYlTAptxPb-S5b z4*xS{Ak*vXr&LQ+49&*qC>r5UH+8Jmv^ABl^l=|m?+De;WS(6ebW57*X!GE2vH!c@ zWJ&73cai+PoA~hI1TcDD)8Yg)=GTm1HL)MUGZVi)bhVb)JI%%rdJNy{(k8VHi}WOE zS06JL{Q$*7eLs5v&l=KKWf~Y|G|0@gGc}JEH;)L3XFt=!&l{gVw&N>DRA`cP2{^os zb#YWX{lvRPb}1W=bkZG=w)84Bv~UZto?|mqJt@xgrtMZT5ro93^%e+!Zpy66Fg?@5 z1Ea&Jk3G#M++G6`B5-8=f0ymr$}Qbpa@j zeo1Y4>4hmu18zyxk}De7^3tWVQQN;SZnh}#c<1mO8NJyYRg~pjSiyizu@ukii1|D6 zl=1aQiNS33xhRu@-rt{MzUD#y!-&j|HqyK*@m>yWMx-p7BvX2P*mEpE4wIaCwuUJL z5_jyP-XkoQY8SA~SpAwUqDhoC0H_#rx7`Ig*MLoC#DX+}V}P&L$kGrGg8(CQM4r~# z%N@ufKbDJPX7S%pkjQUvLS=6gKtZ>9!ILq}^8-E^&6j?F_&gm+oIgOp8LKG3g7@i& z;Q18r_I3YVAS3+@wh*2OrXmU3;erS|{V*Ytu#e#}?zmxXjpPIDzb7OOe!cbhZX8h9 z#`_49I0+WVkdPK*$**8|Dw{LR zj<=hmA5kpDJ8Qn%rDd=Ea%<#Bgk+{J($H{_+=~yzw66cYVOymW9!7}!hy)#%oFa0; z4%EarmPn8Jj%)5`e#CBn9K&lbya8lk|X_O?W%Ky;nPJ5h0Il zcmt)zSECr0=k?IKF`~tSV6@IUQgQg14v9io-Nq!4zQ1!k0Y~&FGj6SUEFtIPaB$cCJFu8QS6!N-^ z=PXmwqDJ)w9_Cej;dXkVWPPznm(FP4DXU(eERVoJG{Z&@>Qj!3jIDYp%-m9GX)={I zh?}Nz{|88RP&PVUAv@YZikQBqX7Xx@slI|sH`aCe+hIrgw(-<-Rmz7eU*b1Dtw$n*edy&n_xq3KnAqj_)dlT#EwIMQj`CTL6iH zo&VSc)!m}1q3&X5U7ekJJE$O>=5QEbGtV_=0GDB&WU03I{0w0s!TK@-z8Xl|RB0v7 
z)V}-xaE==5%L=n8M4H3zGIe~gGu*IXm2F*)N}O$o%?G>FVJnnzL{Dw-fl6j2z>T_w zcsZnoc9g*c635E>N{=7=qykevNbGJDoToZVkA{f3? zD*e9pbq$|jcZK1?=3Y z(L&eh>GMb?-3J}}?2D@=%+Sy^@f|KB!bt6V>!TF;$%F#4@(@9q%{OUDPq0Vbgko=9 zqi3S?%}suld7aR-K9k+1rG24!&7CRw)7@e$?2uiQ+UP(L$iLwAxB!|FYNXlfbmYo=prVYp&lK*N_|q zYQkM}-mndL{!cQn+6&Ow#H|+TiLwi+7v6>F5mk{^oEZ;hW8_64@ zLIYr1K}A>t8$DOq?!-?Eknla|g6QPJ)%@30JJ~Vmk$E7YM`k`QV?9nD{)rVS^}bz_ z(?DpF+dRWmoI{TrE;6n8@!%ET)FNRfwoTeO=?WV}ZuVgBC~7($`{Ae_ShXfd<*t$_ ze^xpumY@EfQ)J$?!PQFf;r@={3IICPCCi_$4zUb-9IbT_&m0?WSNSnnOW8*K!?IW) zQF?5pEEt@l8`m_-l1hxs01|9vc67CEiPf7D)@;&ly3Y-9l7K6e%U$IQ&PXcU;j1|v0i3~6I))1Hyk0fxH7%R{rI>>QE}hzOr)T`nGz#D9BnoFwmS zF~E-Ad}l9&?t=#URnR$vGCaUeZ_i!?GIa2+6r$XxnSGm>3qPa)GIU!!IFgFcXqUAG zCTMt6Xk(Lr!(XjjAO!)a#z`B{K7;{&1RZSe2R5&01j@APa&jf7g()*~cAcg^Eb-WT zxGi#Ib#R@i2;SX+9$vYv(jTCbF{Ub%wo}Vy&S5q?eiEEuGFXCjw3zCPXz2mhza(SQ z`rPXU3eI106KWa~Pr7VDX)JvM4%sDvT6M2j1m`H;nm27#7F?(xP`-O~6I)RtW3>h?$bA`h3?pAXQeVEhqtD zF7D2chRI0@`f=kJhr0N5lWxpxF3kNT(@h2fBVH|MY_;J45M=NHmLNu!E>pya3wn^_ zylj4nF00Lu?4o>FM0$+UU(lFN5>S`kn$^ zjBG7W6T_8gpY~UD{v<;$@Am`-WWf9Kd#5i^Iv=@x57bd+$I#U%IWS|bwmK2-QL;R2 zfO0qK{sMIR+~D9lVZ`-ZQ7Qj=X)R1dyEQ#zrh8jwC*e;?q)D+y)j2F8w zK)>f0^k4z{NmiW!R}yGSs#OGk>EDMkv=1C;B@ySZk-`OYGhrpFn~caKvoje zkp6e>?!ALXv5(bm;U`(#fxm-+W7n6lxk&b1szL)(!=wpz(!B*_HMR-U>_)l*#TLLy z>~|tnr)^wGL%^zNYn4OVT4RF%(&VrSU7(^cUs?s%f(c=*8v7BEn_Fha*o!9!U+CZx zVX}nwX`9!$lBq*XX1_^0J72)9>ORPVyI$wC?rhpmvi0xK9p2Q|Y@foD@|~=6f<$zk z4){j+P_^^bFK3sg-%(A!&D-Os6W~ji*FDT&uOpq)?VQ&7Q3TqBBt&y;nr}On222Qp zP8CG+xPULW6)4PN7ijUby4rS8H>zfT@s&y8;k$`mcj1+( zWa0z1D<@=><7>D_4a>1wdrv(P!Qu{w_r9$#c;AB0>^6}SECZ-AqYK0Wt;NUS=Zkex z8fcb_ov38=792_Qh4dRv^PHfWfojT8a^u9(`a z;OzDA%q-&i3@K0^>T4OmltEb)Ib{qX*z0elhct*|3Fa_tiLFie0(T+>&Hb^g>m49& zH(8Cq)Tv37+HDqY1hXqX=B@0lOxRJV(%~Y(ZnoJEKo#G4upji=Yiq9qVSpHZB(rH6 zL#Ts|M@pIa5Q5}T;zx1gC#4&8}Z4h0-b6y7O1_3x)t|_ zhl}bixwfT`P>TI??75fh#tvaBwe3myiWF;$z;DoAX7nJ|M|rQP0Dkwg(`%=jw@7f* zD#lIYg3=JDu|`x~QB>3HRX3`DQh_4fX5|6ZorYv@0z2e=I&+nT_p5lbtXexnvRz%? 
zWLvWI{fwA|_&otP{^4Hm%VO$?k@1O9uoX)x6t=2C>fQz=5&RseC$5pM#?O&t;pnn` z@tiF^{4la-1=q*H-jWH-Z8ZGHT3>qj0=OE47dZsD8j*$44dpi5?V!hFZ6^2DKN)jX z#bp?OiwxrAi2JHvIZ(TU*KkqP*eB)HBsdz7-L_t4yQf(P$An-FOAsClozAuTuU z{b+J*EfD0t{c}A1TU=cOKM8bH)d6d-rr-=U%up|&bLN2a=_Ly0!twP)D_klaa*E#% zaF>qK_JAm5^j2m#AKBl2ErNO<&zNRp*|CGx;HzA$*DVMDye?8=rxst>od9iSe0=M2 z2BmL6k^!DW&PMn15JEFFTXK_%LGo0#`nU}SNR#(Q2lna$c{+t0lgQk9{A#cuwy3PJ zCA8wXARDJAZ?RrL{lc8 zl~r@I3|^i8IbJM-xa|jsOW%TBaVtxdG?^Sn1a+>_6Azb<9Y-#9s-sju@M<5 z(MLE_da{dl3 zm=XNG^tF1%p&J#Ss`E3MQnz*NhW#s_db2W^>Yx+CGXire%l3`utGxnCRh+f>0ihCIa$)#0;>n?*(f(K&Hs z(AkP~!w3*NHqO+pKfOZ|j3qrzkiF_GMm~ht*n|+PzmW2<1qnuzSnI1t%A1>G+rqTm zPG+(^Nq}<7L=oX)M1!|}c}E~rBL-Q=%s^5Pe>v4+!B+=f>aUo^cRRK(hOaw{Ylyb9 z+mr>LTWa-|T<|TziZCe&DkldO1gQq)#&qA(@lNw*=NcoJ5-Al$4mF$@6052YTxy77 z`Wxa9hVjLop+WLb4;}_RH*{8dQuZmL-jZ%dnma+!wPHmBfOOkaVdM@n-Mw@r&3>I? zP(^_`Y047k192f%z}DXYp2-zJD0xx(Jp9=p}#C-sb=?$!IMed#d1jLkZ zVgkG4wm0$QS|}0l%WFkBnMr5{0v_;^ENw%%$d|as$ z0s)xQFR{DfjtVf>Ub1Tk+M_lhbM6)@k?bUky(Ln(7OBgY)Vr+*hnl`1$6tn#cxp}f zaD&T?M1Wp4bC5bk1gKee-vcqOzi}MVIgFRBza2(Qh8^CMojN>1dondZePN5q%lTQW z34q8hH_%u5KEywlXw7?6o~m`)cEz0o#s8Dc39{8{9?{Hv1MIE&2Dqg}(i3zgooK6J z;x5!T`zE7NfcWeI$GK;YLdYSLCVFrUBV=B;U+M9fUwPpzZG-TS)G@t^`-uaAc%KyF z+}YgB*Q4KAbSG#bN&|EFQO}Hn9YcaZ-Jz9ITxr_npJd*gN6hHV3jIPMAdzCSv{j#Z z3FUf(d?n7iY}-v*DBBR>Xb!2_({AWoZO3gZ8!4R%5 z!B;#3Rl)0s!xJh5PKH$yqQ!fbIAe;#sB4>zH`|=ja=jJ}T4@W7Dx&jM*AjpIsb0)0 zGHBr^nX-FthJ!Z0<&2%gi&zU`@>S!@;5VK!1&%kc?&24A8k*j4z%tq#42fc`qEOb- zc&dJ2E`gKKwK5L2&wdyQ%~H2D&>GT;T~y{Ve5nTart|f)tA<;Intdt;og1lf-`7n{ zuicz;tFmC*rhFHf=ogh7BK7$397)FAps{mi8z@O#J%t78f-M+zB`?L=*S=Qga!9n% z#*gCDCRC>g&V)XPwaJ_8JNQ4&-}D2mftJCbnV=8IMIg!9;&F#qZ~FqVOh~$XIP@@C zzIg_{T4W-EFFQp53jDx`AsG=n>F~R^|?sW+*QX`IJ zMHZaSxB)0pka%_gc*&noW+!7=WENN#iq{gxwj(qGmB}p@Au^AynmTL7wd9{BRDI^ z(%dI`Tzx-Kp11My8Yep+0s1wA)H&=41%UdUYv7%Aax7qpMGFCe1H~^>@rV=LB z>1k|ZMs#wfMeGHu1}i}&2G#hM3X;)MqqFy+AJy1k9v~eyu|iU}K8zP9GPgfCwYGJM zIOk+U0#6$*W!CWz2H)OkGG-Y}%{|KA0Ah_IE&#%TO$K9oC(V*=welwvTqLA0G 
znmzn(&~AcduaPai&ERvtaPnE%cP#T)33yWcDwM`oG{VthqIi)+5{&|!HjY$@)HX9! z@TLx*;fkSp%2dkx%414`*{spDePek?I+0sTZG{vVL_*$LTIL{+N{9)$%ikfsvItSE zA2jRDG^aD08&h-3TP8u76EQ7iVZl`er;N>@)dy7H00!5l=IuU}?l)!t^aCTT@dhuL z4M9W+L7O`@bxItcYD09+rz?=ZQa8`z`xC?F58h9t6fRZaGS70gy1`c#G_wdw56jU` z;`rc?=;tcStf;Ne2Vl%@X-LtevFX%U1;ee-yGO59sI1I9KcZ4bzj-D7AVN{@xoMu= zYb!PbeIIOp1d&aWNvdl1O(kwLLAR`PgvhF1H|KlHMhvh{nK1+N%s>48RGDmjXF zPz=xSd*~fe&XIFl&#YV(A@)w5zV0J;g`yVO24}5V8M2kzTw7YP(c&XF>nUCGI zt1sBH-HC$5nYHx7T8ClkTTs8->E`DU(OI67+FES=$T2aPm64Np@7dhVL4D|&I!P+T~r(zyinWb-25Tkbt+?$3v(n&A z$yVU^wu2+xBN=bEkA}dHx0O=>n7w*yg_<(V} zi2wLWR=a-ac*yc)`$^RkVMO-$=>G0yr@N%Ua#IQEzO?)o`=+0>tq4rbK`P0$cOEH_ zy&7U}YuqZ02~fYPeGxwOd<~qWH2{}j^Uq_L2h_rjBI20Tr(=_M`-!VsUq&-)plgoM z2a6iRNJuxz)X$3j0Bf1F0YE<6q@=sba z+VD47EbBo11s%WKl^y@s%A%gHsn1!7XgUP5K+lKh4eT{7tmwMj8o0g^_>+tXH8o)i zqleimZ;CExCELM(2D=B}JU|1PzIjk-5){}Yi=%R0G>Ox?RH3Xoe?N=W_&XU`dbH$T zQdv&5;RH}Ev`P$SQiB+`*y4m^APYET3^!e$+2Pm0-W~BV-%1Mnx%glLV1I`hVrJ3| ziO;r{UD&5#3vuia(&y}tw3bCwX;sbsVOc;ZU?pt;Gj!A*FwtJmgFFsV}4B?6* zS5hLQ`l8#&#>xU8N1}G<$|}9>F94u0G?T#83#@QpTmVH?rBDNznmib?tf_UjNh}N` zgD(PbW}rQ1zLV&X1K^3hb{ACogX*HJhjK~`7D>FG2abodj^d6I?Po{3Xf6Az=8$)JYpp2uOLM@w%->eiwdfH88v_|&uSu!+Y{kiL;OAH|&2N<>T5!BiSIlpbGUxJPH zq|>4!i=2SK&`ilUsX4$_qw$}GEH zN`rXRVLdB0Q(<6LE4z^zdPG#BHGRrUtx$Nf`i|=>s4=* zPOCCghG(73&RTIRD&;YoRKE|@lPs|bT58nQ>6;|-j+f}Z(Jm3;QQ9y$T2EX$R*u&2 z+2hO-x+iWmQZgR_JNM@I?&`mCTi`;GjQ{rshBvbdmYVYh2l6uGGqX#$JwUt+9_~?@ zE6nOl;trYHo)YCOulB$sM2VK&j1e=W+@2Z~)v>~b&s&1FRSPssM3oRM^oRI4_B-YR zsqCk5*~<NcgYk^vnkM4_OY_-4p0Pg%ym=g zI!JX8sD6?a78@TXP8tWH0=SkrStO;Rv;5zEZzK zTLU#5sN58>T2{%tlf5@Ahf#hwfc1AYVBM-3Vc2Y$@`a*GAjRgoidTB~$0+1`1L{L9 zP7&?Idp)|r-#r;gz>Y^-h`nOF85>QtL{63dJ?I>vaTdT9XI8Yw3RjT5zQlzIT5a?8 zYk!_wzjDF<-tFiJ4qDHPc`wg(fEMY4Sfs0?WFMAIR%S~uTvl;=m5kSm z9bW?E_;i;^Sa$k*Ck_~i_B+(!*L%&57{E14P^;bLIds7;CFjuC{W0T^n z3N>2y5qsk;>hnL_IE6&#v`d$Eqv{Y&Bvxj#_M;~_jOJRN-Aw2ljxQjIk&GaiQ0Wr z$ThQ>O2XUW4msg*PsJnRAOR&%p&6Ez|vgz3>P*ZIMXMq#W+C zQo@ccn|?at#7WIXh=IClzP{`W?-&*&%W{Wh07*s?zSc?DVd@u+~%Ctu(J=M6| 
za*&);qnJDCZ|b3=6_9xfH&gzTY+7)eoOrJGwXFi3SO`ZXi2DdY`@)Vv_8y+wK!*HyP)1WI$iaTv6a2QlD4Zu0HcFZ0z zTGK_HS-S70B@L$A{C*t0wnB0lY6@BMo)HjLElq)JuDDna_VDYuv^G`pW|@o-YJ*g) zi;O2B#=a8L*`v?KUhGbXOI>Aq=V^%=$X}M0D6{fkg!p70=!Dv_-9uMxmj=4M$%B^(VfpP{KXs#|8$ z&R}C$MWOBD`&l@K#V|{SIpcMj;OgKa61(8zlUg>^V~43qtizfH!CgmJ{t0u9+)6ln zMS5)x6uN$CeqsOJw(od%&4yd~hkwHSlmpe7e`GuYL!bUBsl>)N(0cCsN}OG}6FDk` zZ|<3&;IQ1P%^;sXEL`}O~oKkVUx=S?p0fMVGQ^=;%K`?sB=C4po5l zV8E0`{?5MK1nNB&p7TsT*JM zq@8BAZp-q7iYZ;ZdH;=DexIuF9eklb+WYrT!AvKy`We>hf}oP^zb54PE4QQm-kbe@ z!RY&|>;A=E4s1KNRE=KkJX;``Hk@PE-8_4($~8s2gEd>r|6z?*(^` znGG>W9?zYN%D(p6%jr#^;Apctxnk0$Njw}i7f>ZFl_-kb;FvwPHkkzAH=Gcot^JJr zLleAFE!uS5!FKnH)x(u{I?b=zot_&)rhPKCF4>iHX=lfov~oqhVQ{BC9S3taTq`cw z!|PX7!n%XqgFA1d(8Y#n5NhdY%z>W}^3>SZM+M))0~&vl8P+)uuK=iuds@bS)))Ps zeg>MbQucmSn8h9T^V2+<3F@fHW&%kc1-Zg3VcNNtxQW|vRDNA( z4*ZqLJLFlAS0n8eooSlY+)cQp-@vOBs&UrP3N)VDDdwg%I9j|s>R9#u+4ZUiR5Mn? zumX_Nr<{;Dcom|!RybyFcUQc?LO{MRF=llFD4+v~9^I(hUxNkuSBVzw8fN2tg%fYx z+PCrxbZBRLC7w+HL`0}?m4u+l+@^U$`$hC~D*#%NVK zu~5x+|LJWQyIty|pi=<1VXwfkUhImRFi2{y0ZuI6fi)9|f<9zqWREDnhUfo?hl9NP z2>VAujrg#cR6`-UE+jjWPz<8xxD%6^NqI8=3+)PD=}_?>+~Gf1Lij^CWPwngd@W!h$IhAi%x>l@7yl1mo_mM@>{BV$kiXr;C zuG%&-8E|fKq{&VBKV6kdGsi2Wy-zH#}sP{k)=I+nR>&PB(9 z{0FqH@b>q-PGdK)96W1m=W!xkWv}Y4h!E));DCwzTL0gu7wNNVsa6g@+F3NaiH{YT z)Ll2s&o9sfaZH0ex?ayy@GFg^4oN3$1dEQYj3f@FDtTprUqMre4q4In5<#&NhRRGA z^R9(I`_}4uK}l4wt@(`66Xr#(ipH(ED7lc;G}+WnCrx9y_9Gk)b7{p=vc7n9oL z5Fu9jtz~@!wFL#Lv#0Vh3+-?TRe4T4RdFsPZm_Rb2Axyg^zA+YZSMzj@z&(s+)sfi zt9|`Hot1x}bYPYj0-bZ;2G)Z$g)Vb}hWqcZz`_C2j?)%|2sHzCRer3Yr=5&S$Nsg&wSJqJ*$BgrF#{q}!@>^}V7Dig zeavD|nhv79%!0QFN*V3B?FI7Xc|xzV-)tl=pVm}pYF;FNt71|kgw4LvdK&XkMZ9Kb z)|brY3)c^}&t&kg&v`S;A8*r#$yK)YNcX|H=-aP!CEYg=Ie2p$(pG9>VKOzEGo02V zDY0`g`{QR1%Qcm=&v{C1EPyg0rU{D0y8~+k#R2TAz6QW;I^g?6SK#}xn7c;$o zYK0p;E@W7qe5IL7^uBDjEBz^0Id;=@<|&MJqvmXaJ6ZM?%5U^%mb15*u=qXmi(8d; z`>u9Bdb#xNy*U;Zc^BW)o<*@!CJM3-H9SwdtHXsQ-<}OPsd-}cIR%05q@SRK2(@k8 zn3-8~@$!zsGgsLHkKo4uE9g})!G1nxzJyq4Zam)X?tHCF{X(cRTZ@bBN&5xQ%_G); 
z}0cBBtQNJuB>UaWn7!2S2nygZHzjQ@VX|9-&#Uzx#e2VuxLJv4Z= z&6!BnKMg;k6NloqLjmR$UC}>NOaGH21jwx5V zSkNx}?&PO+@su=^X=iLqFiWf5hzb%g;D)Tv)OGgt^cN3BBuS>_xe2Fd*9GcSwu9gc z_sS-HG8MSS)+;`^74+u-tPp}j_RDi68AgwybU=*fvvTeOe*Y%9f|B_~(HvjZNiA>U zv-+?S^Pviz3WILk#1Rn2nDd z_%%0I(F-lmilK(OC41c0mraKBo6npP_JgIHPc2p5L(xgpSOTkeWBq}WhTz8qfG+#| zf7I(Z?pXOP?%<5slT$Lz%zYS%xsmW~mY2b&PApYgxgeVcr-4Z$TU;B2&q0a$a(Q8n zlaTVMwtYj+LZC9yPH=E5n)p6C)jB_MCsyQHw92>rQjx;G#LsrdP{HAVT0~;{@4^?} z(>G6FR#;LJat~D=s)&WC)QKt$jcK}pf!J^bH-v8FdELDqsLW9IdGBpN-ZoIGGRdFS zg9v$c5cY#$&W$}<=jUn)(r08yk6$J7Xq8Vy z8T4u{x<42biUI!)=<_ve|)Jz;or8~_2>a|6kpu$rl2d|CRY zwdGrd4vwmfu;3>H{Uw7FOa>VOrYq^FELOn#D!lo7D;~$k8S(>DoQ2>3cscYGPW6(f z$3t??-$y4XpWnH2OG9^if0l!2@pqn6#L%cV zD&u>1%jSN#t2U53YMA^V!jS9R*Z$F{t6gq3iig#^=>YfOpsmwc_On5N4dnpe9TgW- zd$^SuG8EfufoerFlShib(>3&0&HOl2LixBR@{G-CCvfdj9M!6;NwSoGAYaZ5@lvsoSg3_E!d4 zK?O6d@j;zm=$ZO%r2|ygKCr;Ltd+Hc`?mTt4&_W+#D}CIzjOD+`53qv^Awn}o=0ax zEbHFmI#;eTCYB7xrcXt67dn_m&-$?mthc|eFfCEhLdK#MT=V!%m7^DoTCPR178a9p zzG?@Vb)*}(splHOU@*q=SKacgSK@ct2Cu;%1$cCrMdI`k!FN<_W>P#(SiWjWin;gv z5l7T(G8sw@51{B3X~)~bYq9f`G3090jkVNU<8`Ab05lLUbd*<3NnTiCoEV z0`7B{jr1$g4Lov_|9yOHcJRBT0Zr#EkynQDnNHh)fgR4a>JZLte~Y;-MX$9I!103Eq8IYIVZ6iTx|oD-U;ZcVY#n6kV6Ao@ama zeTlVzIlZ!?+DGf_@h@-cu~FGakKGw8akgr>pA)-syGOg%)s?F!+%H(To#{)T^lb9$ zy)q4ZpE7IqPcCOd+#_v}W96`}IxWT#lW~I1VNm&4t3a1X%ZqQ9e<)_#V9r*fnh_N7 zb&*T`jHTnMH-8c%G`!ArT8MsCdvJ5E@~xR&mqCA4P2#O{oaFXJ zCpvy?_Zo`wCZxtLCvwitw%z!#g9^PjFI!g8!TX(^Rg{5KKEYx7A@#^%?gW|9qiyav z?Q4wZvMk9co8!#E1WsnvZ3;~Ndt^nAyaIe(2apf!^ID&%W?E!i)$FgA$~g>}fId~Y z0daU=$~)o8l``jCKPAJaNiLr9&1-1*??`tt*=Z>L!SLUG^7eowCV(B9jS6^{xZ53U z)8?|y@%ps)nW}<)Q1>C!Y2_snVG5>M|y^O zS(}Sk^yh^Pxf{AB={Jm3)>2Pcrp_{L?yt@6ln%B@7*vC48`+EZx79vHY+v&@efx3& z6Sk>jcWd>lweYO8xUyh1jXHHq7ybBEfx3_NFVa5PWXzO~`=~3`4td~@gq|=kyCqcE zZdgYuo@*K{{Ni?7)?L3RY~F^e*80ui;daDr+4AXrDXklhq7?h&?GJ)GP&p=tdZ)dD z_^a>GGs>#^(%wINlJEPSQy=FrKuzOZTWo-$=O&Nyx){}48@wKJWjC$P+p*2JO?t4x znbIzZT@o_#R-IvV!6EFxOqJ*8>ZW90d)$j&o;}t|ki&Zna?TS-wu8ohk}Y!$4;qRg 
zpdG6}kT_xuupX9berU@}Q`WTO*zez-?th!7`>)7OtX4*6MmmIquBA{J^-2E zjnnt%mPh%hL{x>rA-C4-NpV>UIc-K)gqnA`dWT_qbWPH`2wpA9y;YHZZ|F-IPBn2u zIY*od0WZ>;P|H^EA9@^%5wu+IykC44b<>@8H`bpw1!}&@c z0KD_xG$WqGwqJazYOYOgVD(&(5=v7%kP^r3OKYKY4m=#5AS=(f(9W8DnWI;N z*>y=lmDP4u4pt0FjwkOm>_=-29h3$IXgI%cQ>hy!j)}m>9lI5iT8^d?BQ_@pjk{EM zF~i}bp^e5~C+PenEnOpyMm{+V<1C<`+2qWNNIpxl^(7W9$WECrs7%C zClLzGy%nciFC(2|7p+I6I>HTAk+|>A)8k6&MzmI%`GP_TLxrOVH5!kBT@DE-H-j?Ghu%9ZI|06=^D%yvG>*c3e&dG znwNLufgCp3L!QgQPN}iXc0bmG=0v>n?%}txR9F%T`A$*Ax$2)=`=T@QM9nwpFT3;;|7>xseni4rH@k4Tu08X>fy27Wb z(;_Gj%lb*iJ4aILdu`2br=maX30bJ`P3Ft2(|nlupLS{fxr4J*w;8-6b%-G$OOSi9 z%qvSDC{depM{A&rO-V0rXd`$E?3RjqP00qXiZ9zd}}XNR!(l#B<3>zv@)x!KfYNZ*I-^wg+!i ztjbUg3Ix!qj7pMpJDe~m}M)WGQYrKiB^uO+F^`k`1 z=t3tWw8{k06RjO`@%dJl$~5pja#5M{g2{h*ui)REC0f;s@`*>~Um~%P-~FtGKHK!= zvs9F3<31Lvn<UeV?W~)OnMM(_A~D-6{KVUH_bTD?d*(4luwyn*Sa_{~kgA9zlOH zW&c_mv=X3kyyAQrpoT35T{;W92y~AZ0Db@y3C59?m)VPJ!avPte?IGYD}iIdd?!AzDOX-Ib?_P{Rn#;M#rfgtj~DzvmC+Zz>Xw(tXx)J zKHlt$+JXT6si*H+Svu#Lo)gMCpH;BYK9pRpL^pL#nOZyilL|}r+^Q?Zno7&-nczr9j!D6onciCww`Qr`N7*?2;=|=uxvJ5=20MTuqoVacmRFhAS-U+DbK7*SyYD4qVCyqZ;}5Nla1m49qz#N#(d( zUjdzTqM+OD0WJz=fonemB2O5N(LtbY-~eQ25KmHux^p10n=tj`k;-(ZGZ!Fg40CIa zp<&HOE{@UFovD{7JNF@sQ-L6GQ$S77@3x7aXZSGIuX?q*N9$aZ8Bgw`#JqW%&z5p_k4ZO23(L;oeq^_-Mfg{ z1Vlt)#>`D8?{QbZ=e|3kX>Z?Dv(=-{|9Ox%whOvX)di*6-OCRtFR;7EgLwMnjqCoq zvnn%SB1BETTH53>r!%M!C`<3+n^*b^N+=+mOULy6=;-hgn|u>{>dneDF4ix<^lJ5b zDZ3^wMAS-IGcec*-pmXacnwshe-RF&yQ@1O$GF#QV5Hy=CnHPvg$W95mzsU=)Sd0 z>xuVTVDuwAgsPPA+mgUlpb;2H>9+-Rw`FqOG-+PfJSqPA_w?5ahW2|#iD+)4qBt6c zir2Fpx}Q>rue^qRx+nJ1&jl*5At=A7*uUw-JhQ4+mTuFw%w4Vg^vjn{PC2p@>6cyq$Af+}vipQ8Rd2m^MHSm^gU6gT zF*QoJC3W7qXt^j592!b5Odo_+n8MDfaN*?Pg2bvlC;ul_FFg59ae0wZeSI!7`mei| z|K>CO`_}*c)c-x;|F6vTQ!P_F0uJohSxM4!+bt9!JQ+;%0j$@D^TWBBT&~5gg@t@4 z^p(j@eWWE@`Mc}A#pZ@)61x|!o+I@K5zer@J^&@yZ)LIBArIy64=RP>m@er)2~oE*!b(%bEEpS2w8jZ{Z(9PjNZ+U3~)Pjq`+ZngA5a zvOz;hv3#cvDXT*Rt^JV7V%NMeV)S=_eec`2%?3n3_Sn$SXE#iH_-u~#m)|Xp4OA9^ 
zt>2UojNMB6rxrza@Ll`ycF;@i@|(`FfNlm2s#)S~=i)^9300J6&7{Rcqh*DmJ2>gN2%I?n#P&-d`owU?q)_Iw{M znxZa_YVpEtQPwU}R1{xOg8(tKaiV0205OeNwFLT^Gw@&hVY}GU=L#smwNUo&Msxg3Z@E3W;i7qNY4k8xujZ=M~3KJul>I{`xi zLK)I8yDefbys#>6-}jG>5ZO}QOrPprHpQCke;s>ReRI5pl}hVjpu)PD{>@L3X=XAN zwWTl&syB#N@78(kea{4p%CH|a=4tqXSpMOY>FUYlSIF%aaPuC{fS9)4K>VCMs?EW! z`2j~gS{fn%V&_V_flvr>{xRP4)K4I!a61VnM_e z)36{^UdsW7^|jSJJM2&oJYrIX*tM^Q+B^p=Rs}oIQHfg(H~QYW^UZ9t+c{-y*GA`S zMNMI&{vvXs#tGk&Xn>geYO?WTi!(Gms8VZfbfc_ys9CELywe44Q~@A7u-Zu3;Elbx zF^7{G8xuxE4buW#rODJL&0vKGM6!-RzP>z2y66cgW^;ccJRpMaeH^@VQ}H5vxBti) zRXuG2)W71F6;pj571u}m4#iK2PkaZA+>p=uKOBqqI7*L|U*mq|!yI@wWYcqeQf*o4CuDDic92EqFEn+dJ|oe)I<%5o4ZFC zQH1WLTu!5(WPs`ySFPXrlJi;y#6(*C2--~mGFyS$!W^~1PhTpSyO)1tDE{NVf6{3D|IZv2)`ny`=8iFKKz>$}d5M05`R{&z7L%Q?bIkJ8!+BYEls2vb$txOymDr`M!N`N0B-d9&-=+43fH>&W+j^WH26fa}| zJPa)9^pV6ca|b9wE*lgRG_2f659UjFSr``?7gtHQ3qvH*js!|oeMvBzgK#6`v1E9C zJyO3`!uG@EJJ;se_6#k%XfgLN z4zG)tR}ZNMH@#f^6h0mF|FHL+VNq?{mM99M0)ir$GLR%n zmRKMnNkB3bNdzS497{!VD9K8coO8~Sa}Gt$IcEwe>h$K^d*2hz?bqktzWu(}-S3BA z`D3&9+H21>*IaXsImSX;1MY4Pre_Rhm*@D2<}_2!4ljX&qI&T$%B)rZ;2=*Hv9>gV zk1=GLX2l0zw}>zR=FIh?F9j$tF8k>D49#m_)O5{-@85 zu^jx(Q(xL)wN)K_CAS}}LD(T0Eg`9);<;~p7Pk$C1>^0_R_3F1d?zzFAyUPGiRoIx zwcUeM;`$S#q~RR?Is=Qcf*9G3jyihFDy!B;@@D9*o+|SuPK%?WAV^};I#1K#c8Ke& z^CTyE(#gJtT5I`1Juih@SgK>1s$pMN1U&pGt>yMWdm7fkjmT7>u@wwJyjOTrpfl>w zdBOW@@Q83uP20c-&ScIRK5xB1&XydFP|HJrM4~WDOY?+lim-Y>drT&dW2nk6?#X0@y<@-;|7|c~iMD7PK=ITR3|zp(f%9rCXIHaHofb1>ULYnXx!|KW)M}M!pQ$;+8LTYhB1J*RL(cYoOi1oIt=mRU?2$_m zs0g!GkDU4XS_OXR=a@UW?B{Va{WkoB@nR!|yqvWD2C|DN4{V7` zHj@A0v3OL9*BI<;*_eY z@(cE}Y|4wDm0nvAX0R;bx7tXAWy|*4#=Jj0ZmZd;-ipHMcR9()UQj>PCWnC~;!yZd zT%i1I{|2bMm;Hv;4Ew`vX9c*7YAAuJ_RKnr!(EdbV_-W`mhjFZR(Hv=NALBwJU`tB zyK7SI+Lg}pG7{T50-_7rtdo)qb`IA0aQ_mjq|8J*c#M z#b{4Ka2mfp`Vs8mn7RRQ!-vGLRK#YcttoNB?X7R&bLZfx>PV;FIDQY^4icn*{}WRV zV011SJRqTn0vhLjY<&kx;6S7qfF|=VxU1Z_aRvWo{rK2g30vpMIGOUT&Jp4lXv;s? 
z(KaD_d(9o)`Ta|SDHE7Mck`P^4m%101&T$*hi(1Vr>W2Ck1I@g#rKygN`tIdzu55s zWg}m-#D8K`hEqllAvVTThl|x2EcHcu8P{E}Q3LDbk_YI$aaz2%7!VPoliZaqRfhY7 zSSKKVJwlJn)hI9|Yn>2YBX#VtZ8KsQvfO2B{$zRJ)`sJ$0sy-WK|oRN75b=~Ehu*A zsWO06-ymyx_5M*-q{du4d;oBz+fH zAHg1jD>8VfAu5+&Qd#*;FsRGIw}Zs?1x@As-_Trq(T=MB@wujhUbT1dWw&zO^HW6_1Bt54xs02# zfdr{xZ||>QamZZt03T_Fa+w)Ss@BPwOeIWzI^OFbGli*F($22~3_1ELHPw6ORm{#Q z4MlMR#+iXQ2fXnrpHm5(oE0>*NE_`tK7{Q3>2p(TEIQHuQu^ccz@ zdh(wKPsyNeRXsCs9~X^mM0dw)4Nd6Aw>PEsvNztCGN>%fgV_VY+Zljf8y54SX@qj$KonucfKx}+5+rqqlzp0I%8$%*br*988F2nT%C?Sd}fv);vQ$VD6oRIpXo1wezz zMJoDxIbnDztYs%ETbS`g^Jj-kP>d?cxlQwCI|%FA{%>fy^?H>R-cNK4-?z{ZpCkdk z+*P^Mp?@1hj0@&8XVL~{5K z@Ws%q#5V=+s5r%zty`YQr2w3XQJ+QWLFF08kmTV)i(ww_@q2T|H9w(+C}?v^pa!Vn zVe?a3!yaZ>q^JNYY)PrfSz(CVs?MCbU%(A&+kNT){V)*inA{J}1k$3eG{TaET*}Nx ze#?uCW|kXti(AS1snqM~!(frMmKC01uCc=|9X~Mf$px}J(cg})JW^7e8@nxBc zt(iUDs93kS`AFJs_2_S6s`lZ2!{fCLgLER+*6*ou_0ClZpv8w8Dx3YLeBWe{RXyjJ zfTTf6@lGd&`4|Ur?CJcl@E)wb)JaxN^d_-@(RR$q=AN4CF{?vL1S3^@Rq5F=9-fY% zcf5iH8qxJ|U;3g&Af@tOtL^VUYdo2bsj6#OC2KcAdV^X|DTshV!$@qSi`x)(bvdm^fs9ApUS#MVPVtg=7&i5IWa(yA^+K zRA@LWi>du?y05`+cfe_*tp*|e4UK+!aeALeg6@UB?;KS)@k6oki_@;SMq5fw;Oj^` zpxh__Z;)f9hXI-D(++-(c5EhlHfP`7yBp1H8a(~F3}4bwD}LP}UXnj(OTS`&GWI2% z;VkN@Rdc?-lS>9Ix<4Y~1{Y79$KzPaaO#?p1Djb){fgXDnLY zQIof*20#FtP!EuOUqunz43E*#-^Hz$Q&Uy@xwNz>DT1H=bfaMRD?KE=1pw>t$K8XtqPh5Uovb!@V+qZn3$z7mmk`Wsl1Tk7NrO*7NMNS4ltHJGj5Q(&9CXc z6!9zh(&j!mY3Qk;s>AM1ofXsfq4LR1X%~zC^c$M!*8`5AUq?l6KF5som*xtw^=d7TPSDTiqg|q=6|aKR;eN&JV81-#?c{o%>iVf0jPm{oyk7Xh zN9?#K;U>|`1Eo$wNuiqc5;#X{muy@}FI8m}DZCRJKCHCEU0yEN_bNl|jG;>R_HYK^ zh=5AnEa>zUICA%^8k4|6>&)Irf}V5x@BeyzAelr&LHK=DJOCl z_ms~oKI_*(sX_6e3oNtDr>Ho>f+?lAH+_soI5B$X7De(LIK5BH^# z6CKr?f233_54&K|xdl{IhXBr!4wbq0)uBb)yFtG3jJS$GlYNtb#s0RBg|KxxLXLP& z{JrsNK``w*Xx{=qw(jL&jU)ViLwgLvg2Dkz!KoCLe6I5Kkd*C-W7gJG@k!fry#HW@ ztXDKw#(ehw6l~e{0+h{D$*nVT!2R^{TRbBa>g(0E$-KuZgLF=HbESXj%dPzGs@Wy} zx(pS#oHiahq2dzTZlG*;zBWBV*KbXZqTxQ+oGLb?BFqbRuMThe)y&_j8V++p?;)wE zio>dK%*+&uWa)whI+%&C7iUC6_rM5IR1MH_5e{j&Pq2|3-{bGEh-Kg;P-su(>5--> 
zT-x<{OkCcgbs|HoXYqA_Jr&9$bxNwVD|U=q%i!lkJ66lG{M4Te3l#x}Cv%CV;cu)v zj~@#$2%-l|X&VVJ*ML7*E_7{Wk3e?xEqKwVbuj5#*2Tsd(nU5Gg zZPvUQPbFPihsfddIGOT%kEYT8JBF_%bcQng;m6mSUkx0RMfurIK^GH{*(b+HW41G9)&7$KbCJ%cNrdEXKt265 zGclpu=-sNc`l{96YPa{A6h1FH<@6|?I}zPO#612goB@&4V@|)3jx;vA<87vcFGMHi zg$OEm_Q8|hyVLr0@Z+@{R(gEsou387Me9CUfVPmfk32#Nx@l7><=TUco6$Psm)QbW zLnDz=Yd{gDugL1jUjwu4QsXf#!V*7cz_%9fj}X|djY)HtrZF+ z0g-g^0f}5~VB4ecxE)aIcWomPYf;5XW8f^hPb-z6v1(O3?89_zaZ`O!?Lw)gJjM9v zxZgEaj}z|KAWTCnFjbtZdKwEe%xLi$z{jnm+v#|_GmvnS*CVs&>{98bDUz|9Q4ZX0 zMiLqa?d(k`4{}aNxPI2k)Koc8`vPKxK~c5KYDW>_$GuqD$*T8`$!ijeO+^WA=LW4( zRX77xZ%u>t90~oEWD^bQsPB3XU@`yr#guh=+h8_yxI*)&!<&id|KQ+4v}8&+he z$WCLsv{6IOVV=NbR~!Hn{Wgp$&RW+@Wtasf6DERY1-i0tSpQ+SHNo7O&OM>nQbiK3 zpFV}{{)QH_=-~RkJRFa@(45qJx0Uj%v{2hkA@XXWY|~e3oSt>~=W?yQ=OueqevgPw zt_Q?BzQ>e~MGk5*^f4V~k}~IY+}2RlY;q)udrXw#KzUtMOCmRG<#h6V*bjPR_{m(e zez2tDofD~Yi{}>N^xZg^nnOh)av`NU2@(==h63(3=id~<)$u9;<^e5rz;O?Vgx5%Jea zqNH^Aoct~E6P@m@Bnz%saIXhDtg345co~CsvcR(sr*)2Erq^ATM`2u@Jrmu|?+cBR zD}d}Y|J|gFvcu}+lNHrni5?ml-;P5MU$~^!cJ5cm<$tz%F*mMgfDF8i4b^Ri|F-@EnW^5m#7SZqz!t>mZ4$ z1+sVjhIBDbBe=5wc!qzX5Wx&T5CsAE<89wLZL($nWc#ML@aDgYKZA>j1``y z&7<5oa>g7Ip?7^mG?C=FKKDw91=VK)Ul8Y5x`@v)Bb1d;45kvSyKUbYJt{WCT;{&f zq;2}^q*`~`IDPQST18O}b8tpBI~)?tQN2ER+$Zs=cg7k{x!Ye*JBu?6o>hU?GuE_h z^41QY^b}HYaUxKrv+jx&u3R1!(ISimk4mOcDu@&u~eRSQME6)YIFgug>N2fn%uW z>NzbB)ZkXT;9Rc1qm{c72yQ{yhs_@PE~&hV?5Z+*+VUt^h#QlV;D+5HMse}|v@y~f zl82IywvWc!+8I9TEZpYwSo9|6ao(48D122poMlqe!0^mg&zWH1y~~&P-)FvJk-O5> z6eh0QA43?C_f`D-h-1YW7nm107k}2(QRcpQLGG;7FC`&88R+xunRwDEcCW49_@-7P zkD5CB=emUFGVHus_>WCkKbF^#S+J?kAwn9pc5&dgA^?ATqPR$iIW)IEY zkNdzk%?7sS-t_It2wyKm9&=+itpT&m+E)ye- zjy;PRMGiNeUk@_X4BH%kY2pr${jU3Rr5Ph8iQ{ackoH zhU0s7>)mXda)u-ViF$LB){12hYd;LS#C+yfsc!x7~$AsQ$|`Pm9bjPBOv8n&?!jcgI;t%$X7q3O-ikWf{$mevQR zcRKFz*c>kE7Ls}#*8a8-Uena!n22K+tXP_cLMN(~9pwqGDgWHnzI6;&m zBV7MvMUP6cC3};(=Vgztk|UqvEI~=o`!J6gWxdJLV__QdZZ^cwHntJOZ++}2AWg$H 
zYjqpT*P0}ewbdi<<;NE-$xMlfYo#@`y&MXX9DdTOE`w|(ORUrbr3;(zX#KIIes~n)_Tc7^(#Q_iTdE;)%e8T{=KJij!RfZHTC*J}eUYm!eW*PZ zgoL5+0BmR`?3^mxUwJ2q&wPi{Df@w}=Hq?Qo4bT4VG8_3*S>UqIUmyMn z#RfQIgSHmq>fCA*0B-_1nNs!|rvI*uZ_K${1|$0{eq({`s{-EmAj$BcbfSR?9`g`` zd+oS64Q;_S*jL&y!th5ntJ_^7b6xn`koJOK6zcycl|K)zqT@ z4u*FfU+<=im`7gw_2nCuKfA2`=49}o^%bSNI!3Y)fp6MO7~v+F%Ma@KMNM?bd1H+e z2iW=bWpd=+rK&|}MK0>i?94{nzagnSJsJkukv;UucG+x6SN zOXqp#atS?WhFVU~es8-(CQNNiE7l|KLDrtbem%YElP3B*KXW=N&9^+Id874rGp9%f ziwWVyo1||m&Qu4c?mt-Kc&!(7=bZK_Zph+G%Y= z1hU)rs#nhpyPQ}d%AFN+n4S?kaQ(0)iGAm7m6v(c&7$Cx*`RWTpt$w`k}b*eJ2F*K z=284bRyBQ}%0utNx>Ku*0#VI#-KJ9tyX-PWGn?A(ux>wku?+^(joHaXA^gUuKcpjL8~YZr9a z5;lEP(rpIR+fX_6<3A22QR2thQn(Gzj5_RHyy4_ZkB@bJL%WZ1T|R&u$6$zv+33 zcQ#2HWZTM0UM?fxi{_n*r*w4k9%6f48je7#BW@&9=Q23oR)Z^sUY40?^=8Y{9Y7_| z3K6WviMMuyKTZ+*9vcASmKet;=_s-*v?~RHJqkGYUTfF&n(nS&}d8lQvrmaw6C0RIn3SJ*vqs=!MZd(peIw36Wxo%DBJ59OFwiOlg5mpJmccCrW+Ti6xh zwOX(5vt*AY>Lrb<07dQtUgYLWg>cudpmH6RW_|%h0$nn%D^a})B7%=|EY5>ER>7C5 zCW`s#P4sgLp+O*hzXY5Xgd~}fw`6vKY*_#A*|0V$@yWw;JU8F?^Ku{)EeB+xuir-+ zpeBGs`7=FWhOtl_qn(wpUVP3GD;c2iNYV3~47EFt!OgJEuBWKa;aZ4$aq|~!^1EAF z329I>L(sv4i~Rf-0HOpgWrv;KR)x;Iga(jxlv%fwyIu8kDVP7^bBAnx6~b$lb~D#> z+0xpo>3x{Wrz2IzpB$dllB1IHQN*+r_=AV1v(;uyrKN>Ir!kDty7eDt;pM7Uzh#zh&GlqdW2*7 zI}Q|ae|_O!Co>U{G;%m*?o;^neSe**K)ng>`H9-AyZgYe*9+;TjZC=!`C~hH$$nXR z`TKl{EI}ZDJVVlOpK2pHy~{sT1Ix(i9N74u?R))U<5v=(>cK8aofS_Fd*WlB($QKc zc#&qe1=8%ld{^V4pGE1F$NZfI;&S$`&{zNA7qhcwT8{6bSUjcky7acc&gF&p#-Fi_ zs}ZeX+%J~&qhQd)T;MT6g4cN5aYB6}UnN=Tk!!f71-7iERD#t8%Q(KoovYl~bnk*3>5a2VbiakSXe2 zhFP5x2w!^DUnkOFNu%LU8O6}0^IK8RFE;d6)rgZT4oL5k7jIT=F`21k$z|RH(z`D( z@*3NmpBY)}P!Ya_#MS}_J71UCKOF2}KGqGUKpxz1Q7l1iwP5f0TX5e3xs>i@^F6d+ zr2D-r7pxF7t<^{PKVQQd(H-S07fbX%a0TqvOtXK^BGxV?sP%s}pfrYJQqmrvo)kEC zG-Xf7#h~;|(Hp2IMWd^{*R2jM=FSiHi)WOP9X4CY>n*<1YIl;SCQfHYOd2Lmj7;mx zS*YzTI%_LADI6E2-y|aaF7yJq<~%O8y=wu}2e%{sA9FYlf9n}rh|+vqsf#JH+$E;Wa}5v^5H@?9V3W18RMAgy+? 
zE9mzL93+#n6V(FzJ2=c_wL-5X!sK!38~ttWvrbnj8FO>&tUGt{vMken%E}EFJ?~~c zD%N8E4ejPaZDs)-Tw0(zvIq+1d>|`g8(sHzivoWu4*d7;wO{7v2G_&zW6O;)Pj2Nt z_=_x67iKb6Vw;Zgjidu5Dw)Ha&%Vpq9*JtIEt=*-*mzTd*9FC@W5zAyG)&U&3S)Sj$sBr8-6FyuBfnI?V(sP+Ay zBSkruI~X%1+y=1adJ2aCq1sxuVx9Cs7?p#Dqs;*-OUl&t=EM_YMHE^4(QR5su4b&iH*59_bpM zNYdh}Z*JpjmB}YD1A2Q40G|0fKv5qd`l#!os0SS~_g9rwFrZXGZ1{Zq^x)i>k&kzd z_#ThLZ)mc5{I^F{fp8`vvm(q%F>JLBEF7Lp5;~dsFaU~p%?ALb1sMUL^fjO~eH%PW z1T3L*w4xs54H%MMsay;3x(X<93NHcc>mz3z(x{k?1A6(QZwTn@ImvoGONV8WiX_cA zv$9KgYs>Yuw-{m@Xy_x#j+#0%Qs|mxJ4seT^NcTt!NpU!&lomUbk*CjI$q0A-bd_y z5+v_IKBc%oA3p@p#~*>wdVqd?LxN28T;x+t0OJt9FAqdaq%tTDL6X`_moS!iB#x>x zHsyrr+-Qp_5Ha)Z2y*|7;dCED0OUC8Ip$ zF`C`^>mPaJ{_zj`@yhq4Q!~9IT2Q9ur$N_@6~e3QJ4q1n6!XU21L`7rAY<^wxH0ZP zzzB*!Hv}N@Y3LaVzKhUVl3ki}+{ic-+8A^<18v2e{fzN7N8oAq)arvpC5Nz|0zdE+ zfU5rg%DAqMg%)_;uY@SskUu;N;G@5ENg@glc7fWzbAj3i0@?avmdk-ulwy;Fuq5PxIx!8-ixg zS}3E@cth0sJIGl03M!Hel4C|@Xvuar~$df4@obKtP3QkvsRb2qM zoza6|hBJD<;k9Nc3jX`7En4#fzS*%QYHx?6Vg0YYeKY!pXUtJQgtG;9?2Us?__|V=HAO|sn z4eSUcKd^xf;_?;}KK`N#4gJGhS zQKZi?Q7?aBZ~g;mx`JX?tLDAo`8TD3YVfjv1U>f@qIbw?n91Nn2z92Vj=7<&jVVLC z&wH^a-0w4KJpXBA6Gdsq*eWrEO ziGxZcmTC^W?ULxFZ*YHwEh94_4PpR`$2rMgp%j|wr`1N&EnyJ`tH;&T?F~nR>CoF; zu7JD+va=d2m^aXB%d8+3uG8RLdZDBaBn zuocHyC$z^YJJZtQ0gx5_Q4Fi?)}4c6%w)J9J@X_Ao-Z zAG`!m+&bCl5S>Yk+h}MZaapT#B5boJ7*Tngjoh9Cd7w>b+)j=^+7C1|^pBWE5PAra zL!b0mX{B=^&rq`BdV(6$g_`&Z%m24H-2Yv?ujf+#)ZTr9&SwB?F1VY{=$QnaM8C4I zRH}`Yee8V)@emo^F!1u!jIJB@_`&J0iR_qK&tep<$kli$cW4up8b0=O1x1s_E!%bZ_L&5?G_i~3$Iqu1E7uT7-;()^t`#hx%N^N`( zj0cE`WaTP(wLBB&uWLsAB}7c>oD+;+fL8X(QCM#=&Y{UxH8*yqzB|v}S_poqioLlq5hw}- zd@ojel}&lR`3OhiupLwY)wxKEgk-~KqPqY>wAUa9io&zTMLMpP<~f_Up0kJ^>3cu^ z4NX2P$-Q623qg{b1&6hWAS0m}G$V3QY>|B_==WY(brhh%D_rN>42^$!>I*I|ZiT`P z0OZGdbmHj;Sgw$ZZ18VrrCO^Nr%7=Qw6&4vCN>Y0ftVJH9S9;@nqjVleQd~GX&J&* z-8o@C(LSJXf48^rF0*R#dg>BLF1MfS0KUhSmepD%lGJf*o(S4rgvMz2ZjC_?x$wkM z^X|yZ1cFrs1Z7+zOBl&lL;}1u@^>Qaf3oxcZ}6ua`9^@R*fW7o{ztuTtsWEY-4h_}0n_q>YcS2D^3yr0Ay7Om7`B7kwt>U2hN$eP0>&TOU(+V}TFCh^ 
zyH*A9G}#>Xl_DfPymyBDE6tOEuo^V*4A47L}^&o(XKa164knX$iL=9n={fR0-m)gn%VB5HvnQ5<*SrLj-GXWPv6 zs-XLriPb^1)xt?^OQK93l}Key!juG(;IV5m4^9j!g|jMuL-RtS=8Inv*>=flYE(M? zh88iU)Ni9=)5s(qxq1@(VNIs=M>Y>_PP;<91l>I)vBh*bMXD|?J0SgFAozb%?c~4k zSg(C|{$W^p!aAPO3JnB@)k@a}SZZF+A2zL`Ase%u6DJ&J#vEOuh+qxGtzjF_b`rVF zbhH?Y6PnY%=sFBLJ>>GFPO!(8#r_B&9e`Emb0Qx!fO%%x^BhTVaz(4yDPEk3c$AB=C zPo)UJgy7T+Yfd#`%5Fhc2@PH}daPwTlgstU!ECeE7Hl>B1rmyq{Zi%Bqa+LaE_h^! z2OFgCL^&Koa@PmTr5yT&yhOBD8raw9(=)f;$wi0_08m3!9W$BS@N3Am(3%JRRI6rv zaSA{V1u~XUzWgN~?2CmADzYSq-)YPJ-g9x%r3CFRW=;w6-!TJyZK4@PN*SVZw#GI^ zMS|?&R*~!R2?eJfR7lSGmFdOO8NhI~*5bU$#KYyIEi_ zlA}mI8Zvx!@1!%m`S(Jl?zYd!4psn`OZuM>+nDXo^Jh{;a-foFm(D6+2bh>V9XL|? z#%$#>J*SmXxxzJXB#F{Ap3+)I;I8e&?WDwo0CN^GXQQCRnCf$I=Czli zUmO)L-aO@24dzp7FR1X8BbIsO{?$0;FS!@C+o48sc9D4ilHKS+uk&|S)PMg~K~e7B z&;$((3*FJsQh(vu-OaW?A>l-OK;E{3M*Z;_B2~St)WC-S@zXfQXf1692=9w2%4?i8*ic!??EZ=G=jWcy8~c;Su@U$iLcd{SX` z_&kRa#_%y1z^jWD`l7YJeWJ{7BE`cazIG(3gnB)gCY_THv0puo8ki8i_&Ygva@Fpe z+LCkPObgEqhRto}-Rb{AD*SE-9!Fa?%%r(zzNLSwR%VahjyCZ#3z=+|0nY`gFtiw; zl25ffTSQ6ja-26YZ`YPXqmXy6{g^hQ6v%h$takBskVU!01-wP*8Je5VjEvFBs!VC7 zzvF(SA%P*&dwaw93jdhWfkja|Z?gU58tO<-9#1YMV0F|W5FiuFasMkZbkC5jvn$=b ze)7H2)3S(d79geO8}?X&$=Xh=?4=@QB7vPsRds1ziQ$&+E%$wzUZ=?A{yRPQTdB%% zO_SY2y~)^XE(o5DjQO7|%a>~iLzc;!jFGhbEe8N)asoO!da^~D3}Lp2k!wY*7Bh3z z+Oj99n6bb)VQO65gy>xswfg**zCmgA8G$#&MF>_Xg9~1an@$3ROK*gJNv{5yX#Ma1 z|H;I=V#M15=cRnL4r>Q@Ml&j{=XcSA%X?mec?#8+V6)Tt4wa3N!K0&Q$&+}_{hJs< zBy}VNG{opurNlq5J1WN|Z@H{@d^1hx=IVL`@N`HC8D<+Q?>llj#e{X1b+_s(n>fvo z#BR0sqd?SqW2D~-V?th&Td%vng&0&Qewnh3{jO)q#8N{Y_Za?y7Rn zF@|}U{u}D2x`PiRhr^&;A^Pc~>K*X6S7#paH-sR+68*JX}dW?Pi|RlV2gJ1852V6%7K%5sk9O1+@7)Ck)QNRso`JDFm& z-H;jq3;y;kEd+t1^2Y3J9cO%oL!iN49&J%6C6v^qTeOT+v@Zb7^`^bEp*C)eqcwJs zYV%3bt@4AmG)9hWI+Qtek9=^lLyoJ3(H$v5 ziM*)p!U2Ztxnu8kKW!mwl4|4=LnB@KHd~lx%3qvYoIPaVXOjYP3lVyG2$by z{~Vp52c^G-LROgm+!WE%4L%!&F^X7A*y@W<;WUdL)g;DOv}C=eF4ZY!dXGv3qPcwI z%vE?nmeTR?-kXl9SCpi?>!rD?@{_7#Q;kn5OFZYE3*QJs_Yd^KtbVWQU?5_}T;%nz 
zR5F@LKzNP6Hq2^o?)h(Mn6(r=bDFT+9xb-S$N+P{$q2VNj)BEvQss+xI=Of!k!bTs zL4e05^78~;YsVF9Yul9J@qR9oc8TAM zY4%Tq0^b3nDNfevS{PlkQ)Zp#eH^2azy8t4xsqNI;S$whVD`S-PKwC3CkI=|Fkw~c zzT>cyURY6CGvb|siIeW_k=7-F$HU3>^x@lPNAullW1-YR2!nFys}b3=(+Keza-;BcinDL`H{8dGB`!poZK4WR%b1LNj54MkP3Bz-QI6Z#RObZ24%|CCQvKoC z^D zgR*jz!=4>OnWnuKRrco6kj$|HPLkWB$jvoFCjxtQScl@VS?ztzt)u>W5YOHcsX<}D zz8IeMYRFrNF+>qLB^8^hXU)V`Ks+=~QHGIEJ;-@8Z})Y~-QMt<9}K<(<&(EQ%{^%{ zQ8DDZ;^ z8r8eaj*T^V!Q*N}ag97-hoKc!yJ5xozO$5^4}U|OL<7}0Fin+_-*hRRoxY9mYvE~h zmn;rn(hqStX5LG{w^E({ad^o8DL0#sVD|OSl$SBi4->OPv9Fm^LI(LPm_3+8Yd2Ofq?jvx@J-`*eo|@i7U=l`!EXkViWZK+a{>Rh~EVC``Qva{< zix%!QKialc;BM5f%N{x@Kus!4X59C1U5-(jht~#dL3pg<8K8pEOfyN_vzCcdXzWc> z$SR3#s2^i>C|+k~p2Snq-POu$%YAqw6n6qc|-4o%pPV3 z44%G$mmEv?eSaEC8}RW4dQkUk=yr%Yr~%hc={N!c^z}LVHqG@J5^n zb+2XJciWBL!5)Qg?gdHC5Up9dai&;H%san~K9K`7LVQvVbL*xcyKh92>b(S`zwRe# zCAwSMx#0Y%$d#xUv-i)&4*Y*|+R$YFk&vx!VQO61@}a%SU-s0+?CHylg8N;^|*o(LCtF6*$i+WGQ3fSKcTCTkR zGu7afH4$54Pp`5n6a!zM?#ID63LK_fy%QD zz!2#4O&#sJ==6ONX9#rqUTaxYk#Fcu3R8c*uGa^R+CK|Og*^956*bERYKv5h#}R1> zzFXYaFQL8vi;KwoR~~EHsL?QdEHGK;qU%y7>`9!}jG4i(5KJq~Lob+X zhX{{w0o^-iK=U$0XPo83k;awF>h4Xv;!mq<%Z%`um{ zv7qEBgA-6eebM4J@y=3Xi2AiPNc@+8B76UdBrWhOvR+UC(FJD)J3I&FYBIuCccN)9(Xtczbp@% z5zjf2{2=H;mbu)g39-p!cF2sqYIpo$h&h)^TvsJ%-00=rJ2OmTS~hu8nB2E#h0e}63?C%0^Oh1_hGwL z6<FT3J#&C|W;c>@OEXuUxE3@fhKp?vN?1!K<Hnp7d%YNOBa-56A%4Y1nmnSyk`RayMVznK|Wfj9Tsgh?Rly)|3G}ww8$7dOd zr^YAY8_RcR)ev+fBYdTtrFKOYQ{w4D!^KQC!!%?lO>W(%{Ae9G)O*Doe_gbf>;-mZ z_H)8Rp1$azDJI+7B1;1l(yo}-A20X)%UZtjybt6+pTEd1+*8u1UqjTDy?25#k6OzF z|ApyDE4OU#Cyk~n!s)5O95jR+&L+xiUm$+P-Q7UKu<+YL)&Ij@?YlY7`d&bU0Q(@# zmvcm9_oy)JrQGw8Q#{q-;ffZ|D%II>Aklpcaf`R+vxc-8Ux$Tbkz2HXDx>3x++4q` zh&`D2VC`9P4xB9N?V=jF-sXheLoUJ)Gk}OK*)X+YA-x_lUYpp;$qIq3LHI#t6KHuIiI+1&J%!IKNpi6XkqN2D~-&YhB1p=5EP61|y zc^*Kv}M{FEjP8|(LUnevp1YA~4Z&>;l6_nSf zwV~6*CgIQp^!R0G-B)xKmL4>fm-@s+#faZl!#VA##TfNzvBcV6k=ff4;kGVPFn|=B zQViQ%`x)^>hGASXe^i~zys|V_z|0|sQYL%UpoXnie<(t;Z}Z?;vb+2v!q>DvUv9Ay 
z&C{gw^XL;i)4(pw*Yb%3BPA9iI}}Ru2dnaLB9F%3P0c?hF8u1OPc*atDKehC3eb)z zLO&5B?AjR(+no&P#zKBXX5M)lKF|chWCz_fENrpNm>5@i^~8U1^T@lSbv`3akuSEK zs9VG&YFq|kK&4eIT;^ci*7A*Q$u*G|Jk|1IUAn%!JaO8Pd2X9PRNK(;Jo{Fu@uD56 zu7K*^7e^D^P-k1>BiQV*Z?22lY%2$SHjjal-S>Xez7ywHSsCuwm5&99>8_2r76#3d zcBr~BBMx=Sd?JN(^61?wO(;3}EgS?wd)!F^3LNk|Ug>M6tBUiekB6G}fjD})TFCH* zLm=4vGGbRZRb{C^NC78+@UcuT_6VDAWU#AI2LD{`327DKg_I2FRGiUvcVWK{9VJ4}{|GPLf^?*P0RdZ~z@s!aOvp-)V&VpU5bbPCGMrtQu7Tqbta#p2Wo`dsvX z^kgzsCyuYkOJbx5GWpAX^qA0PX_k?5vgxLit02{XUwfY&iU{lA2v0ZHNq2mI=VY^_ zuDbapoBpGtZkD=bbdHz24uJBs0p{LU=BU|1`uR%I==&$vPLv+TGp70q;1~!H4d@5X zE0&k!d5aKtjBnya1hpu>&2Z$mVu|)06NH8^0L@j4uL(_?>u{L2dxgY9FFm@ zwid1F*J_%g3#{+oKZD?z!P_@DTU`nlZ|#kMXph#|HbD*B1e%D_hOb7-5S!ktO}3=* z#JwXQRYeR^yO$qX70`>$^qTcGp@g@3Vhyd8dqNHzEd<8FpNs#bI!e>h4g5 zdv`a;6$IOqm(q?W2pGpj1-^KiO09^;G>)w&5VPHpy}f~8&wP~wkl ztXxp8aD%RH=Q++-R`Tu2DEX02ILA6O?wGKpllsYiauHrLYk&Ll?L^AEu})uKWN-;E zmE=h&9%A##7E5v7C<`r;q(+Rp5xbO_VTO-(vnbR&ZkYw>F*;;+pE@cjmu6m9_u(rv zWops0n5@~FayTiUD+>}R)k{G12*oLBIE$9{ri5d^`KUpZclezEqQW;6m2x!jjTM-kfP|s(sIwEuwVnDf837jCh4Trv)8uf&z0>@a&H; zw;`O_*&Z5-cRk;a`~jL2XR`Th-rGw{FxRA7zbZv?kL2Z}q9>ENVI!trPxRaI7V`vT zHwX2{z+O{y)w||ij|Ov!R^wI2^u7dP<>GVXG2c%bQ~VHzHFi+(X@)I-$H4+YLq z8{Q23nS-;L_jTm3Zst^8;I0v8+l(Y5UW{k59_6aFnQZAR7dV)U%t90*Wx#&O|K!S- z)Zn-huz0(OQRQD8Df-`83D93TOQ|1US;)f#_MEBzgT42FYO39{hhs%SL=mKeN|h?T z69oYQsRE%R0@4hT-ifGKr~(2~BfSQsgbtD3dk?+W&}$&^JKj5Y=DyxK_nS2{cm8*+ zcP-yUb8>Q?=lst8mA!XCOrG742~+XA*}d}JB^7JtAj`DKE=q+*G*bJw9JKAdjiFCt zb5?0xQ*2{jTz0r?9i_Tmaw>5AVw{YpDE?WNw_IpFAcGNwm{r)x&4=lpFDLhSjxi2t zo2m4fsM_9jYIL~cW8W^no%lv3x8fiamZZu}8w0A~S2#=M@Tr+bT4|+UEK7MAnh*jb^Dj< z^F#}J25(CeIgZ4?9+|5qN(SpLV(M;FSnq6DXM&z2-n>Bd^iSDJEt8@s*J{h7J_sdvg)W_K~1 ztV+jgcIxVbEMuE~*`ngE2>Z}>46CI!HFT61)T(i{9~&QfY0{v58{VA5(D1mAgu%~< zVa)7y!P=YJy>xHt0cgbcC3%A;qyhHmjcUzpP}1?V`)( zF@yKdC|e4~DG6UY`lMq1Gw(_L{^z{bH-Ej6b2&Gl>gIw*!?{&p8`8!Vr=_l_9!2VQ z6|Q*BpicrGtM_L!$B!y7wZf1AA*{ln&*zhxdqC`h z2iy?6pl>pnd)bj?r?a?{A+p}}q|#$0*aY{B;PgvlL^RuAy 
zymPlh1Uw`9(;c)Td&VRwk781OfEx1jb5SZ@3@Yu{OuL~aq#FX)7^M7IBv z*0r#pdEM4(fF1Jgwa$sQ&<8crYF#~ z24Yqsw}9YKjQW18SgMJzCNQ~YA2!y|ztO7bPxrU7Nno2`iY``e%e*&MNO30oD?`SZ zIOFvyRP-#M8mKGzAtv>vRGk~%r_VWKxHkB|5>$JqV2ixpgHBR@x^b(SkkT;DZ1 z9e1c@uo3>+Kd=|@%W66g{x&JQy?raHS>MZz!9Tcheyl3`NHa*hcs>7k^SY@M`S{s=f$)evSwbh0$@W_>{RxASGmTnRMjcr)w{V3)?3HvBRXOS|ykgdFobiGS5y&h23ikNwh zoNDFyPr2^hk98Xn?o0rR=szNlv3fer~a#;Y{`YsSc)+Zte_ zVqGO;*s)i%Fgx#nGV2)lI3Jw`Uj|c#we0MET}p`xRbX%BWIN1UcFLt zA}mUJYPllXh3vY4fztea+KuJ-OG!%zC%qDdwFX>Bk`3H(i|6e}`k{g=jxB{}5&?a1 zGSj*WW3X~jKl2#$l9nQ`$8%il{2ct+vM=8@Ya`k}Z=X(8cJ}huM3PG9^E_47-*d{C zBc*e|*#pQtHUl{4*y15PI?Q*HqWCLnU^66s(js7f)kol?LZ z^!|1&(SrGa4(v!?>H$#C9{K~9wqg~aGqM8I$xEIH0+9iH8?zoP)~L^O6(TO`+5+$; zDYl95WCs66s|5e{KVksbn9a835HlwM@ULGJ`s*8i`n`Z2@1g~K zE5Q8?;XU#XbXcZeUb_so0mHm10FUK7g_( z_mi?G0Z{fr`meh+_|p4$s!F&EO9o4<);*7+`5H{W5ccUc{q;k=gIsA9g(2rkg)&5Q zEFw9xu(+M+!AbTyEdOgimT`8bB$;4=BlDcKtQ@v?(c7ny;_=BG!%m!Sbb!&a&#(k2 zZT6LI3&)11DVe~wjHgz{SCfzTN@Q~Y>_2i-fc^LW=Pz~v zej!8poy`xYVz|9n=Pg?|T9?7Mo!o*CXvN!^Z=V-q-YCH8PyHP`C|2EJBKf~;&sr^4 zfIqm{krg9oS2evCT=5}2dp|%WI#Up$B*0`Y1TjByWOAtW-b2P)RChbi=iWv}Xquxo z^qEcVivctV!!<~fxOBg1me;SV3-F5n_HtJoL(CEX&t3C>I8G>RWqfc=sF4`>*i2LS zGo0uQ|K#lc5&-&s?&Fx7t`hr;DZ&ZeLBe!gX<%;IzQM@%CoGMz1I1g=)wK%ytjwdt z6HP+5TR634>1*Nf?P81Y4;d+p`$f6SYgrA24<~izjT$+o-z@v&T$VD-RTL{QB{epk zR6tkn*qen?AA}9>wd8RMM$%*pqtJCH$6#>BS`MWy;}D|Lb=Sl+Y^3+6V!&_1mRr2# z)b%5W{S~2=>46QQwVcbbde2`AIYlYRUV5oGVU8(6|IMgvBa}E-`S8Ci`2NFz0sh-S zp)>qvf^lzKC6OQ?6!G>+1Jj=LzpDV0ZW)gb?O_$Qw|O?wxfvA$E+fA_WSz6_a=pd= zUidE_h;^{oy13md0isssCGwgra!_2C__CBQle(>a!fg79bkS-QmROy(m9j2tWL4oo zvpv9Y%Pw(=Q{vm>k?mrrd2!J9FfQccqSi&YMBt-u{6}_b->Uso8f_>Xn?{~e-#YK< zLfOd8#JpHv0~uRKN2^DMqmdY}xwPuXcmr4}(iX&jYLAnKTmVsxPr>D@RWRFgvGAcg%mLaB|aUonrby)P_d?z2fOnvDt(zAJFIRDhD zooNA38#?w`tm%tnn&GvYAOxOvOo+-A_RCnOOo@6}EQB8UQl`mM4y%2>s~APxvY4Qx z9sPSx@}xhDg*7IO<*Z&D-Y9$2V-cYiYmJ@pNPijjwVfZRE4{m0rRmOVPnps;1Dkm0 zzN-#AVFB&*_Kyt`t{JK(FiRX7m>mHvC&{br#uWqor1`35{V~;9uEqh_r?k&jc*b+5 
zCQGM;$bKylrEDhX%un)nt_*`_U}aU}W+kD-_%c#|z)PPe`YBNO``YlA+MHa7E!gQR zG&TpWy!=Q1Kbu21ROvH4u^CjyV={8#Prd?2`735lEknglp5z<7RXIl=@$PB-P;VPyTVeH3atn5$~_C|sXPv%fnUPQ}c$)8k2t zN?F}rEoqo*FAH6WVjfgKf=Cnhzs38*0#gW<1iS3xINdZF#WAh1Y@oE>UXTK4f7!JC z2bJnSs8y5~1{12+*|@&wYD<4{#(8S*X?@m?OwD^27g!-)(!0-aJ1ADVHk>hi$UN)_ z(4f&VDA;C2HK4636^bh8sTZbitizj1veORYY_jAFS1I8N%k6u@_51q0!=zk%fRcSh zetXL*u7PvK*t8P{Fq)2|mhvVz}qj&?1aJyr3(hMu4v0l(a5q|N3nbD zwcVviV<`)x>I9v3-tPRAQG)NoZcy>KX9x5_2(aP&PG{7 zs4+^$npEL{qUuxi*}L`&A{d?`qZTp2dv$!rKS1KST+eKJk0$diD}6s;A} zFF|Q6oKep}=U5U4J5;$RIjTg4)_Yf8mTp?odt~&$0VRvYhG){@%A7l1yg(L3r3+IC zErym_Bd7LM_%HrDUak z0WPXa-xRIHQ-v>6t~p`&bpuA>#;JN zim?i$YQ6Ti?8&ZVWFzGCX4cx{Muv`tXDjmuorRFfA_v7aR7W!1dpv*r7PMNG5RS@3 zqXWrZBgM>)0Iv_(fPl|4T2e(2d0EG|<;m+^b_8}$s=PD{@aF7KwmBC>ZRiC`9^X(F zQKeo_R*#-Bpp491zG?8rqw+BobC{43{RwWE=^IkcfGidaWZQ}aH;HN_=TxJv&+zK+ zJjnA))@Yfiw&~V@N&?T^NgGfR0W_-( z0-Uzf+ zHOyE``F?u}qi^w;DVL6I`lrAA;=Uvz@peCG=LAV zDE6C|vjZ%Ohk1iMa6K0nC>52800ZnD7)l8+z`n86NdXvO-wQ|yP177Kh5`oIT?O0n zfB|-o)CVXOFu-2u4Fwy3#*;HwMBD&lb=89`z*v3O`#2deR!_gK?XZ_EQ4;*qSba!_ zrj1NUh0OxSW-a$wP`HuQq&44-j(gmOn$OxME~ERD@iO4i`>G!7bFz>y||IJVFj32*NJQV!JxI&kFzx0$#fASnjs28e-bNsmnT1!UE$uv7U| zg=gwRDTwPIj;L6?(1G(6-h|m>>)s=TP$f?d0iZ%Sdy)>N-04P+$MCm}DZC8a2l_$2 zF4gC+8$P`AS~!HiSNo}s@qd01>^#M6om~P3Hztv%9k#;>#;m25rQ`qD70!Eet^unwtCgWc3EkK^&B5mb3?!09suT{N%6=C-xVr68hFP2TOG=60 z;&`zqgI;9q0lQ9Dny6(Ab4uKvZfP|->0_D=J)A^8o@^qLlfkxZNRWXx%StOA&#lA`{Y8u>45fpht9Em% zG(7-}#`in)leqK~Yz@mWU(Hb%E;5wqzyw<<9jd#G4;0swRu>s&T3oy%pfm@|l!>&U z4+@D~5Qy2Ek$ycn-ra0YE^W*-X1w|HQdj zy2laZRm}-i&jFSoDadx1D$AWgGx#I_zv-Y})aF@>S@;#*tP10>k}v*0~3 zKfC?Lr9VLQDTJFh0VbyG^Pj^Fu2ZLuIlYRw?jV95zWcQPL^j_N@7>PyH@}Jl5RcnN zv@-$HVPD|ZI_v2C81r8b9D1Z@5RJbaiy&MWgKxLx%QLOX2p*dsf_KTV#MROJyWGG2 z1kE2H5)yz~nS@ zZ(mGL_#dvO*kAmaPgZKcQM-Uuc^vxcl%DWzg}hKv-CAUD>aDkqo_7I#O!&{CA2J*c zSVq94y@;=STP*%DM;Cw5I^sCCr)eNdQ$8(gRa;Zu$73b_Fu+?&N{IWq^IG8XX1aB9 z0Fk`7Hm?%z{pF16XTX5UZ?~hCx9ktlr{TBoH5?qz7WFkhw#zB@h;}tWDApD?Rk>>% 
zaz&TZIX#7<^vfS&n2ZhS9!}X#E4cHdDKv!PD7U~G57|MinJN1m2sak`ChWexV z3eem`sxfr8D4oUKqjD# z_-){f3`+v|(P%(vb2!%&2g=#s>YsgKWYHJoyaR-2GA2z){l9BVBK{DZ0{;mDv_;|3 z0m0gYeM>i8b0CKdHRec$FV2=w4B#u4bn}zCn`spNbWx|QRI|t74V(MCk5dR|C*|j| z_cu7hvS;XW#uSGaZRm$8(pOS*u}6Q;vxv*|50@tfpz-}5d=@28`B!O4Df9IfuLWLS zf3BAn;HDsMarF|%qviT)XF$^r=pKC<4n(iv$nb0dm2D@Fc_0RFIaNGZ2z#Ez^9vW) z2QN5yMu#pG4eyh(_1BjUW1lUDcT#L6FQn*$&Wtk#aTVwDfOo)0XhIDVy%)u~(COe* zXO>`VBZ4eXypxlr+Qx?dn}PHA-`640?Y}oR$irtxQDO(re&<r017y*I*Cxwk47%xx~+rcVs8_19nm_M9A{ zCJ(+BWw749d#c8MgIF-=#R)s%c`-kVQ(M!<)-#eq)6~S%N_rMS5lTvO-dJLm;I$IV zk?hy2rb800@=49F}5 z%7BEsnT zofrOSPg;RUF7i`5ta;&XUP!=h&Z1L3*oK4Gehugog-cp*U*!EP&TQve;AqNVD^e!} zR7RieU`x*SJ#ZraIn4?b4h}P^w7BJ(UhvR=qGs^LOHiwnl5q*bJ@#FkofmCkTEkt= z9G5e%#J~w2B>=d-LjY1PlR_ZbyI47O1;7MZDF7x20^>mWUt66x%a%I_l?=vA_g`S^ zAdwd16_k5WgT<2u6Xt?AtB8#8dI~xSk=+CO_cCZf+KFrt165Q=N|vb{x@&+{$!dI* zE=oAaynjIxk~1bz_rF;@>hC@`F0O{D9Zox@{W(#>E9-N*Rlr;^kgLoBV-h$Z`|Z=q zsov^74TuHf-Rl8_M?4)0n1=KHG!2L0Sp%xnZvOzijLtsodJNmrvok=|d`vZ(8ke%Y z6C6W1&iAEOiCAzT-Z__n1QNc})vaH?&6+W)nq7^}x%9ejL*QQh1^$-$8}YjNKPl2` z!{Bu$Fd@*kX>AKemJVnl6xES#(q6T z7O$i$aJtxZWcpMyOO&U7TAe(hASuR(ec(-_lP@E1$gnv+zy~%!IKE$X225d#F)kas zS%|jjFFV#<_@H3g@$TSkz!&Sm%sB?u_KQ)iCk5ywconOvvaaE%ukwOzBOr zZU=u1QM%ZQn*a3s*_$6N-j>A1O8UHUV4*n&dc>Z!(`+iYY1~)aB+RZ_>TW`CH3lP5 zJ0i~E_R=A3lRHBjQ{5%y@2oxW^Y%3EYb~>12_t=n-w_yHuMAc*M$bjrtCvhX=C3z< zE+Vm2=#Le-*S?BCB%Z0dFbdST?qok&3IB?>3pP}-I0BUho`1GtB+)?uyTV)Yn#?3# zx401JWV_?L``YvX=Y=y|)V6LMGsYezR`X00lczp0lBW;3tm6hMIl~Ux|9+l8F($bt zEqg;aa)65eTyd2E5g_mkxjk!W=oR`Qk?h6;x{ntox7x0*KfDBJ?mmCCYlF#Xa7fSp z+-qpqO54Pb3gXs(#c(npcaElCRYf;q=ja$b{ZUhu#{~t8|dzyUd`(}093Xmx>f;|?cl8z2B(!U zBXp?ueT?ZlzQw+MJ+lF5i7!^4Var&1LG95@(L;=pFFj#$04Yq@jx?BUT(bDA_*zqK z2=n19{q|ZmU{|``%jKa{UQ-$B z(tDf&ZiV9EKm{tbZ-5fqz;{gQu1G)wRk=PJTjC=%_8%bc&?CrVH`6@=Cor%g z7ziVWL*3tK_vH$Yd65;daM=k^r`~9rj;ObK$;44NJ)h_aQH5rWN@*nxAlae@wT%TH zrL>E!*0bBc^cD1g(4FQ+KYEb1J6oKtN$LwM`4w-fvllA@Z$k;Bm>u!211 zL*@@OIT)n%vLB9}wo2LMsY9o|66|CL=J1mf;q`^u-YwU5TjtoeDcJ+}dE_2PJP7Yv 
zZ-_QXy;-DKpf>+ZX+9|A%iK^IKv`m2oifE&=2bSMSF8AcfXvoR@v#-=@BJmhGAytX zftK0IBIUNkJc^z9Ei0CT#baIrg>f3F5bSmdQH)$^uTpE4{C-lki0tY)S1OM;9dva# z>{_Vj!l|1b52`x)BXaisBrV zMH0IMBCq>DK+fY-wLqct1vQhYXO};d9a|+EVb+ZCxl2@ORA~VXl-H~*h#_tE<(y0m zC51#f!s#V+8jXr_h;NGE5@m2F5^i=b^F6d z=&S?&y=$izGb;c3F`NLr(Upy{D=$qqmTS`pf}VE1=`>4IjF}>O~8+Clum;! za#28F^JC1(P;p`?c00(I%47*)jZu*Z`}Ez4#XKIMpwlsk>*T|r*vOidM4kJ-F~Uob zlBk0kiX*tw`RmP+rh4b@WL}X|jA}jnjx;0_)!r`s$h|O_m%G8t&nZVPTRT0%A1WXs z*2*#$8aEmL?VeJrioDlpYP8DXx~M$N2RuE3?Rzx;K{*6TM_OCk!aOXj#bf#y(Xrz~ zqV!nV2qiEG_ z%KQXNhwIP!p7G524mu_U?1QEmedLW*A!^}!qpbKG<1337VpM`$j-6tstN0z9pj*;Q zMclm1CFLBD?1ash!*j8fu%s1C={x5HSs!0~F7~0@wHMd;qNbmMbZ?DzQ+KmZR&YJd zQb^q|){cijGTLz)Mp^Po2h|~68wW#e_l6-a$1cto=%1ZGc@DtH3%1d*@t#v-yoCM( zsIHy01t#UtmWs*w%;wR6gz+%Om(8wc95HEt8B0qpwH~qm6nb>s2!BCw>pUH4;k?f(AA7=yxMr>Ojyn@v+*Z%Yuko>H1A0+^y7URvbeLsaoNqNk&b_YjqK@}`R&-j9sq)w z_QThP`0yZz^!B+8;JfP&&?myartjaf2QI``RLb~XmMD3ihwx%Zx#2O(W!M5^J=;(o zOnopyJS{%){d@ONu{=J0*io<1>r9n4cTaAB#e;qald@LI4tIc%ar871G>mlgk=dsq znqxadLwl7B{{xhRtFo2DV4bJbVqKZ;gfH&E{Y2kx?(^sln*^*L$J!Zd+37vMp2_)v z;wq?i^SF0<>Kbaw?*%H@kwD==t1jRX^UQ?hDbZ6gkhaO0lS!aq@(n*sXxAYy>dR%G zaupSp8~1cYG|i2)%$7FP#p>|peoiVh6l*&;35X3&%g{L%%!L}NsZiEHWe1^jXRHJ{ zf|kaYen>QQBX7NmG`riPqbT?3MYZg-SI@xH zECYyVJ;Dr3isfg)1(;TfAyqocdS?y(;XM8|H45hSNPUw|REBc|9)3^5sH$VAVj!1W zUw7vKGybjRhH_TeQEa$frJWbNSlxj_!(7I^Os9EZ+o<#{%ZW+OY;m>5p}Cgzh*XeWAY8cyh9jpBx8S3KXyaKPD%I%{cc|WH=>B=ZfDERpKD=Wpq!}v zYB1^^OKJc$!7XXT-YnILXgm9mN%S_e(cHiZnR50Kb-8@)`GX;{gBgW_BJ&Bi2ue<=}N-vtqWkJI?W^JOM z=!!ubLj=doT>Vx;Bs4Rcp=6CDV!0!6+FO`ZmToah1T$% z$7G}OA_%#Hs(}~I3%b?Sr6sy2=^i>~0LPMcZrG(53xyAP)6?_U*AU9HFQg`(G)=M; zN9$G;l?63qT~Q(-dh@hNs%2!3B_c!y=9!XbFnZ)v=SnM|Fu%Kk7`tvbDb;axg;# zr1Cq&>ErjY#y$>-w*!Qa!lp+lDa5G)^sYX-euXT9eK)VGu#tm9%R#paez0=KmK9jl z)m8$K8MW9oPVM7~2odte3oqeh@Ok0cO&sA3P%}!-+d(S;y@T-;_&ad8bQ@D$Xfp9m z|F3;ot>2bSr)QZCFMH;9G}E2lq#Z)Sl5MC&56Nghq&$~*BU!wsB^$KQ&cH<^J2+oe zR5WDotz>iX#5Ui{w(i!q%AlZowT~U|H7rr)0Qw*|P~9g)-GJLSrl8`K?gyxhjL?%G 
zR9|=Xbw7m5R28`x%6r+-vyy>p732JDq$bHWc?~vt_&B9&;?UJ;*Q!Kfh}Kr(wYLhC zb)$WxB+5SOPJjqHmMW7*ifpJHtNHmzbY^*6@BlA~PPGy)gwGe`<6nUfs_p27?l!6W zu%aWr4GL4>nWoi~Y~A|Y(G3_M>{0^sDNm4O804ul_vkG$`1>UURfq%DcI7irAR$2f1u=#)RZ9=&_J`c-l| zt+1_Fvyx+NTA}Yc?09PP@)NDdr~}jR@(IkunCbl@jFG$pi52<7nE?WGKy1#)Xk*5R z;plgMitD*{iDl>LPX+^aS=&2~UBxHzleCt!`_~t)8=D$FU^c(Z-IlsE5*s_aI@!l3 zvgn=^DmKOJPlJn{t@^8X!;>WU+eiM%`nP$~(?h17jw@Zf|DF8us&~O~&&ja!H-})Q ze&{3>Iw4wp4eA}7z!Dc~URYX%sw~~b=9$GZ!aeBTnMmx2Wr9nDs<+q=A`@3y^%N7! zKfQTPmFRKaVX{2O3&uBVpL3prG_vbUgt1~!enM4txP(O}*kDe#Z&U`vGh0ht0l8Xq zfv%ne`Ss0k1|^caCLU9TsG5$(*CZ06jw5OLU5CAV+4EK0py#tFhOa9!zt%DS&htv? zyY$O5V1JP=T(`#CPD_^y9rv+(SNw(nXxic)H8eRl%l@XJM`8AXTb00@%IjwZ*WT!d zOJQ8Iw&`th7KC(%b`%PNpynhupAz}*U;EzV+^;p5dRil+eso9siHb24-qh?Ho0q`4 zQmAsGTS3!7A`nfzwzDM6+PQ%I{7j|t%Tw8xv=HLUd#!oAQ*!+%*p(HDw>bQXKIX&5 zt&v-%Jm0^Aavu~W@+YcS>-Xn}n+6zkqSnp`>3$2m06$(-uo&CW3&KlhTfTi?-8QzM zijGhzO_+l5OQ;w+iwK)i*m&xsC3HksIaP-Vtx|u}r{ ziv^`IR{ZVMaxvdPOcI4l=lz;|ZP8i%vF};;7d;P@Pi(=KHzNTXx^p?j)qv+nS5XDv zIWp({`+S~nZt9vy^i1G!akVbB3mp_usjKKsr6c|%TT`HMnDTtN}4*a%)g1htzele!Z0#)A8=^`JFPBt6hPJNi6qznuN@*w^s>k6RJ6#grTF2rUQC&w~1ZBmbnkZ8oN1wY#r0*T~eg_ad ziP1+Y#fWmb-&UEkjX$Wu&R1p4Zl6D1H_KTGfA9k&pia9MG3L&=WYB{W18_pA#kciH zCV}%(J7TS;@=CLK6^(wD9*d$%e%7I^3%R}PPs*voJ(!u(#rE@9;H?#d5l`UN^Sm!JGis?eqmpXLqqgC}c?PcoHFxcHB=lXNB9r^v=>t6GmEKUlMGfqp zmZPnvk2Os7Q^l(Z|H-Gj7q@CEJ?AFN;S88|D9mzJMTo~GrG4?6N&cBx`P1@KQrl%M z#jI{OWYgGeITvM-XG6@=w&T)Dz^?HnuM7IpB7nJVe(6O1)JScuqZ1q=k}bY*3-z|s zLFi>ewTS|8%C<18Lq+5&Un37^-|=e<-fyx8JRy2v3qEalk!X0bC`l31n%9ah`=S_= zV2NBnt}7U-e6eRHvT=_W>rR)KznYaXK2GP(hqa8i;!kbsY&JR5>1ZN^dZV(n`7vK8 zxHZdI{>H=$$Cs5QQO)wlZupm~e8F>Xk7&Dgm38#7^CdoFP2v|~v@hVYR*Z$VK4s25 z$Ff__ub1vro4YR5CbY{3DAh)IDG~?$FRnWv6L8y(unrRZ(VdIUmVZMCD-o zfVF_;2KTEZ#>xr^{O?(y(v9$zfa+wT@2^0>Iv-rb&tGmahG&DX+x?>v6_ z>Q)oc3mxt&^iLMu4owN?u}5U1Ts%N@`>L3|(l_1=Wx(fjb6#ft_*|HF6=~pMr>%9)eM42f2G9DO>o5~ zN1k-I?K}2uLRXB&#qnvTEXKjnaXlNs)mW3%6Pf-?7DevkuWF+|fo9%a^dT~6a<`V1 z>MTjXX4n(ZuZOi5gd*_@)-tU9-tu-!`bH1+ioXQ9Poa!5pH<%%BSL6Gp6K+aR*NXX 
zZHMK={hn*hG58uSmhoLNG8*$H^H^^u`GCz+rd-YN+L8Xgm^ZfU>5805VAnvHqU|I# z85-!$lPr``gD5|UI5i2j9_9s%aB5~rLn+xQ2Um6omC>A>90FM{yZXqN-T$Jjkl_QR->k`-$t zMuaYvnQ$HgexP|O9el@>yqVTNSa>W_NAneE>=h@&rT!$Leb+#AUWIS+yylzDj9F*T zzc#K>#^Xeg$Ey&k0YuS5>Q+PZa~uOGx#d&C_s*Eg)uHRNR?$yNd5_5lfw{438eUWCoJK$aMl@>JkTZH=SB+Q?V9^Uu>A7@=f3q}AKRy9-4aMe|7 zbRws^DqE-5!eabV?fEPsigszzWgtod2K3+}Ge=Kjtt9S&`5@@{U#^lSOY(KJxGVPM#aU$=MkDiRr%?cc*ItZ9|L({ytrieL1g>%iO-pegu1t!w#q zb5_-02nW5h(?kmP2*viE4_h!R6FA1Q(G%1QTsVupvNo?x_L!9rI;UlIa z2#bglQP<5Pe7cd9WW`38V3)L=V*s;Bu8Wq>;c5x|)|2qj7$+*Hx3(5-h||_Vp^Z< zx)8_HsOPu>7DU64&6eo9QA@dWzDVXxYwzKmLo4yimDVdGUJyBX;SWd}@w=7P`%g1i zHolC^JxbA=)5uH#U45kk^#1^m^A^0ex&&l%mO1;sj5=2S8#oet!MC0KyAgvl#_ihE z-;OM15pKgiL@9-v)azAEYTRVCZ#uuhKRFw6I%0zY;qf=eUS_`TVe~Q`Ftaud z6uyr-MKyYItK>5puU2OmwyY`#bneLA?CT+4$lZTX$3U~V(|f&Os?aZ_{v;3L!J-% z#MEQ0rZOe@q4hD%BYveW5+6~yQMj+OelM2ifW2OCe1FK2##l9 z(qzm% zUd)KFig?gn?C=2_WThM#%J(o#J^uEH;T_3Mt6MJLxz&iiAw4;DYy)^X)w*4L@N%)b zsE%sU`dGCHmVEQ)1s^4h+?L% zNDOC9k)f&2T1~#G#!|EspVro5WR7v8n}=P+;%yXLh+^>fz45o?(%Hs*^8L=*kwR+^ zOZAOf*~U*hymleWSuWXPj^dK8HGbV(1~>}gzg)0=Y4F#cwl!#f`1>b=z8PuJ-U7mQ zf82<-(=Du0{W-c`08BFo!j~`B_SwZ-rS=bJQJGmf*$PtGY7=?H zj>@eut}=}M&I1)HVv8S)TlzW!W0b6<4Oz?B8RTW7N@U1-jBF}g)=PTgk;noYL_}9l z=c$|&?w+AO&cpmF{q`4!$%_;uD0$+JYZ zMckO4$OZ$SHwx72Ry8gxt}GskL}P5l)@htoeU5g!{iPa?mDK^)=uRS741$Bh4GXV% zS4Ksqdkt z{DTuJxu?6Vmzx)+I#;*i*jkYG&gx@=*F_G6nf-KAcQ56E;>f-{=$UUuXU^OA<`Xe* zgiXcvCx;_7^|Pc!hGF*?qn!li>T%FPe8OID$DzpmKo8Q6n8#c2!1$2zwTFyM{m}e; zm|2KLSvAP$5wAu2^iwz$H3NTN@p1-s6*Jy72J1UK-3MYb)QORd#JU8>Y~5->r}j+g zwGyK|jrg1ci&Hnl3J&~UDEPeS0W`Kw56Z2dP_PokGB=iH+bh?g6K>Nwy1MU)zULIt zTXb=$Hj$xfkWLGs?7#74VT_xy`$YNb(5bv3dg~xE%k+Vli!I~gg}d|t>u>K8ZWITF znui%UKfyIZr4Q2sA<64Nj5D`Qr0Cg?`;f=Cw-;_5ZTl#%_Zwr3mVk7yVT5A;^FFoD z2O4ptl~TcIUKxam(;6f*b#2gw*UGY_(vwK47&6j;+Ir`2_J75d#odzw*M`!2ge4n{DQ|Ek!EKT-#SC#gAG4ctbf-Y52 z&WxCaQ1&zPH*>^LPKfr%a#l(%klj2V-t_wUmol_zQBCt4VtV&-9j*56y6H=pyb_kC#(yN*hd2B7?Htd)%UJn*oi9&+qscZ#hup9JVGn1&ulxQx`6Z&|&E 
z2`da_^d)(vi@Ua8a3b`*L7}f3KrlK{sRbmN=e`(vtL19W!E`Kdg?=AwyIZkuoX?() zsg}OD#13`6Tc3t8tQT@nSX+v3Bbd|B&(ZKDOo**DTKz1OBmwPb_%oky_4SFdn(l)b zhxm=PO0jOSePkO)uxG)b(&1G0=&7HY%5%%k8LWuc3D=5w%*G2p!=LxeeFd^@r?6pZ zo1u!U307IgtM_DQS9j0OV64p8E?(3Q7o)$*r5s>_K_WY_C3rqB9~|zzR7ttN`E)m# z4Zyj97Wdx?@EL3FXw<3JQNob*XycmS2|;~kebgG+!}-eD78TC*JAQJ&8Vu<8JTH|% zNLH-5!KUinao7N@q`CgA1B$Gzpuv%q- z5>k7EcwOb<{zSs)VV9k5fO54?apK{`=F|Y-ewCEAT;UpM-(z!-LoD9DqaU~05Dveg zpYeT?&NA}qmm2NR>ZD!M_KEm;Hf&0U8P=GTV(3n>#eDk4;o$4|hzfJ_;5)(Q!LrFg zAc-^D@Bg^Wls|apw^%E0{rB?@o!`(+AAfR3d_Q|N6a89d$~f}>0lJ-k2^fO|h8u9I zJP%ASH~au?0!1z2)6uy_rbZLQ;&TIcxLU!$hA^Fbnqg(UM<3lsx{mMPTeua^Wl)SE zNLQ$0vsax)xh5|VTa4GgWH`qm25X=lOs-;r>7I^v9hBfDJ?Xu7syEEgZ-uCO;RS^6 zRO&2(a`btAAH?ThU!FhLsaLag5SPW8ZK5cY+$SGJKi)=MwmeGOO0#OG6#&;mdhOPn zR8Rf@xdVoRe#lkGW%dJ}^avjZo&i?f?6!B29zkXxZ14q;#@R=`8hB`@#>b-kd-iA?q14YV8D@Pyf z@$?2eYuQ|Ez;HdzGUk0t>CpO1s4+#@Qb4x0@| zlVgA4_5Pfd(up2#AQr36K<~`9HFX(!7?50ql&BZCNtKvTyrz?K?N$~xCAN+o?0`u4 zziyY3vu$Gt+g?`w^awZ9NioD$==Q|T1eW)^i|X}xOu2tdMZ&D;?a1fR`4@EjdwARQ z&>1MzeK#M0+FP#F-1_l;!=M5&#yZq8@Vw_J%aW8H1X2bYgJFTasjD^;kHf2jV;dL4 zJ1R#%-xwxVB0;>+v17Uj}6a4%kabdcp-O{LnarQMGtpRjadiF4ZJ-|RF^P8-Uc zX_?1ja70o_yJd}G@Vw~&dTq!Q4;uEGK^#*G!ExK~e*&CZyT8~>#}OzA4C*?##30|ip*lmctNf? 
zBZ4!a+Emx`!T$+eO3L5N2MC?XF)__rNn|bB&i}y2BPHwrxAGAZko*7Gd+)HQwq$*{ z6%YjhK|nGpAQ?n*Y*0y}WN0!X86?wWXcPfKf`EXaX88ViBu?F_8zZB%jml$|d5$S#XU^SQ0Ymg(sg$iy$~+GdD1;KfHZ0KRZV0WGYT zrm27K?l~ayKS(eKa~DLXNauewY)sf?(1-m(l$=7t24E9ej{k$~+AY!;R;MrpSTFpg z1k?>$R5v>BDLC?URqT&Df1v~as}h>mswvJj z3iRSdiaXUAE`AMT<~uexBMbq3Id99BLoy-_7R?iW)DWK9{#ESl+G_tkvwZ=!ABQM9 zr!0BOIe94A6YzJC#27aKGoASkq5%Jwx2|zN-eO+}_5iy&Pi4Qs6(&y)H&3g0@nsD2 zHR$qeyoOVr;;)ArAqbybR8=!?9CIy5dX;vCnIS7%k|YwSqyz&dXi+#FHmL)f(aY~~ zK!baEE!%=GZpil1Dq2!z(MK`Ohwu$|Rb0gVOs%0#Dgqop-B&yKg}r`{1)!{Zl~51l zgQ$DKn;hV+JYn)kgM0@yR@NeTxv!3>P+qJw)?bn3vf9s+&`DU{$2im<|Mq|PtID+D zsCr^xEINjGxp(2}Wuv!gV})-Mji%J^D|F%P1^z|mRKLhPTOeT8&)xEH{(i@&E&wo_G;@WgrBlWcYD*vJ56J@FB$A$-13xekC zr!VGu(U`(GKp=B&VSjwc%X5ZN>TjsTzxvJ&D69UBadT&oI^0u=@@QcAnfMzH9!8 zWq>bDia0mfq94y`Si6&8S-oVb#vE=O!rM&#?A3(`gbHMZ6-@*Ks+7B&9|OLl>^rEb z@`-|Aua$?sY1*(>%VcHUW?fjm_KFS>F-GXPyV}Qd>+LnflX$ zAul8Qpu0xce#&5+z1GPZS{HxG(#=8LfEmZ$Rh6muoPf)9}v#}8C@uf3FN-w-p3iq_dcKaa+5M&jF=+#zN z7Zq;??-ptbA#Ab6O8V^it;Xh`ek6Q*rAf*T!u6wc24GXCL=*MHJVKt8JihkZ-%hG} zxZ0W#m0a|_uycYs_eK0B5&v;#NN-U)cSC7B@8;>;gkLTj+p0;dY^8VswjxSIHjdS~z|IC0_t(vI$HMXnf z8==-{t$hyl``1NF3FG?*YYIQPG!Bo{EAvK$#T8mUPT;A@ex8w+@!m^mkXNUhql$Lw zn6|)txa57{H$7R3Jqp>6d(O{>OvKU?j;g)Ukb}=>(`7zqBhH8Mzjm~8KLk>L{oDOZ z2=*Ug@4q_6`S+UbKPMvlhbuaAdlmmZ3O4?F(Mfs*eOQq_OTpb+k$w```NYdiAYdaX zB?9a##o2*<~fWMv z`{SO){xU#9q1Y_(;_vIi_JLhFpzB@l(g0gCEgn@%ef{l^se$hI-d@hNsf*H*N;4Pr z%Sv2QTG6t6&8hT!=0eE5f;?I~pvIGMxYA-y1Y+o5;EWx(o2jKeg30V`9cR7f5cSv@ zdc3lJ8O;J(2KGNc$sl)d0;u{VJwQ6JAqNNpZil%6TT+Wg(Af#ZWyIMD1Qu`tK@XeK zDpI@v9J2+vz|h`Wrs#{(Xp(Hn-_d0{H|jSC%t~plg~;xDib0O87DxA+5f_&gFUhQe zXnmcFTRngh)UbQtBQ$Vs*i)W~k^OJ|4gP1g;v(+Ih?P*2`$$U^r5W8lIXW5=@fO|& zltP`GQ{n*)ec!GyQ)(?pOH;Jqg5`6uN!B16n<_-!O{Z)v{}XjRMxpyB!!!{rEt&kA zqoYChB-XR*OKZAUbmM+U>--PupiW2rbVjHmkdY|SmDBn%{vjVPQIx`z*13=AFw|h& zn`f^bCcnA&w!HRz?*7nKt}Itmhi*b_HbfVM)^t+FL(-WhC2tWa%~&=hTjM&9AnkKgk*HEbG`x1`IAa9vq^fvH z-FKQA+`yN|`!0w+phQReapj#-ua(<0sz3{=A-LAU-UO~>*4Uja1N$SBQk=EGBN9x2 
zO~hVu>9#vf-0Mu91mafS%exAMBX{C*_>aPKo>WiLHsZ#kGrbJf1rzb({|@2}{CfyA z(*!XdbB!)8Br+aI#^M#Xw26qdo)jFfQ#U$6<8sZAH8;E0c}A-11pQkVZM(|^Au2=f zye0NX;-*ARmiKXkIn{?N#^2XbeonclDBW@|biHh0aGg36Bl+)%#d1}q5GMhE3J>fZ zpd;ILb?p=I!LyG;(Y1NCySWyiWuWN^i0NO>rHa*(akKx z_#Y@btlJ7BJCZWBmb9Rar>kv1vkMbhBkRCKqXE$=QL*Lq>6u2MB6iZG%$wTo&MW2? z_{v}tuxoV(*w35Kk)3p;{)o5cgJpU6O*xqbPS%JhC()q*`cq4(zoT$Cb0MD9j6U8~ znaNPOlRk(<6+bniOWnG1njKt^>~3*c-+4U3zGk4iqXRxr!lp<2l#>Kt`xnz*@1#C> zqh?qnMPQRGb)0-(5KkD^1_3Jg)H99hTq%UgGHAlG$5rpFY<^pDJ> zj}!NDqounYPcam<+4+W^z9K?U73MF0Lqr*l##?M&5C?&+I*Hu6SMVo2uTFk6@pJw-xG}KQl`y#SV?*|IAh1a-5ijxJg9; z+Q`hi)sGe)Hoxmz*2T%5N4jJXj+}l-#Da0$#B$~4iy&(d-=vf{?;^j~tuiaIy5sxZ z3&^$HnH|iifon}9wm1><_Z9?2g`O@DU#OHcHmitF%RB@KXejVHBInkI7CNuSn>c)Q zwH|E3)`8AcQy??4%*Unoy?h*i)@`MS*XwDcg8-Cou0uqxsNkA7MUlGto2+H6(1g>Z z#L*=^y|H+Y&6asnK}6JA&{HTvPq-Uy;T9D)3o~8h7s`vNgoTN49lA7e&kF&w;O>p@ zyO90IEdEDjQQYQIC|^+$Kha3>kewqfgIP0CzSJpoZu-FBAo?^ZWWJb3S3$YY11Ji? zyR`6Vdw+Fj!B2r(utTKuxgkcb(AM~e+~t9z$965INuw5K4|w2s4P2MS5scrU9rkXf z89W!9<@>Av5Y>cYevCW3ragPq|Dq9tuvx6AzQ6-d8&Hg3ovch?Y+(c>-L z%E~I)x5qBsiSFvMcFg0n?i6z|+Ts2<)8^E4S7;`WdJ$PHDkr}UF5IPzgM1sSZH^~9 zsWK$rp?3HfAG6eH0qRdKDEb?38f>Z3SF}NQ43;SG_h=tE3786PYvM}a8W3z*ivPHu zhzA^v|1pc-mn`PQs9?=8w5KqA*pg0v-HGZJI5O zRiEI|Aaqn}}sy;UMVY$jTOjnt7+?L zG)Lz7+lJ5N6JfDS#c&1X-m1{m(@P6#JN!1=jDBEFvL+9N&H`h)o5Qd-uUQWKhDu*C zE%^9>M0rnDb*0GRRK&LqhfwwrvM(zkRn93|fN2$OMt62uL-o0xfEs@n!|kmg)BiB> zqL~=#fvw$8Q8}d@0Q1B{8OPc-QrHKc^jNJ+@bFoaNAY)%O;+>Bigu3_ny7b; zO+PCr>(Derqj_1QL_s7T-|WKC4d0}mT%*1`Yg%Pwx^AI<2k0rdoMJZ^ zJVYCCr*wt2-fbEc`oLdB&fLla5yj`HoEDZ6mh$vbI02IzMStSOzhe{o$2CleI6`6( zR>|gdO}(&ihA5@#0%ut|nF-Vmh4z^KlioN(peHwboS&L$oMPBYOAc^WYcU`+;jetO zVfxY9`kC+zjOPw56hL9}C)VJtxH$Mi!yDs@5}+LUF9W z1ES`p3x9)FNVO}xJ=ZL@UVg6797e{J;+;h1<9+uozZY+h$2FAisjn^i`WEK&F!H`$ zhqh1tGq0Ri@gVDz3GjGs?=b;zq@Ji7jmF48sp!`q@%Rv=YTR9EFQ8r0tyPTPp$XmB zseQkz|EqGka~W^TPVnd5&?BrN*rX85mv+!i`t;&z?Itz2E;JK7Pq11J<64BBtK>Uh zy*UsMGgzYC#Rbk)zBmF&m2;yh!oJZC*`p1+fPM)-)*}GP@^b}Mc_Qfg9`ND~0IBS~ 
zgeE>eq8QLc?>)F+?gRnA5zbeR{#>)?dg`AF>*4^a9WL~qFr6FIQv&E_05fho41Ms^ z!&82)P^#MbQM5|u#ys*tKSj3z!p*RP+PGVnk`^|KaK;rpoQnKxYb#AHk!2Z#QVqCK z-$pj0_E7sn;~99nA{vCVy7D4hm_}RO38xK4WTw>I-2yjnU6wJnOvNM|3__;4MEe)P zg%%!1nXj@NN9N^jZ{N>RIC|)lMFPj+$SGIJuc5?W>5LSn$^Yq{rGKu?a$VS`0S<%2 zD>+ZF6*g0Td=&_!k9LQ;_H0Ecr={!jF1vi_=;xt!05D`7nlr~b&nSI1I$OtQd?s?u zf!a$ENvqUFp;G_74b%5k*F_Sq2I^gT>wZk{_+78R6R&DM-CgQNElcWXM39ahma_Pd+4aev*NEb zaX~$fPjwQZR8+cC2Y70CE1e#)JJXOHC9Mp3gMD2yRsp)Lh@{1e`0UJOb-n>-hs#8Q zxrCaA@)?*1&tMy2x&%MIW2!E;w1jU@j(7`rYd!98_xG%Cye6HHUMM+*Ftt?AQlD|u zmB?nAR17`SA59BOrw`lOb2PmU9A0#n)f^N?Y~XObVbNMb*z3>x&2o~O%%NOo_lPOE z9RFh6d|vue51aFFa9Z}~)|?#{B2n=>**_U(+!ogl z8`qVTS5UfkhZ;XDq`y$)f6F$q>H`jg?@z`N$YP!=rv5EPN@)-KU+&ZwGTIy(v>E0ucxbknD8U*ZNg> zRkB#rupo0*b8xM-)hi|AVFv~FZ;leI@!Yb_goK0mBA-d?P@efW{U4wBX9KnFc&cCL z_T&|xkXJ^^h*n*tVIb@KepN51<+5pXrOMRMXdoy)-5woh0TN$WePtkZlFjwNCzYdf4O7&ZADqjFfUfS}?8$AW zpWp~m2=c-Ob!D%@9lc))6UHk1EZuDwfw>m6BZjSaP^*nqUw3ZSR|DvLi>=HGIpSzDCTUNKXlxNUbLGqv+wrT+6E$Ld`Sp;ObSRfuFPZUxnmX$8t_<+?DHnW1eS_5{l}M;X4CP9vZO#E&vT(2Lb2<(A!!X{}=&Z1U7y39QaItnW~E zz-HD6ws#7^KGGZ@5RH#h|0`fq#EKXG$?t(hO)Fc{%`;_aUfV*{#xy=8oEnyDbK)9o zTf+jd%12_KzW5H>FNS(u>(7PO;-~G6cM6P-HaKxy8e9nH1$wcmLYSxX4w)5!RQGp~ zr8>%Hx?%Y%jOMnIq^%ZXzrnW!k4 zi)D1lC;Q0m2@mJNJgw3-6x^MeLdJ-N(w9kIo(ch7O$}NIqN3=@rEPjUrakQ9bt`ME_5)ly zh5Pc?uPs!Vpvg?Sn>zy{&kh1@_MVVVNNNkOtvO^#TGDkAn%bJp@&H9nUYqI2X6-E05pvxbXm_J+cInB3 zHU1lre9iH?{Hy@-ucIXWQaJJ6;daQ3ExoKPl>Je!`VHl_6ryM4CAefv4b(woxpp5h zBBYDeZ9WTa$yPpx?y9k~P?kBO8uz9u%M8jVEPOHdbrWqx53`Gtou#tIbOuq{z&C}J zaq6iDTT(3^9H8DTML)zPc%Mi^-%WYF)PA@^E>FKD|qDyI5v7@l=m!; zdb9TA#Vi1#U+Fw$4zIjR+u)Tj>pYjxY71swXzimV9|uaM8Iqcu=^`@T0JttlN`5-if>T!XfYT9;p#Uvx>&31sS(? 
zG-c^8+nR4{-=CT-tFV9jfQDa2oXsx$5(slPc7DE(H-EBWiyo5Lxo_u&G~wNTY-G*+ z(WY7NxFRqqTV^=#%?XJ+*4j6v@kTqIr8DYUISESbM?59L7xN|=q zhh)KwF{EZxtu5{WBm%MU@a4XMmG)zP@xu!-+TTXLGUidE7Lt-wd)1Kj!B)Jw&w<4y zNgrJ~7%r(!<|`~Nnh#E?B<(&7qQBpIxoPyOLz6=BlhhTrn&L@MPL&t%>3H*tW9_$a z=mK;j$0TN)ZU~4tJW*4~*!9cK=-xY_jv46IDfs}`0SB9Q*B}qngEQWg&4u61$?&SX zfP=x@Eco>Z=`kEqX*?a^cx8{7a&6A2cm10k0rREDfl?DO-ego%vx>{v`m^{|R_@UJ zQ6zsLc$NS-eg`mQOOK&>6z_u7;y%83*&dYO0CbXfNkNNmou%{>(C-kEHdAfgesrjN zyRfiT02=PX*!I!6k@N5xK&!B_4Dg*kO3rZ`M5Udn*Tkvk*@&uYp?dKc<}@9U&1R-t z>4tdv=!aX>!PTn;LuNbohpIwuPSkW<%)jZ))hUnm?oH6B@(0LsLzP077gS?COP4dH z!(*O`E0sEprKEGm<3W!##&Qu!Yco!jF#Rl~IxixCeLBk6uV)*;-)Xzf76%Cgci>;{ zmg#*;zBp2XTo})ndaf_!8U>^WIPn3n6Cq+XtSjp}7Q!@a&f2 z3!ijT59&I4+mwvh`;DU1zm7kOd}eFc<+!5yiLon9Q_OFVq-F64crwf%dE^TpEbM0p z-mj2K)9jCNy-I*>GZiaBm#y#OD>lU2m(TF&jrrQ^P)=L1V=4~uS%{Ck zJ#wB(NM#fR{YVvX4cI^$x$j*K$(geN|Kco+gV5IncL*lIVN1lo@*_HFY<_SN%F3v0 z;8xmS?u=ZW1cHXpB;XLlyT;(}@EP!6G4xIYM5}fvFTNF*HNEh)Bn6kLI6t>FcF;!< z*O!yxV2w6Imw_O-p%eOOHnP3<==R_XawMMjc#40TM>S8YMY z^rj+DCkL>mb+tBhQ2NxgTp8uKyA#YP<|Os zXzOC@pl@{8*z~s7FkU7G`E=@Ry@po6b84^i>yDMJhQmBNrUU0ZRA8R~P8F7+2^-6} zN35DOE|G6o!(E3#)LdVvoI823{EpcPd;$#RZeJn#IQF42K5r#F)yh z!1>!O&5b2PVKL;!>cgPPAS0ZtzvUi6E#R<~&+ab6`{+EwlH_s%lI7S0e@-~B0hFzR z>kjLOr}-~~BnH0*E|7l_^PvtHa+BhQI!Kq@ayJHx$ zmyB(=Z7}1joM+s+^ypm0bf2Ec(Ge8f2lOkH5J#H=M_^3gA)Xb9XzNrxz+w8eFZ`=qq%EST-~2y zKD{nz6vWCr@!9*ncdI?Vtyd9C*2p)DB3)pWRI-xG%!{d=+8T)#uP-a}kF>UAjJ=wBQ%6Q{zC$n#EATkoEMf4!=Mm)}Wm33Qjv%vEXB-&O`rus?&*NciSNV?h5) zw8+C;U@K5p5nr3EskywA0pG==fsz@Uyu+enu=j}w)iIg}z=T8Zh>J|SI$~psLYz7u z)^ryKKU#Rgx1+U8KdhBF=etz1-Naa?T z&RdYRz!#dAl|sxyN){8y)Cw+uyAdt?aUp<5_kniLE3y6L^#f9<9yb!xFbwkb-$>wK~&T{dWs_Dxp9p7R99u7+32%-HpBlUR_${>McFr zPYoq6@dTdCvwr*f;hT5{?Hz%moi(+|&Hc;@M8JBiwu=fRfX;V1@=cvGpLl~6P5I=O zt}d}Vulsk9wuXF=c5NKcYT8Ts5}J7HY6X6PQSXu^M+Efh)~AgT&ggi}{I~LM7Y0O- zr^ryd?D6KDXtaH%$#`m3urTL=uy8ODL&VP+O3@ytdtn(MuL0Qau0uv+-A zk@s3@6tm`g#E>?NAVyH&L~>vmB2uGFI*NtqZwKTFKtQhfn*(z86+(;a=D8I*VGNTl 
zV^_baU4|%sd3)unqO|;;5C5whN?XKkY5m3U#ABY%qtceQ3?1e)OJI`sK@ZVH8HvS zVhzvdL+tA@yf)ijv~(JVyi1YltDO?jwSDuzf=#SR_0iYwAf(M$aM^wnjAC&yWhk%D ztChbtuEX?k_nsUr(aKRu+6U>(*B+^wBP@pZYx!FyL@V5pk7@9ciJKfBlHrZvV}oOG z#Dr6zExqaR1e3s_zhO^hE3-Qj8Wtc{$GLT)JJgCuwzQmKDqi@Y@DvF)ioI23MR^ch z?XVOPnkMlHrS%=8(UW^82A(qTq^{jk=+Vg2%U+Urg#w_DLH4p>`KCb8cUA@!gytE2 z@TWFU-sunHUL3=^sBo+8mRQP7+PcCMC15Az0))v&XJN7-Jho{|`sBsT*-i?4${Z3{ zTNj-|0=4)DBWpxWQC~*CF^y#Kcm!`j)BeG|ySK%^$zm8@yvzc|Sy=PHX7Ke*8P-8i z18j29Z<$*HNH!^`_C~xf*?rszTxZHfJYpYM3tcB%&ypKtQ%AMinYnAI9+uUe#GM|g z<)>IGqDlZ)R$5lTH}JWU{Q4zRpQnr7Pp`=`X3h>MNYb?4S~aZQ8=NW9#b}`5d}Y{u zJGP-hrkPPz_fbQK=IcX+PQRN@)bXd}2~f4%dC57%1yB$n3#-W_Dq}nB7PEINw1nj}p*dzJrdn3YuV7J|6mS(l+k~ zG9_0TE`;)LK~COff>#N?gNn15&spq8`?jz;=`Mh_=8HaeVC`mZM&4qSx?6?$iYJD2 zoD{ic+69>woy6Ih0dhgOuqe^76q1)Uj%TFZGE;V;>Uwkc`!w|J%j(TyA0d*G&$A?j z_k>8uGTH~qn=HohD?fa7u3&#xvf&I9EH5vMi6L$_g3yB~L+)ZH!U>4XD*Q^*me3Eh zA9xzOl1yh;@;q;e$HnRp+y#1`ioSa=h)^5TUKfO2GaGIyycAlxdRtrcKI@PJ-KQz; zZO&GQfhGL_=<$ALrL)-45irsyFAT?s6EMa* zpZZ}yCm1Wtt)`yn(TCg!ROh@RGlW4{bcSNcyDMY@3@j|_ZxJLQN=Y_==8?ndx(~3Q=a>6i!H6T40yRrvMeP#-Bs?-tO%;B z*Mk!DD5pfh8a+Iby~UdB=`#2uuFoQ4f;=M&Qr-$Q!;e_rksJo9_C)h5!b5#)w{L0z z&EFQ#k#ZV)`R9*9-q02)2=kzxIq@h-y7~vXZ1@80j0hM{VZh1B<47BgH;NygKzn8b zI5C~;GAG;NK|H5a$#I$_Cz9wM1KS?){5j+a3fF3-?Z z^_3oRWB_@P$eQYm7BlI=;E&xM8c%J;$!!G%SM7Q#9x)&D&@>4hjV@dY&VtpsR7{cj z^=WtpWk&jZ2jxsuQrfKrp?TW5rTf`DxK2p7mRP`-XpZ(_cK$lbhhn0|$0u%og4zX*Z(2j2ODv~-VE z(GvlUVVmuT1+Dxg@=NsQl;1%#hI*be!fo2%0Md4-!&RJN2~gWj9rW>dCFg%#z9O|F z1LP`6<<^9NT&01Qi&`^{cNj$8BV2jaTq>g=Ho@>4(3gQH^aw{2HhCWwOgj=QeTv0a zyLk)TR88~K~D1N3RdWJGoUIGp8K&B>3rRwm$71C+IO0GO5T^f#2m_S(97a=mU}D>UKehJVPUl)X9r|UI zKI&S+jP#EK=AG4>oO|%EM3h%MN3bRfV_r!3L6o6uV>fsEpTquhv6fdWOhvu3p;YQh zF7CBaV81?`gDiwq6Ha<6W*v3^Sw44As!N>pZRB*uj+P&q)K%U}R77J^tgZwVY&~+Z z7uek(GACS=$$0LpUJvyNcNf5DYiX(XJ&Ij547(F-6wp6oSFR~9L6bOu;p{9@vc4`< zC09S38)ua+clBUD|553HtLz;$y3^Yax?=Z-+hBN$$}&B&$xGi>Wlbzq=_5vg)~~EW z%JYxrQO_SFi5ca(PW&jD-zI8ZeSKIfP-1Bzcc>upqptA4tj%e6iO&j0>{mVCY$hss$}L)MvJXr(~cJR9w!w46Rn2@g7k5-z!U 
zHYISsUgZs7r<;urlehCe_1U#C?ECY3#!UR6RX|P3r=2+a-|b z-UbR%=z}ufIGB9wztOZfC*C?JNRAzns1ExX zhUwVVAjF)S=a7f@#__tW-a`EO)dl}xcz^`&PZ&$4qVJ|lJT3S;y_Dq0#4C&krK4#d2_|+*m!v(e28pP_YtUt+8gAK^Os@MX@3;d zZ~G?xiRLE;({NW67ox(R4?ymIfu~-(T7cELQN3LHkNFEbovhi zxL!#d#-~PTon^t9OV8~5T047_ zsWNWYwQ5HyB$hr60{s|(5XF-AqQbO!Fyb}a?kPw22yEXgj^Z$H;-HiaGePnFQS351 zK%tW@=dw20#nTvYiP$reU|C85S(y}U7Z_Y3D&+bu35!NI1pCL>;I~B^p8#vDT>G1? z$iLOv|2B#I|M3_2HB=WUeGMc7`fx?&!S9BBJ1c!PP2^oGj*6%3N(nOxmF$h|dzn*` zd6}+E0RCt$PqROoTU{Vuz+ca!UE|j~hi4AH#7g2=fBgQY45&t2wB`roOY^&VSH@V# z(&8c8Q+|eYa<|?te0vbY&Bql|wLk)s?B!(_1`vb;xd6-sKoDLAP%Z$1P_n~bXd#1o z1>kyVMHaQoDFG-XGK`V zShoF^V|1(43aqt@!-Ubv8+y(18gG@*+Sr0`2@xD-d=;Xu5)pKrE{V$!+YfgjYq}zj zU}#+XEB%2O-3Ki{Tlm#bVx~;N78Ro0GwOeV4ehI+met%9*YZ1x-x zF|^Z{fjvWoCEoou)k3*S+%jwUM`L5(`m7U4kjnoRrTw(8M_a}RDbZTbPpUBpd(CP5Ttepy85itERle_|K@-P11~%U6@JuPe=ATjlb-7dL)LSw5Qp3R>j=rW zjvbWwxg!sIi*d1H@`{h+G>vf#xh1YX6EPLo?sov%qbxO=w-GzkZMIX$5(lbvH>tf? zzAP$fCuvV{!~5 zyH^k|fe9sH02y0qnE-Hn+M%O80FA#sF)IO}@$G^RUk^4BJcce5Q*gupXnfedU-cOp z-^q3J0f5F|pwZ&^MP~uxrAzAn&8Y!QP5=B^K=?O5b!?B+g)($x@s8T-j$NJ(XX<;X zKL50VL{WaGrNeIIVrA?(pl{y=INUroDg_`CSQ!w4L<2+um_XBYfJlJOkW7?oAimG6 z!XzyNI6WPv*z3riw;Edbdeuvh`=hH_d55&l?o2hnlS8ic-}_YI#1|s}9cIzrE|<9? zB)VEb9hJ}j8afiG{;=GSY5KFg16w-19=;UuY{^}&7C4@1!;R?J2$XxhFn!+_TFWZv z7=C<4CCu%<*s>QRMsDtlsbesKMiFV{Y?S&AhjqGD&!~)BWvRmvzRyK(F7}h_&ZcBh zZ4J>S!yjj>Pf?|QvK%7-h*WAY`y~kkgfRiPTKr>C{(c4B&DGmMkltCA!^0o;bXI&& zoSgqPgzh`&$sO=*&wC2RAoG>KZXX;zqh7gq?JvlG(CHIV`&2R(#kc?Y@U5ZXsG*uF zHr<{3LU!ulDcUXU*J3t=%M*r0Hiv%Z9L<+^!@a1h>sD#C7K^Is=|<% z!ok8kN>EvMO~hB;d9dXlXkd_OslORAEEKjc%1QKC0(~-TE1e7y6p8KKbbbY?BXt-? zWVSe8^?l@D;&(gMl>U9cTtrxSL%djV&;Omz@Bfl1`b{KR|3HS||J?WxZrEfnpCzJ? 
zr`)s~$|z_F7k%mKkB*g+kOfbQC%7$TCgMLh)@~G|J%k2xK|cVM_t9(2Tz6^mtmPF> zE`;`!^4JiX)7*TK*0V8HK4^EFxxUGh8lPMt|6;tb50J`q3n$2zidJUOkJtxG9E6rU-hMvwxPJhjQr z3xRtq{Hvk;1B0%q{eMWaQbtMSrm=}RSAVsUtW3D!~jpK}Li zB)-u09nf!^U8JUz%CyJ8Nv_|Lm;x5pZgc^YKG_*7N~nCW#6 zlMpgPa##ANpHliFzT|3FnRDbcab8(cUPcHUKY#Vrqy}w34nu)#GPBrE@XST)^jreh zJ>9nO=Z;M<2^02j(%w9Jt56MPNm6C;CcX+1!U4_JP7zcgYh9+7w!f-KN^4sH$}p1oXG?XufJ40y1Se)IEnNH0d?ab z``8*E&-7(wlz%YZ~i=&Q%UBxT3*Oq|oa-pemkYVf<$n(!K zLMeG_1^^I&oFqf1qjIV(A`@nVa?@CY7AY36Zs9HChqYp(g-#)(cH-2$ByA67#;6@} z*A$eUdQ*FAv{&Rp_dbJ-5nI-1A_43E(i971?0&HMr$N~oxqrI=Io|i=kmzgf^ zogLPluT;(6BA6!f5G&5FNRW-)A72{grnmPc{0#E87i{i4-L~{W8El!1Z?7n9rj-QS z?u?G<$SxcvVnU*11L$b$D7P9x?!kXL0)+4z&(z}UTe!8qa(gGs7osqHg2YJ@r+k0C z4%>BCDiF5v6FW}cfd*y@N=Vc|U7%B&eQIiK^rt4b95Hx* z1^zuV8IGEp&4uIA?0LSbet!L5WqH{gU9r{Pl*z;UwqEEK+-?edounj=^XV>Vml)J$ zslf(+R$S~S@HF7QG?fL3nzT4*kwuFMC_-fpZ=)gb+Oq9E#eC=Qc?U;J=Qg&x59Un>E>%|-gXKe zaeH>JmeKUrO8z!>&p!f}f4aOb_++DmC6OGGt5NM%nW2?1lldzH=u2hvXR%3BA4iqS zWV_Hj+PzZOUA8zl?cM0J;dLGi#(AZJ2cgD{HvrRn)h1k2H04b)@z` zk9_$=%IdF1`O+GVwb9N+f{~W@?Hs`as!!m$cR$^4)UwHq3*uDvumDtdObcyc(#GiJywGjxCV2AI2IMpCM^iJ1E8g^pe|926FwB3TR&GeS!ek zqI+NM=A~YsEs@gNZK|LxSV*k%8z=!`OQ!Ck6g$n3N4EP1DA6T}jwNzess+C<)Rhr= z$0Dd+qf#2es^~e-0aD@62mM`Mw0DpA;clUS8q)30LqgcWUk6Q)Q^<%0h^>XKK$PHQ zVd)!ue0w#*2kr}bZlb9V!s1K+zL_Zm)=&dU_UI{OmRUf>^ksBEHo5T30>)_W9N;}5 zkzVa+>K&UseZtlRHWv1qjOp|Bf=AxOurI-GrNFFYzHTAY06)N&aAHwVB|3?xP*0)1 z?-V(MyJBg>J3APq-e4T6Q)IM2wlNZf&$J%~bdMG#0TP-|QPF7sYB|6NSbNWT)(?%*e7%-6Y<)IQay@)4T}V^}I@A=IJFu08OV~UZa+ab3 z%nfF6E43Yu^Ci-Ul0&~pN&^q(&g(I$O^_{)Rw);a_G>;X*q z!T6tFIdqkl2;h=G$bwDc0d;j0DZ>v1$%h~T9_px`ScQZ{lpYkooV+Wm)>9`?Q-JJ= zVh>~(WVt9bxJ3q)*rN_quYF6S$2GSOf^^-x{rLpXj`di`%;m7S`eA~x#Oa^JrLY;@ zq{pJ=LF0h)Q)OW}97FqcFU@udF=9Z^odg;}dI4=(CwnmH5SC>sRfkCP<9>+d7w zi_q15^me4qPnO=n`_m=aJD+Krkk{nWJwLUgkt3=x289)|yjSQ-nJ!<|b#@v}nIQ2E zwstI1r$;mR+HkTz&#{4$KleVCDN=2CE_Huy$bN7EH=?&sjdg-{QQeDxhKWeQF8$-h zr>~>b$4V_Rf-{_{W@6du7MGcF?I)U?uA{>7@a5!=&DT?0*%``wjGS*tX&ZFdV+-80 
zQ@5^?vmsdsDo$zZ5V6}X?J#5M*droohZXs?rfZth!I)XiZreuXZKG2p<}8sd z0r~WsyA?vQtnW7$0#YkBb>j zl&;OM?p^2fp_D4u*q2u|@>Uwb#!h+KZE`~hpel$)47>C}t;9y;`|~(?KKr>?t=Tu+ zr6(+V(PjG4l#V=`b;gra0dHw|)xLRuI4BD%nZ;wf^_!~H1p&Ob(u^3`X98Nx%#pLX zE@79WySQcwy_8||oWnP#lAFzF^4{HNwJwI?f;o8?TuP1`^~m0+mG`V)du-|$rX}k8 zFz3^o9qVSrOOL#r;@yl2apQbRT2R93o4a5Ft73Mp^y02WSxv3A0An?$QCa%<#F_Z_ zM{3bqk#UuSl*g2h5f#`khj2T>e@5SZ_`e7z)W&cn6cepw9b`nxak|h7_AA#9FJuv%+PT(LU zo5yHL;xo9)#wlx5rPg!}Evz{isBZ@>Oqx9s>ckO03%kyfy=F6oEHp^Z799;^z0C>1 zS#Fdr$B`0UF7>auwTNbT*)!ZT?#ioF^yC?GJ$=Z3oUOr1Q4k=Ep!x{0lr;2Yv$RqW z-i#adBHg+XxZ7^;vL-f=)sm&9VEdKGWfsHN@GEQ4MWKOP({eX@&{t5Ov=gnG;5oc= zaaFafuVY>IgEQVb<*qtis_qE%7=H6^2S#e4&b|ei+JVoNOYG0H(UaBJCCPhsVjwdY z#qJbuat59}Nkx6=D5mhsU^tEJ_8nuC<$8rBF$=mRmRzkBs57j=W=q4krO|t5v*TDL zZE*PpQTBKn=$=-hP9~1TV2^28GkJ5Tyw(+iXdP6V;q%fbd`W2yNV{+0gip5O?<%x<>19w0!t?N! z^)4bgp3iPh_&=vq!q1r&<$7ib-nilUURkgSU$#k9I=Nf=uEq`w$AXV}OJSNg)YUp4 z2w_WRZZ;8-UWH+N-OV+uL(@|bd}+TpP2$_!=kX>W2D8>cJ^sEi>+&V8_*J2S(r$(K zY_57AG6y-EP#kpS6;ZhD7%eYd`#TdDVo9=RNd#A(}Zn{JAi%_1HX*%+^s(Xj+ zoQVS=&8I7+9SXQ#sV#dJQ|A)tT@w5qM?wj`+6hvZtQl&C0Ca%gwEApIDNUL*O?j?= z|{bkev->C#GlF{Y?Sn(CB>|)sWRel+tf}Ib)S3_ z55+r_<`4t2fBaTQGaucQPgp8T$|9QXlTW`b1p0+n9D_$RqA6{xH;NfikrccLiV+@W#bHw2t1%G{EU@CfAY;OHlVRZljMep<1R!L zIE%=54PJEX`wse#P1vvgqVCla>;BM@=kK~$Y4!d8<7?IVeTycpTMpw`Tt|&41`kh9 zvu7Dw21#=_5pX$QCYX^fFjylTQ_?v&xgCEK*nO_3{CW5Jt3RAw+133oy%LFCZ*xcl z;1=>m0FM!Sc2;zLebfuQwL_SIg&DAi48L z=6_$apMPa`<&S6o?d;yq^n2>#X?bQx%&#pf;K2dWVj2}?GNm)*P1D>#p&e!4LH=TQ zy0HD$RsoR9M2W1YYr({t-C=OWvT~Wu?Cdq^%-i0g|2Agq-^JYikI!=_0j1R>dxv!V zMxSY*VCw5fg%a*+ zYv>U%FZkq2DZpC(4p!F#s8Z5{v*6|f3@s>Kns(B*A+ZNAFf^m*-PlPM^naQKfQ7_; zG=uiVQvkB)z%BqaM>8J1!&!~pZ}}ZWcO0`=1d03As6Rc`-A(^n%`UB4-bv`XX>zw7 z^jBlj{`}OnCD^^=F3;cbix`7L#i%Gc>lgayf_OV3Z^p?`9h%PB?JgotTetIuj_YfO zYlJ=3RliZF-Vs+&;NAWNW?xuNuC_Ry=IV1i{y>?4ZDE7e@Pzoy{s%9$7ns1#>VXcd z?o@B)iylo`L3I6%gBy?=>}(PAn@l%7K2_D&f*63<{nW|uhQlA@^9>dXIm_~{}2Tk-K7SJeV>}ENu71=zaP>-r1bdg6{tiiFztuC 
zsVeQ{Yx=-^*in;#xf=QsoRKt0xb^6JuAX`c78^f0(G&&L+5|dP%{KasR6N7^AUs3p zo4kXscE@QG8j|V@scCmYA+B={^GNRw7M6~oCiElEFPnFH;0=10)j<}168@dW{pcYM zrDStcH`hwbT**sDEzb;3;FB)GwDf~Co$ z-@rBPcf5?wF8D_CIrEw`+^D;qw06&3*SNEGU-TeFtjM!ysoOQx6X7nA@%;%%62HJD z?-f);;^8Nj)+7@}>SGk2CxJk9_ZwfFH}yea2bB3S8tNIxBd6$(x9<-7RD!~Sp-^El83Z@J!5=7!A7(suMOc`}jq5Cvvty(;qk>@LXyv3>!6dWz(lpTu{0z!3 zza0<(WwEu!=~$i+i9fV)P|Spk0RMcMU3l09mF-Rz?l19jU?C%_!#(x4Pg}0PL#D#u zvDZ4dO1MZ`yDS8?Lb>v|x$62R3nIzn&_rmgC~p2Y!g_w;I=J7iRkf}zGH^B`Hv0Eg zbG4ip@9?ttuetYIE)3o(b^84qWc<@Yb~m3n;;aX!<)CEAkxe$Z zbht-3Iir`I4vA6;&v0oD-pglGR1Q8l`(D4*zE`SgWYM+2vs|LwVcQeRpD7J*7QF&m zN_=2Z3g66jz&vROXmsGG0a`0&Bna|&5=HPb^`8`-;Q74xBPYfj6v zR?IXZiM@|D3-%cUeXgE6(FSAQ!=_Bswg*%=s}iQXeb$1q)ftz(CJcP&3u4@^3f?y9 zPB^d3!-%srL^Dg2kxM@(->5GH_^dZ-VM}bGXNVN?r*!9;acz>Av-pksHlhApJl`H_ zr(Cx>;i&uiXu!sg1$RqQrLte%UbyFYG9nC=IZ`%2Hi^<6aE>y-s0)B}&Kb6|&c2`n zn3opphLhbKg~P$o7*D`$voCt?^azF zDp%{>?XtKeGip&I0@hNath7j4sPYSjk_(zdE(sTEs!`2=3TL^dr=QBYP@=JP5|kxf zXOTsN9S`3r%M3Z&XHil0+WM8`8eg2|8>W2!^ZJ6cPO*bCI>gA71J`OK1uDI{z5OhS zq;~Ai;@Z)%S3Bi#O9hR_PL416jXP{@5)&XB-}F)6cbgP4NvM* zA!(J;5=p)%Gbn5KcnP=aRPxr1K%^K+jeLfa~e(HiJux=KkLnZ7>Ub8=#k6+%gX(Z*$Ae zKMR_x;c;%D#3jG=!c8a$3XTk;N-u=$;$~o5vXLBUPfU#X8ZPziE0=k99&wT9PMw?%MUZ=n=e zHF(8?O=-}zS!n}XRNx?Kk)V7Le_kFB#MO7eHTLwfCQ76@hOp3*SKE}Qvrp&QW6^GC6PW_;9CA+Kya5*_7i_Ci0E z=ENeCN!xdmNvW8BXzh%-?8SW#*db5Pq3NMfPtt0ree!%z-w#lRqXIrCZ*&nOPv?#4 z?`Es_5yIho>X8q+rw(IGO6)vmacA(cZ`O0mPHC?74Uhdsy)Bs{bp7&p#K+J-Li=ME zmOA{D$Vc&$EPxYf0JmdZ%1G7q&7vSFSyk#IKG^Q0(UnJkgz_6$`o^_Fyw7aI zGeSkGv{^ZxKavVtb%OjZ{AWbO3_}a6<<4wDhyNWV4l^D<94a}A_upS*jC(G< z^=FQ(aM#w1;+RT>7Z^9I&XLthe%8)|8^6wbDB;R@#kGw>UNm~=n3jfxa@Q+%^K=U2 zM`;sFcQ3)RF?4Wom_Nga#7eC~Uo%L%sI@KG{}{ZcU{;V%SZ(^TWmKE^^1GmC3DtH@ zd9@sVyYlr$Cw*NKsqWJZPugDI^21@|H0&Z6pKYWQKc&lg>++xY$)@1TqgkKEgb5#D zdBkatNW~}JXOIoy;nbKKc+SWKd7Ny_`R?`8eMw=z(M$#!g@q-(8l*C+xpp67Z5Xl7 z3@Ixu_kDlJ)^Jux4Ya8uBc}fn^GA&I85xv`>PkV8{FqL1-bZ-I6=>}pwr#sFt2l1i zOP$v! 
zp)$Zajdg`aP)vHpkprZCQrUHhkDJ`;%Wm8jKjJn+JFba|l z2*!L57{D)waYx>bH%Dv-3Po~;vP`4!-)9VEyoZGMFX>3_>BS-zO@KphS+NH&#lMR- z$L6@L^kW45(X^S-apDZ`!8PEi*ia$?5U1JC@~+`2&1YWwHiW_(Ao|xSu=6Bh1$V>% zpj`lV`FAk{=r)S9K_&<@<3@R808~?H{gmchDS|r3`?G9Oe)Mto=+1WZKbfoEkT}4* z6Y^`WzxMTC)B(k@=Rez(JTcbnSZAV{k4}w>28uwr$(CZ9TDV@7UO}Z9CbqZ6`an?YrM|zVp?&w{BP0Ts=q6?y<)D zv8u>^tZZEY z&i_VRV^@F}z|_GE03#p(dULW{O64-u6yGs( ze)0Jc{zmu^Kq2^W^I~}7;P3uW_j9!x@I(9kwtIMdbNkao{8NP6dP5B*q~2xewq_M0TnvbcB~Y|bJw z@sIr6(3vpaS$18Hv5Ts?*+whq6cCp)j@e}Q3Xq$BH;_-{u7~<#P75w>FYrkvYW>num z!*||MXBUL>i{??^nXk$r2*N;jw-n^GGhKHh)x=+@x+U5WCDiJw8p+pFxPSO!;T`N| zRIWnbZzy$lyTrudLr|&8BXF?EyJr6nLXXD#5OQHeSMwCwumNo*|pwRRTVEOl;nxW`3z2)7KfV!P#|qp z=kV~bm1niRQ)9c5hPuE3u6Rl!Krf{P?c)mLk z&A;5&_J2E`1l@V5uU+SYxO*Bmp{W9+Kl(`jG(1M|(H>;!^}}Th55SV}k&3#{upt8iPo+jM4Wemn%Ve7@ccccWxVJ804=V>>2Ue% zkTJF0E{R63;juyO3#9R*Z4lIFKCs;_0xjN;Sf^xEC8O5^qHXX3Gsy;cfB_dtBV(1zA0qAZigz83T=q1M%J&xN8)eDY_{(AxWl^DXD));o#-ndJn8V)~ zp-^K25#0Z@h?&(ps-fkhfYMj&s5;&6ffHZq$L z{z4&%KHHOSqPVX+3%fBq3?mPERNj%2K7$*=FNEqkH58Lo-BX%LwzN<1p4NZx&@1x+7#wAVod5Gs9RIR-`)n^;K;m-gF{)sVN$W`)1R{%l%*IDkhpwUOjMc1MD#KcRTxV!8}oh8Ro zPtiJ4bgKDkXQOSZXwvkj*Jwi>RqF)&paHKT#ab>fSM4Yd(BDAM_!G%M#qnQQhwSN^ zT1f@S6yZ+mz0DM*VcB9s>DcDr=S&A-6>%;7Yc_pT#o!g{ssA=LvVLh~oUEv>gcAKrY*> zjFwrFzvzF~FvE()mv;J#7i=s&iWri`sW28ajbWqhjnP|E`fA#VZH0X;XT8X5`zQM; zHH1wxZq@A390raRP(8%LjcUd`IIm3r%{kP!Fo}NY#jD*LWWhrP%8j$;sl3Zs+HqZ3 z`u1Opsx#J)0CnE+PIZQ^4OTOIzcw6irG%+FLC(;K*LVtptv^FraoE}3_A0w^uLN`zpG?K08m{4T7fT`&Q=13$4 zysBL=hSEpOi}`BOO)3~(dEItU70NV1i9upXroP&XYVMk3PF7x53tUs1WHj%07&W~C z`PDvRMl(Z|145xNWJXhX5&3RJ!AbXhcp73#%fo0(NW#c&9eLl{5q;8Dpo5#n_$4|> zJO4m|agMN;EVr##UM@tJ6)sPL<0_^&lR`cK-hlO48ZPAcNEn1WIw#7Y^&S!Pey!A0 zP#muGUV7}3?Vnw#>GrO~%$lG6-*4K}&Xb>2b9B)+7VNkaHkK3rm!zkB|ok$jw5G3oiD!G3$C<`H8BwTh=&Cc_EA5?hjBY8H1j=Ju_;+bNp zh67_L-zkj6=_PcX-7N~_Q%3#aS_-Rv8w`~s_mQ`ooQXGM%u>PCx^PxU4#0#aIj4!Y z$`G>h(8LcADV`LB9Tv-in(-7b3hKmDHtnWOQ)K?+th+o7Yuzfpzh1|@T%Ufgvf*!94!s6qa(C@@)-Kl{D55 
zrKUASe^c_%nrX6;RIFo0!Hvb>rFq&mSV8%9xA{-{*^pQ^x&GsLJlf(C;$<7r|(II#)o!z_1+97zG#+xbJ zP|oHRT|}eE*WQN5G-KP3g2OaQ#KW_Lx<3B&l%E`_7pMw9S zEdSRx|Cdu1_jHw1b^Yg(iTL>b1N=J}jhX%>M2w=sM9f5trpEu`KkxkS#(zgf2?u-E z|5a^H&qU8e#Qs0t`(M$&^Z$@>{9iI6Ml~--01=~-u?672h?R_;0sr2^{6Ehz$^*=- zj71zgiFE%#Ohn9VtV~2)+)PCJ|5YyfkMv)?3(;2p|N2&AR`E-21nKg=O$<1bZl(y@6Gv{ zL|mhDOA9br3KNT)JxgQr>nZrx7y3XIdF_y>u&`%BSX4|*%)YUMUhF`sx$%K1454pA z=x;6kpcVIL4k29&g1|Atrk0jp%zaZEgF9Q$MwTWZY;UyTodxW3FhusocE%14CouVF z1@E(T0Wg5VkB31BS%?In4G{dF=0}gehNP%%E^MIR+pwgNtgwL#TkGq&Swa1eh=6xY z@f$N42Uu70*W_fmsTHxInd3U^Y$M|z7>pK2Fq|K&oshkNjE>ZroUEGFpOJK(wam{J zpuVBuT;b`Jfqrl*@^{97z@8lbzNNVhXsFxw!GUklEZ`hq+nA{sJvup{d{CfMd)t{? zP54TJ=f2+Y-s!Mb3IT(knLSfum`|6WCubH{aFA?#{lI@<@~a})Sc*oRly^f5sJp$`sT=S+>v-^#~%u!}!5n?HYl z=y!esyuWDE#tbvQk#|<_5O4?f4@hqf;9C}2?%CRY8#aDn7MR+$uK^jz1q$LqpMJj>05Q#NZgBrV1Fv;)eE@x5>aX>J^+6apzS*!-n(|8i zHC)=&pwIvc*%OR_?K66G7(y^&`VKb%VM_nR{kr=gIDJJlfnehL3i+nH`cxzi===$P zXN7$UC=x`yeZ~D8ZI9K*!~cnRx%qlH(f;J*XnB2@3KH_jXTFr3W0%@EV&3u7w*G{f z&4Pmx&)WZ5jbJ2PZucvuMQg1uuhCuH+(m$M%tUkXA8gmR5==696f}iDE23uD_b+gF zJGMwXg9vbib(Wa^1Z?Q1*^lBXckG>fn}Hd*ZOxQfouh~5^pI{*4H&{6^`x@u zIS-rtwHi*rf%9mzD@eisR6wSW{Um94^1NrIj^U)p-6fjG6*{)8jToBn=8`%gwW4$k zGlLn4&#MvX(S^A3^=oT5{eVG8OtuNfjk);`yZah~om-@d9x%^%fmNN>*3o-g3a_7| z%Hx6I{K{@A_D)fxBQfS#b2kaTp}gB4<#PYwLWYwe(K-%1_*5ZMW5uJDj}ENc1v#o# zl%L6imjlvthZ?R*QcN&>c!^0oUi;IkSx8sSzxmQ@SD22 zy5UVI-nj=c_RJ!z0`4Y$e1~P7!8BR!JLibqA$R`|Rwf37nc>V&nL@dg8d)e3@U3w9 zb!#jHNjPN@5XiI9R1)}m`o98Eu7)TH=<#J;7+(j3(!TKp3GUH73I)Ip-9|(ouDT>} zZwY?G$GLUED;}v(CX_qyvcs3vY;H=B-hCPn_xaS`W=5(#Zm2a=)h+UYDU)5eWV<=` z>PcG9;9?GoMccvh-K&wv^dWc5FR@kKwSN!1ds5jt)XPR4 zW(?-R51A?4_jSEFNM~$?I_{c5J;MtiO(A^__BIopm-{3+VS+`=@@Fo8f$;SjI<8T@T~`)@(0`2?+>gy5r?T1MYfV35be%p^16 z;}2H>2Zx{(kw5=9&mvW1`(aIh%Fba+DN_} z*Se|bNyU`#Rc-q)-^4bRE89#IU41r`XI)i!#@6t=uLR;}OyKxgpwas$y$WibGvIn@)l=Pr z?nU)-kcNjdVgd%QlEk(rm{u$p^~G1>mkTik23ck7Ixi^M(c9!nOC(w_h_J!X^7{je ziBL|c(y|7+SK)v^1gqI^|UMwbNtKE;n+k(OR 
zgG%x-x#W$zQ-n5n3It3x09gwz*PIwH%8IJ#E(B)fA&u!91PvG_Cv{v^E(%;4`asz5 z2Z;vc7PV%>N19A*(C1-%v^~?l@@j61=tK=6r&Cj(q5?0m)2B;xA|utctOT`;xp{g~ zy~vpe{r#@0;#MnJ|9W&siRK2jm6ytjI#1tH(<1&Bf?SHeg;F>D;c2;0;an>aJ#@Q3 z4p<+mNSpB6D;zPBBvdL9CSr1(MBrbGY;O_d+k3D%c|c4=k6{=b5;vzdh6yG%>kv@k z$7R_^bAXP^V}Aji$%YI0<}*K%wa=WcNJMis@&=&~Hf7TWS~?L6S~EC-#L+auZ%Aq( zw>k9aDfXwaSStB#wIj|~8{<^?x&;rMo0@Co@1^_aZWD%tuK6fj#G#(s75a;K_bvhx zEu7I_@4yEy;Iy!ST(g(Sj$N_JKPt;`{*tbP#>qe8mvR#9iCxALC>K_pAKMcUle4CI&ix;& zts2{Eprr3ze*wnIp~i`xTq)PbzAsJ$eRWHp67RDIdK7N0N+~1BnhN;?G%z zZ4-zj==$&$$~q=%cXkXdHf{g_P`mz&?l-i(`}-&fgYN$C84ZOM*A_@OELQk!lLdwX!J$8s@Pd}9 zvxJ6@n=HmczVv(0u%x244SagSx5B1O&|;Gt(+&C$Tu8CLQmrc8s8f6Zg#i#P+YNDu z6W90I5_OyFmw8NJK9_-5oZ7drJS3ojoSLb(i>qnSg&MwmP+fKsuP??Yt#mYBHb4x) z_L_^DT09~wU-km7s-P^cs25tB(nOUo2U1C!M$3*L80+e) z;r{(c9Hj+?MuKjpht?^U%{3>33=P0|M0`%-=G~2cG}x)QxyfdSueIum_H@`xC_}I@ z(F%8ARRFJqr<>J-?hX4dl&{M{-P`HtKcSanEjWC~BJ=M{Ypg|*cmx$I-1ecD=CIl{ z!Ge%L{oz;lzQuM9b0@z=!1gkW(Yu$IWdadtdKC;EPS4Hlkh@HiF+`s+E0nbsfjrix zEZOboX_`eeXH-jCL~JMnO7_S>PzA1-M+>}`XFQYNqdvbq+C2?|gm#`HjKtlG%etou ztN9KLmF1+~fEmg^jX2Qy|$QC+gXp73v0c?Y;>>aN-kmnkP08v^QLHrH7rJqB$0n9GWz z1#UXRZQczV1Joam-5I;z5$IX#7m)JPBMHHUy>@fnP^hrf!xF-AZcNioIf@8GdjS!H zqWm}2S*6X_O)-(Em=<#_7v1a^=Gk*NNg&{oKsVP#Xub#&8=u7o5gKc9!2nxuj;wyc zgRX@L8%7>4i=bjlONKd5=GQjZ7voqXv$oMgcBsh&G;B%4wYK>bY_!mN0nS$w>Z5h1 zG^gIz~TcctHU&9*~Q=tmEs_dg=73E!ykLaNZsExl^tQsRQkZvQA_(%x& z)@89|Go|Cl_yuJdW`gzBrgfrbSO(iue)!a#dGxr)@Hc@NdqJuVf6eg9+hC>AowATb z4vlQyIJ(ZmL`*}D;*4JVep&iYg1u$-NAPS(>{TU{Q;!ahuqFSHqmDiDaWKE(*T$e< ziOAJtIiXa1BIzfpnb1yOXtEd%%4ht2di<+P?itqahoVKx(b~CxYXLjTiM`T=>Eg=S zh^2J<3S6K`ywF`j?m+SSJ7+qZ6RGwW%<^39Fy!_IA$D#_X?q)(<`O=S+vH*mWBq)1 zvw@hm`T$$o1e>UTO;bLmL+ubqF~Stb@g(5{LEii1>O}1}DoZ&<_A6GW9H-vX9l<%* zlB;Q~wS+$GCHn2*n>9MyGyf70H8%Q{G^)R#O~W6Gf(#Q0PY3N0g!dBw$yNHj8NYf5 zxfL<$5+u3gk4;(5dz1)6ajgtwBLDL>>899CLyz`d8Mx-3^s$ikm+!kI-x3C$;kTShIQnKL>PiU_V@d(cr8o$>{ z;i39rPYao09Ol}`w^nW^C6z>5J3eag@g-kn8`HSeK0>LQ<;g#_=f5GFnwZ-!VG3a7 
zXso|qiJp8H5v6ag(&2<>&N$!EtMw5DU#Bg}u!}4?-@*HPU6OVqO*p=%iN?hYwnCRX z^SpM5@&n*8%+4Dm$gXp}gNsGKEa@gdY#?>^)%fSUEt<#KM)A|zO^Q1#{1SS%p6#J; zcFon7%aXrK_(&!T(;bB5o$R}umQHO%pXhqaG6qKqa3ygQ_#+T(11^Ys27Fp)#~D zV@R>~`)*_Ib zo(WL?`_7;5DFs(9SqL!&y-12u&Jf6)?~U@Dj}z#ej~qt5?I>m^FAy)}XH~c7YVK8F3$c_0SD9CNK6Vt*b+9odPW?23CnB+p zHuC;W*adCY>Y{aas(ygFSuEpuE+O*-ku<*t;h{{Elf9#0GbzpJMZ6bal|f(Rq}Djm z2I#FHuX_qHpK+pu&)j(phYvC1~GPzJXWb zLPAiY+Ea6_5vu*U|JvxGY1%;h^<1SxejRr>?sg)|M2x3S%mELx<59_0QqBhrY!`n9h>xPXXjhTHuixn9Dx*w8n@jot2GGM25%>C7*8QoT zSeimjIY#-RBz!X`L~JOi>Omt=(DrnyY@z(u9_nm<`Am7P46B!U(O8X@{nUW$@5I5K z;dBf|rX-jY3yK|Vf9-=~rCxON7Re&t&H!pe1cTmWQ^=T>LhPA2tlP_DS!iHo$g4w` ztHLCk=-{;5%@dO%b)JQHxCG`jiZ=v!dEI7{TMIdlu{0w!3u|N%k{Ktg;Q}37Wc_!iP?2$1IORPww5cvFWAY|0IF)Q3Es|Nw|blI)Q zFCWi4Uvly%bxg2>8lAs{=CenJy*#)(6$xfAQ-*Kk-g<9^U6sc2ZfFW~tTQcMja>Q1A(?$cUgt@V1jfl#o@(Dza~K2* zy)z?;G9J~f^V4KL_~M!eur@kx?yA+5kn!t98KPZT|Gw!FLwRRqO9m`Qe1vmR9Q5Kb zun`WN1#G+JDy&0fQN{NCvU~JnScA(p{N!@+H49+OQ(bPoiLD5!13F07MlQKglIS@D z60zDPenxPl8;a=J(L*b=FAl|ntkc$bxZg*=pn%3dvBushvTIb-9#;hjG*3n$N<>1F zOpauBQ8W7QOtsf#D*0QhBlrg+h`sFm4cB()^ftDgiTu;l2>IKO3(9bQzg#@6csMsY z1IZ#C%w^sWpcTa!Qm@-Z6fWD1-$X(*Ja~?9H=8w>ptex(V0HVPpC_h> z+>=6Gc5?-n60~2ttV5d6EZ0!rjA~m@zKumfkR$Yk`;g_*blH2C;VjinXc?M$wmFnX zDe20S2&z#N%bK`1I2LfV@ue>k!(||W$X*!AwGp|b4{b8Nk;`RJYzI2q4(85$LBUyC zux&Fzns|?`p9fwg*#*YyFZPMJ)*=vG%bc?!IJO!feaO8jv&>5#_pdK?;;(C=%LPbJJf@@%^9{tvjwWyabkgwM1}$ z4;%elA*;MK9X%^5E-bSy7e8`%F|zKM>TIiJHc#Of%TW8<2oMd9 z)0MGsQ|H^Y_K-B0nRUyKFU%xd%@_Jxsw;J**|cgDaRs?v5RhpC-bt% zIpsO>#Y?hu^`|ZE^AMARVmPSv^VBq$ORGg27PND5j{T3osGGGS2>2ZKqhwb|GUzNf z4C2RE!4?BLVKmW_!{YTVh(VH>+!lBn&1ZuICoRE}Yg*D}8;bGh)fCiP7i`6B$1NUN zihGNR-ni0CL(i++PJ7QicKv%ZZRw`prAbt44VhGLGa)8+!lsYdbQcNAN7?5s$dJ49 zxOsDV=l;FIZB2^gIY-pKS^Tss3IMt_UJ}R6lm$y@nP+7ZE4i?hN9yE7CLPjgyl1bw ziCS9_7J2S{WZU85IY8?kUxUJ}(B9YUI*;1lO$H?hh4t8RMy?0? 
zgo6X-{b_TQ(VKcIQWN8qsyL?@vSX$=kFpl{%;f;Es@?e4Mo8b;2Pz3!@XIxbN&0E_ ze6?d4Tg{4Wf)v5dOpEcA=p@BjaY)`P z{Zq0w&WBg2+KzfX2$)Q4N`k@wU+ySoBX5mccQGix0@m?Wwsd)sTY{fEWt&yRpx^Zi zGG&~77HbQ6kR}dv^{L>$;v~xc1$mcehYqz__u)!9kIwHp#gr zlEMBWukh=y8=;%6?3$WBo?~pWD}CA-B~XhQ4Ch&b+Wk|CP^idec7M>uwB*~mPb7W6 zN~G42e18Bz)lOuP?tpL766#s(19Q0Y1i^wtsfZWvVVp%L3PC`qS3Vl4s7F)|g@NSn z{c3)ts6d-G9Ep+R5ZWYod@*UMtb_M>?3lJhbqXf9Q-H4#C#x8~Q@4DiY`D_3dGva& zXHVb%s@wYYyL;D7@>tstrH`5xi_&M-$S~5tKd01^HLYTh--yqLGK(2sgyfGYGBS6= z+dyb|P#1yswhDSPsIaPAl7sq$lHS~c3lTs!^!z*tqkZR?zDQ?#%bx}qRusl_r8S5* zoFGz3XzAXRw@&X@U^@T8+&+(E^JDjr#wK-pbzGCUPrPlzqXZR0pQR4u_*2Oc9a?^4jd%dsi-X(A;v$gzy_YiZ!oTVIwv$LS$)_7wzgc3{jSNXH7c2CRpOa zJ`}GbizqDVa)9YNn!&?_{63I(5Xlt`u6f#b^7s*heWF~fb1j>Th+hY#VS5i6ux=ih z()nElANAHi)gTayL8faNkk8vel(uGEMYb!*FzG1X!P8MyCPL89Ht4eVSKDgHxjQOF z49qW5jtW$;t3si7gRcil=-NS1OCYVO=reHXnyxiX0$gy$nVw0EcSqv$H=JSxc;Zdj zTbqoNTt`*(vN+T=0-SD4gcKHKlf~Z~2fGLqXg`~m35IQv3U50bPl97OAQ2n(&226zecdx^scz# zzG`vD0WdKdU4w=S7y)oTiTiTg109*~6^8>a*kF>=(XJ&GDLb+_MeiXyHnDKSk3wo> z^Z7Pf(v!huX}0AyzE}0-X+*RKLl@N?LV`XTK@q_trT-!IZ$x!HY^21p zXFlHY5c?6r&7HHUcXW^P9GAy-&^8OW?5k&NRLX&OqG5)4Jv7AlSVRB~gR$mL|JD~` znVEmW%#O&-KF#Gj6TZ4-+vM?CZg{03o4iTWaxsLgpwkyM0=q^|L(ZVGc?y5syS}Aj z5Q=9!NF{*l6{|V{&WZNY1OkccSemp}Id$1$*zT(`^v}t1XSR?G#4IrL0GRS<{mhYqOMPy~rIegjR_$ClsG?ghQTk zA(QPSax)_DjY=@Hwd{bObi9hGacU1U<8Lzh1GM5%r$09GnGs@vU6`TJPvnPb;=iT- zG$&l+Z`Vc{bx^?Uf!gRxEl_s(Hj!l9hw;$%o+kAKZRD*{VV;6{Km`)zWF-*tA4YC$ zCGGZHSbh|s5Nrx(!5o9d>CM-T@{+$i!Cn9Iog+7qR2__znn!6Hh0(@QrMr}Lcg{?@ z@>jCyVL61BjHX)V9%ViH5<>{Se0MC0zVFOa5%5C%y89gNAB4GrOB%gW3o6r0jbNkY zOwpBZHXTA($xc2j>lwcQ+vYF5Zsg}%0xrL;@TlKih1B+k3&~Y6z{b}f4;>$N{F_E6 zy&t)j$g=59runK7W9h_87^C9MZJfKsEJ>aqCca*XoOZkam7!3~O`(}gho_BrrO63F zE$?>g9t)>x^&SQ-BEVAxO?&4pRTbeMbt*kGMKFos_%yg_hIAsILTQ$mNSruELlHEl zqoOAyCbWMCFKp$=b|IsO_r&i7tGZ9(*+``f_7FD{w8aO)iW9RIi30u z9LCWPJR)SIGFB0rhj0bmD>NtP4f(lvtEJsH@P`~nugC~CN80kQ+L!CDJkSy5*;&&I z(5nP|H*?+Yp35;qSLHINHNm#}TlM^Y&qw@$f+FCTSx*LsbY%~CCr!M|SH@z8+ZlOL 
zqm3{oA?aMQX>(rCZB8m!1X3&G$O&WE6YO4$P62&Z%mSVSJ*9^%|J7WwLfPMl3PU5T zgYfY?^HH&v$GRb!lYFZ9tE^O|pWo3aCTm?aSl->t}od!Q$j-bnw#LH7QrGOY< zaAFGPdcQwrMMyr?=iJb)rk#>ev)*{F%KOCcG_yGKXnRH9ziP?`u@G1Fe;JCAy_wW~ z46lNh$)2JN2#Q`<)w6cBRhH{TPJ$Nj(286}7t&b~DmwZxyfp;=TFRe*aoh4574x!w z2#CIAvuxG3&ajH(jYVjfz8WwQH)GyzM!qn0dAMe&>EOuAv_MoLRdLg4sH<=Zh+7)493F?7 zH)OhIF1e{Bxt!Rp_>HD06cp)H`q0!0AM)&LHM7L}`M~7zLrgFKkb6z0&1{Zg1&bDT z=M*5osm<>@_w@Tt!DUnqU%chFzn_e7poh`b672o!BV)Dk<8s?Kt9dF6i_HoO$-o8~ z{UHnB7mQ`n*=`vXsv6x^jZbqIi@6|9ry9X#S0SJ6So&g7D3HGn4rVQTvprtP%J$S^ zi1O}&sic%vIn2c>TMvGdL%c`K12Y0a(o$j9j?U{zdn^lcJO3aQQdVOzdx{A9mrgAq z4m^IN*z-1~I8Y6l-Xn-tt=)~bxA&5z(*&QH?;@Ao@&`M9ivyE2R)_DeFc|4%q^ERH z*s8H;XqH4qa$P+;KvM|#fH4dvlq>FYS)*GW%8HRcD@mvj<=D9_kK6#nyFYHNQhlp)?hpqv<0I*^ zv@3gfIqr<;QV^ajNOB){fE6vWo-%nYw6vhp<9@Fat3E6>VCNYNa9DRP)_gk^2Zl5> z6{4-b&PNf=U=~rsML&(_GCS)8cXJKrWw+E!H@qwytLl@ZP|xB~kwS+mr8p=&f%0b7 zUHq~)&MbbNN!U;Nb0?9@2*}>JP)|(Zn_P=K$V+Fm-S8!JH5V?Gc+Z2s)JeufuA-UO z8T7oluGGfi^~sc)JlOM+qJq8{%nG3eG*sGP}PmZ1RS!;_4^ehGd}fjbc!Xgt8dZ zO+vloj+%beCTEiS7HO#R$yjEX4=VpY3VJt z`VGFbOV9aBT<@}Z_qru;u{BH+w)yUM0KWkL-etdsW5Pqc%KRP z($Z!>ih%ynXa;)Hi6w>tm&@*@cuG$7f})ltCQY42n^~aiR?>J~_*N0erw!`enAD^Q z;9KPm&g@g>C7o?1(Gs?*IwZp&Jrj=J_)H{GDux*I*u&9Nz_SpT;MVC{wO}j@J|?!i z++8i%I7uCLKylEd=GiW-eVgRiz8kmz8dPiFO9Vn^pr9PI+EkH+B8|noMYbRrs7$hoF~ob1;nvwQp=ED8$DIDct}@;{aViTs3GPYqF+ zq?|*|mMSmoNBCKNw=_hcwGgy*z_S<8<8jl?QI+y*0=wjeEY+TD37|I54;ZWYxQu*Z zAsO$7DgCg3xX|uIHWB8X8J6bYuF8K6-C&?F-R$24XFV6V^qBc2p^HKmMx0VJGp@A3 zHI?=#g_$gy@pxuXx$(%YLXMeev%k$z^P)h4J0&#lRSsa*1|O=O0&Omhy%&gjE)EV$~I5f??9{e%$0B8 zYz&aD2Rv-Z99>RgGa*6L&V>>?yH)kWrj}oXoX^=_svHeqHurqg@J{Wdn==-zGw2dw z#_JWDo4NTyJ`fRU(%=QC3z#xU_wRG=CP1K7z=zQqNCw+C{}#5zn5cbrq2Wb0j4R4c z>pVG)dS|I-M3;v82*Il~#y~z>@h>Ymxms6$qbq)`7o*iimFF|f85j&HuE%bYI}}uD zL3O10I}W_rWkoTVyBjfD;B88^Ml_caQb?PF?y83pk`RZweC33`q1;(L10m2# z|AF|~utSUC>pnj_K9}{`vaY&UPObO2Sl-Di?KUU0tW+z;F>-|*Xd&mTJ5drrABfXO zui8d7VFm8=iuQ^7-`Y2Wv6lSoo?QbT+uB6<#%OfzimYun% zB+w12jcb92i>B`p-dn?}<(eZbLpcqX6Yd0#<44p!5`&UGg26lYEG0|w**n-pvrGzg 
z!!YN<;`MbT0O-mmdB~}p`q?S~&9FJsLu!I4W6X_5I8jun7hf^Sc<*X^AIguWcN@*` ze1t}*hitAJVCyR>5SejsLi9IH-8JwJ80`qR>4WvE*`UTZI)5sNf4r8<3;0X{-RM~3 zTf5ubm)tT~ZL^JfDKKuBIB#kx>=>Z_Q7w#m+Rjd=(%8bl zt&6c@7t(RHIdbB&P2l<%W>h}woVVS!QSin{Mu{sv2EmR8hOcs- zBDNL~5kj}Iq>KeX^Cjh89M?Q*-Neg+XlPKN+^X-?#ut3pDLWr{E`q74GVs1TSAO8) zEqRMHHLhf??#r(_F{4-3#De`WhGzos;pQo+Iu$akz!zI9DjgU6*NbQ(*8|NT)#6q3mL{R2IsfRSo|J~MeB$R1^&!V&-RDX zTFuBB8W!{)`qA~O<|uiY%rFGDr?|ScZhHL!vA{?gEpmy2E2LO=SC&kWMpN|v+jE7H zn>U&d{g?GbqfAwJ3x@H`7diP=G!3DH$e`&c^7 zH5PT6wAw>_X!wHFKk(H1{jH4OY18PH^!z=g9np~4?3=?PjmFcIWTg1&QI zXVY@I3Eqjro43!{oTp=yTk)3Ba^lZKITLFzbT;1m`b)8X7F|nYq})e%cpRoOSrg)6 z<0E9c=6P_nxE9LG^C5t;;<){~6>iL8FMonORGcioK-qC5|OFFi0pH1xDf&2&^2 zoT24cj!5Bc^tKh$wry16*pDM9P*)%rxgYwf2Ns~m{^rh~>>3ro(68CG=-UipDQzO) zKy1#aKr4E%HlwWB7;JN+dkd*@ThVHQL_H44pW0 z+|x2x<5Z#AsuY1bjX4#?Os`=OSaqaFY-S)LAXL6w2)7HtNFwi070Y+Da>(&iV{`=h zC$;SQ9-WlJ9F7sbMzAT~mH?KRsO^Pgx6XsM$Eq^2iTzi>B=T8CzT5MmVvaO!wba-S zd>JE=iq@)dgc@>*5?yrF>1SX6N+$Ql3nlK zjhIusVL|O*wCl9TM?Z7LCgMG+?3sl=ztB~1R%t4BQ4klYc=l&fK({axh7+VW4LoWN zr?R{G-HzXWnArKuz0HC1vxSbcPVdlrzvk1EPtLK+!};Ju?G3>+IAA@q3#KN?LHl-+ z861_IfhdNsU}EW|lntm-{{$7a-mdtWL&t`0ar&8G?;+GQDo})6TNd3ozx#7vA$3P+ zVtD{hWofJ-C#A7z_)t%5%+Jmm^dZc^Z4Oom_r5m?;C0Q)dH<$mV7F2wk$F>s+7^$^ zJ5h3+UOG=vKoWp2QCb(Bv5efn1QujS(KMrw)UB2sG4qQv_;c|#L-`N`R;*fn zpr|DRAE^<)TPUF+*+v^pnagWf=BwA$&l@><$i5`#PuF^Se^)aWV6v07n?f(ujDHYydn@_tN|b2EPD>EB?# zHfX&5=AoX%%0iYu>jBZ^)7af0(H!~(GRUkpi1c(ltWqTgJnaW>o7(&Locj2g*?~+A z+NvzVr_Hhh9gyUvun1DJ5nh;8EqTQ9?}{7w6v`p%yAEAO{AeRHN#0GCP_(GdK(XFSXae<-wEJuZl#Zp1q1R{l zdK40>0`PUhVCr~CFc%S{LO@8u{9u6!d{w06-J`&xJ8tT(X`0)-f^iU;7M#QYw-K=$2&!VS_Bdk$dUyD?AQIn|jK9;Bf3;rV6Od zFsE}tuR^&w8q%t_AqVNTLxO8SRh)Zx(wuxQWBmA3vz(NL%Xcuz?Dl20MH2F3_*o$d z)297EXxWRQ?+cef9G{jPh|ZO#3vG2@E^(DPG#+cIDEO~=BL{vDwo{0oR>MQkMAK6s zILnY47+~jH(2Nd4bu_AJldE|Lg~Zz&A(nDJb$nClUM}ofM+5(ARPVe=+5=|*BpuXXpuntK@{m8ZQh z?J-BDN_V7|MN4t8G7tPP&M|>Oexi}EPNpE0hA!(md;vwbjQFl`19z8^!EEBYserbD z%X}1>T41n4%EK1S0HbByqD9U#`iRj1<ND(nh7=*M5bXd}vppB|>n3w+y9 
z?nNHLQWe2FblvHe-O-H)zY)bLn~xQSAB;mOwJVN`G#a3v$I|U!7Sp;~esg+OL&qiY z`L)C6)L~cLgY%^OKpp@#0h_FO2S7Ho?gwqV+Oa;hKLHHhZ#4}LV_ltvj#1YWelZ$# zWcW7Zr5T5nNyX7ZCeyLxBE6%_NOC3RA~ft9_l(>#oe)*jL3kYHL~>N+3WeW&t!5e2 zvKe572>P6=uO<$bo^J3x^(^&6$@WUdT>b_2fGn1`3P}QccCbG9(3u~8-MQf-+0QtT zj_W(4arg4H^jKa_d#aMdC-FVz`KtRZxZ)&4I2)P!z%GWPc9KcB3{LrL5W) zAsVWnLR+ZwUOkk2g4S47W61U05#9KMcOG+B**CVr~d{j_8;$xUCKWIMXKfx9~n| zxg6AkkiU;6f8BKF?(xPy>e)6;ZTInwsDC(ES?`LdBQo)iQsjgP`h3+nJF+-c+=nTz z3b8PF(u_4ue8=(#V}~@p@^`v{5JwA!x>#oblC2E5pnc`gpkD=vW*TFJ?xZf}fdus~ z{e`i{qF4SrOZG$UCJ1tD*U;Kbwj>qlyByrsWC!1z^w5t{Njr9LjET0~u7lbhoT(aP zU-aj?23XhKk7g4YnbU$)V~X`>()`bpo)!bWZ{W}AXb}hkr`731F=Rv;($8^D%RGs1>jTH56s*UbQ%(x&N;faTa!C?dJDjO-^ zQ%kHwwW6|kzf!}lJQcy|gIl={9sCc*CjC4Vh&^NiLH`cU(^r0~{Sl zAfd(}E^X&}wQb1Hr1&9P)G^Lz*-Sf4!b+1b%7H#*4 z>C?ahuLZY>2&`IH38K0Xk_;hf5HUsh$rPvRxjmtSQUX4p9 zXxU8GRCP%=GCShTT%b=y(8f^Sg5KTqqE);DS$dtY%z{PXp03rhDCWSTbQkx*+LL=G z6ii^J&Q4k?dMhGm4KNQ6v|tPop&YwtDkdlio}nXZ{XnB&Ybdpml5NDDj>yP~-dFH3 zIr07#qs-v`9n}{>Rk|QFeFkq_>_&^SfEKo@#k^gRS9H6sf&lAn_{2g!yhltC*#zBH zOa)R}?Y-Y-91Gp6fd#)fVh4?{>`UcHPIUrQG1g<~76s7S$}H!d*$t>BGYBrWlFJ5h zB~DVmA#K4Q6#mlj9dzksJ@{1xE}EawmI#VdocGsZS_`~!Q(F+Ol1K-WpRw208j5Tm z>m0pvu4K?b-UG`o7WCQyY5+;yyx|a_Sq%=qSxZ@yb2f9CjrIwi5)r;dJ>$6Xp~@Es z5s?sUzNL(PzYl?R+~M}!PcnKYMaQXi8x{+i4-QW>)xeKA&_=o<0@yZBom+-p0vkjZ zOc9xVqFzd7;d^}``edrBk3NAj_DsQ;Kvriq;iAcXyusDvudY9eV{d`qhhq_n$iY@i(eLlJBOFPV^@~QNA90!`AKTI70NY=ARW1(YQb&dB^8nx%J5X8+0MO zPl%mVAit@_8QkbJ|l zo}hs?&#;PAofmsVs1{ENqD$dES(2w`fPP^j?KZi3@*f;M<9~1S_DoYa71Zs~XE3=n zZx`sqdwyVxGH>bav>70f2GAG=YPsw8d^;7tSdQS|TfDQH>Bu77hZ%m76^mLY6{>iM z4E>S`d%tzM9fT(EI-j!B)k(BVp28&s7HQ>?eO`!C&NqKqw#JG8jp>s=cM8O>I9MN9eO}wKnx~r#MtKA#A2R((5stN ze~^?Qw2pyH;P6fC(wNWjwgk&gv?K79OwUuv+3ts47Jh(t77a6(!{7hGb9izK*MFTz z#FpoX=}SL0S@q!Rrp|a%U{3yWvet=DbyvccMzCF<7%(hCXLxrj;>Fw9Koj7=sqaET zWQ0)fk4}RrIdgwrr!^-jD8o>P8HWj0ol7NY9hcye5^kb-27 z*ALdU$YYDt zvgTm6=n^#BXGSV}+1!0v32GZf#XcH4=Uv3(pa7g(jX$ZkZ`S#n^i7YMRg z;dQ>u|gh72ZQ0xG02?VI~E9w*zaz5VH`GfoGe5 
zDPFKLa*Sam(0+~8h-o^px(zwP{0Uw(7aCoL(@pND?%G(;2uJ&4RjTT zAj&Sr=+uEU^6PgK2H&_vPm7@`8qChGCVt?k*zABq51)iW7*nPl|Gg3};-{xg8)s>- z$yY@OT85^weP#UXj#948+0LFT@r-)W4^rb)@Va=4!-7eDt8i(LAX0YRN&#JmLYd<3 z0a>3{N%j2)yq2HBlgrNNPhkP(gbib>HA>-W_5<4C8#nDvq+MQ0# zyaa=bt@#l$lIQDPa!1yAIPZ577Hz*tb|xqiZkI&^0dWW(T$G}N^24?&?{;Ehrpzx- zsPnIPw}RYyG{yqZ_c{sAs~?rM#-{>5n~g7;9WpO~7QgZf>9NW+LjC>Drf1U}&mBh* zd;!-5WwB>nqddmh*16(#X`F#D?ua^}5>@fJAI3{YMgk95J?;(YDAL&5pR7NNo_4oH zzNM6k_z*p?mSi3!I0NT<%2ZT7Y&_jDCs;g)y{F{Z^Nopzb0ho^ilmEVs?jbbNB1J^ z7h!W>90o4+1Z|pIdJF-29}g>bW(f^gyC5$@S<2ON=_nzPI&0drH&A(lwDac^vH3Pe zp$BSv3{(l3LY?;RG*c3#mb-qQx7g;{J(FbyI>HtWXaM?i*?8bXE5L}8NfXDC38Y12 zSwl>WlC8Q_0uC^h3dtm-M+|7CoZ)mmI3itN5SZe_Smj(~n$EpB`mQGRjQ;A2C2Ay(BQqDuPCoJCqQ%l>^6 z_KM`rR}ox>^L#t=su!lCz|bQ*(6C+Hj*e5wZYU*jR-3cw_>(tkYAlkBR}uNS^SiR^@pMW$xU?&Gcz4wq}+G9YQSS*5;QoALckqBlkwWrynJU z4$)r6OTez1az!AlO=yV$7RhqH5s^gL%G<_WH6b+V4!uHqDu3x8HAFicJ{V8fSyP1w4~Q8Z~FM4FE*$m z-$}^e7$jB5geHwrpn`)QK~n6Sd|H4}j^Ol(*Et$j0f_|A)Pd2V)biT7r1ray1HN)4 zm=|zGc_grlusAigh_|`n*OzK7fXvWfS;_^CU0W$CClD#)C|FcUk8@`@%_1B9eBon{ zn2_rwMaylbFl>p^iJ)8t={t4C>N&z)?A1v#(*2gM^v4tB+Su6Ui0=hGQ!;YIfJp zX5f9WjJkmy;}#)J6u{1mW06SatAtH-QI@LrKL$U__$r-kpDAU`p4o~+ZVzgvfi29EFA%}n6nVr zf(pivp9vYy>ge_f<8}>3DC-!70m5~l&6u}BvFJk8peb=lOm@5RL~(8^TkvWA%dlG5 z7ia3EAlTWmW_-r#Fp{`~l5PxoJD__E1Fh-*rhG&gkHe^aNCEuy3Eeq+_YeFsBui;v z_gIc&lKdGY=T{)2zJBoU*bFX&uSK@a_T<@VU|-D_U1nZs5Z~9(r#l_ZT?pn)o_yy- z8Tq0S^xU#QY)yrpbZ~dD%w;Y*9TMgdp~xU3C6V?wji?f9 zMMKbA^vVu_!b!3QQ9$QrRSel!fw{b0XAX!<6Gf(7a2ki>w|hFxfX0rLl^fG5h%GcR z@w2+E1Z#GGD4K9Xuf_}{i7nB~1tCN8wG*rNljEh`w<6-^Qs&A9rk>`~b`G(dU= z=NL`!4lOv{fb-GPWZ;g&n$>r1PHN0T#KNP;JZktPLVY{n-$_2qqC+G*3IBO+u+Jfij0;b zfV@@zM2v4xX`qjESZ~d0Y2+XG@j!Z=_a|A9>*d!2{9$D-g zNr!by?DT=EjieX5k^XQrwNVFk5TT{!0N7D$d)rttzupw<>e$B}!3^SU+;jzo1wicZob51pvX!%Aqzc3!*%0A${P) zXm>?5S&{Pekdv%H_LaJ}kjewbu%Ug3U}fT;y#C_CgNcGLGZ?!O&I6m2u%2P%_V6T#P#P8(FUQE(wRbjjUB$M(x&0g zPn++2TYW%vG-~^{Ky{czmkKXf|B-IK<~uxhmIN!`C@0zPXG_QpXvZy3oxzBOX(Z!} 
zb+~9rD;rxSgkzVg$&Oq1Y8>BNN;v6=6mHNMJ-y76DKsK{+pQaBjO$rT|NOr}1}r?y zlkNbX9jF2GdgG$=zSpm`-@LO|gI_bf>#QBVX(@2!eC`r{k=RN7XrN6sJI6W$aW{DS z6`+U1O8!5qkT0b@o{P^bUtz>3?^FwH6J}RAPszRtDJ+Z)1NOi3n`HMXD0J!WaeW=g zH7-liHXdffrdb@9o~qiZ@EsTfb5l!zf}K*jd}AZPmhfXW%#@f_vmO4ry;?2usJ(D$Kn=cTnuX&f6200)g*WApi+4Ndpn>~nf1W-?maJnL zFDx49=f&3KWDbJ!tVYU|u#rl4UuNkhMl~9C5b4#LN~8G@7&BBhlVzmjD|Tl|B4vsz zSP=X`?3gvzOK6S;I$s%R!$@!QGj1gzpjB__K8GQ0psCFT$tLoY$$!Q?$ijFy_#=NQ z`E6leLmciKNfkLTy&(M|^0*vr>PZ54@RZ4%wVz2zj(lbKlCL*=73da&+Bm^jQ&BKt zjiPn^Pfv3D&-#?;BOM@bwuurvHt48YF83JmgMg z@&2u(3Xu5Xv}jd5YYNJBc>rX-9rPttG}1JGy8ZB&HxzR`VdMJW-1EGU%cvNNg**3D zd63BZ-GMIqaPDeBx{D!((^4E$`Js#+Mj|kPD_=G(qSB>us{)+H6vd5t$1K#-~ZcW@?RUNg6#TNZ_VExPJrfNhR17HSo@ zgYk;6nv^MrdIi^7a8Qh`xB(`iM`-k_QH-EJX!Yedzv+WE+1E&ag3ghEEJrLN>%r`?5q2>b?$b|)93Q|#%1s?HEwyuvf319| z#9Nnm8H@RkH!LgX4hPZrvhi|SDDY$-TSKS(zs+;$Vk|q7R;UJr$HM$LWjB(myi2H}Vi~8E6*qGGP*v+Y zZ>cSdomysrT(jPk$tB=2=j%#VL$@m}mbZ>!aLMFl?Nb7vcfM9)#IfJbbZ= zp0d)mdJkNKC24bAKlAoSh_}-1zG1foK2%8f$>eV#J9wI2f|vljTjGzvb==d`+t^Y`2i zk#QzAZdamGS+#E$&@>(1M2%|;o#ns$XB!+xA|WQSxM*{BqsQ)~SOTZa@fXCOp{eS| z+54r9jWy*w@4?S!uJG-x`f0T7N@bet3lKEB_TVT$>gX@V=zteSzbN3YLLCcOJmP3a zQP0Xjn04=xIx+P_nIWraTo(eoQ2wdW3ZXkBe3wXOy+^l!OW~eVHe$} z)1nnE_ws@W_<7;$XqP|9KTZcHChgbG0bah|!0QMRKwu3O{br}c)2l4{WJTQ2W>o}h z1s&Nc2E3C*9{e++gsx3L+6ese>gjaOo#Z&s_^2rQd@DQf5kmMrrT)q8t(4s59mq7X+}=Y2WM@sxZdVpD|jWz<8I4*=Z&2P8j0 zmofj^Ry;unb70Q(0LBkwf!yDOT*R~Y1uz_#yA5^l;rY?_FkSIRNTM}9ZB2f5QZsHX(yjo%IG(*7no1xkeu?U_a%&<)1Flh&C#y{j zsMzWvR?iQI^vGByAJ$y~XElg0@Dwl}Ns6^PlFBdHl>@m0w)(gz4D7C_{+A9?7JwoH zMUbyod3TZd>UF7bKV}w1Bxqn8-A)9lQ@%H|&yE&8%$g<@bxHPq_)Y*~j7HDh=0q#` zV}zf%XXQUtf(2*tK(u?;F*l4-b(Y3v+F3AYVPh~oE}=s1gVK^ZDXx>Bnnc<%-dpnCj6l?_+TF*(z_6^Ec-EqWy&a9?&gYA(L>D|p4`!M z?l-tJ+4`oYXmy=$IW`d(@ks9+comSZ4VxT?sCXi;qQbpk6rz7Qn)Ogu@us}WXpt9#qP!MxW+@OD!v$>^axp+g)D$?AE(T4s@DPK{0q~7 zNHcX|BX>=onXld=u-RXT3(+{d zy4t2(3DNjEMw2FpA5qwPL6pftmfNyt9;-=>c@f+I+(nyjE8}x*g^`C<PIB#f?o| z>i5(Fvw}d0+C`Z`;A=A3(V39e(Mil~^7RJe=lPU1whg+OL`k9f`(e+_ 
z*Z#TYXxJx;w5kVg`1D{D@!`X#zK{g7YonGX zN=AhAkiV*3K-a0-xH0~rS)>i+YPBnJ6Dz#Rp1e9eN*hO8!E6)_*33{ZN%45ty)918 zWoN(`5pxdzd?npl$QoI>-8F(reGyii_u)6gR6-|XfKUUK?^@eTPtz^fp8RdtTh7FaPJIU6seZrw0kp8>wgnk@ z(3xKX3a^t2DveyGER;Yge0z4+KS7uu>Xx)sp%7Q)tauW>bX%QmSO|XeC#G2hZayC9;=Skc^0G!`qa~`eU+t_?;=Uc#74|TJ< zWO+@}XM6lM(57tqzN?xl;B}fM#@;W<^HeoJCx}!?{9Bc0%2;-{e|}tn{;}Cv2E+Te z$Pg!oPzu1icBiOpnwu>)w{TMXa={sF8^xJq} z$2*x}-o;#$*1x8TsNl#zN20YQicx%%b+WJ{i;}Cc&gVL50AP+4BkoAjqmemROyl;h6a9w zy6~_%k*VgMa%(sPy=9eLWpK)xGE?%VKzp2HM3KGLoNvciPr{4hBaDqeuD zGZPf83G5B{r+;1`d>Y7OVOX`U{|Z=7Ys?6m`@q23oh9(Pbo5@w2b-eb zxewDTQgCqhDOYiQuJf2}Ldg3qFqNXr6ke!=0c2Ddq<$@jJ!Cr<5sVPFW&nzkT zo$+Gv-)Ov_`oHEHy`wvjNI+WGNO7BlLnDKkeWnl7*n=KKxf}a(ot!J9b!ZPjvO!yS zDgCoTA^S`QGic3KqT$--#i_4i2e$<@WDYG=D`FT3c_BUyi3HbB`xi?a8liu6({FK< z#2OTJZHVuf&yRgP+(+sW(MUoRKL|ULyoIke9L(3KpPeCraQA;jCwqI=9dl-}46%m9 z>mySe$+&R3u;{2-K+X|Cc}PSt^5Q5kR*f+Fbf2qHsp*9*qV>nSV95BpnQLx`WL>NH z8Ut`ajzA! z;sTh&=y_1QL6oSf8M(wg20?QJ3y8JI@cQvLBKWLW!n`Tu$FR1t_KtaS!SoDqk*>7i zk(|x+Jz`VXUH$&{g96Z5dE7@Vz!5$t(y2IS48?FEZ_P@f;$Xdfj(72CYUvfk+q&U6 z`+Hitv4%~Bs_mT*C?F-cCfZUf1$ro>PG$kh!vR~E5w8ZbQn(grEoqZTWgi4RPY;QL zgV&9C+bUi<-tkq%Z)B5TkXH(U2-$s-4&mQNdYfYI;uGwKaUmU#vjnOYPy9KxKt+Sw z^hfUZj*DIuQAjSmnPB4CpTxH6JSe?@amjg^_Hp}yZKFb?iXsk`pD| zFr>vNy85iyxzNqr0B)oD1LI=pJM3{sdbZtJDC#}z#m|Xu2YmyjW*xhxs}Xw^>ScP~ zGj?4u*`o@;H(>9ZmPQnSgw8@#1w0r z{%5S#tm@5^4lenk?fkj1FcW>v^)+BaUQSRu2kbraQbot!7s}@h(#1$zE__5i$lh6+u3d(%m(wAI=5}VZUFe+ih9y8=(o`&-Lg6QF2PXFi% zoUHj_HBW7eh@h4p77V_A59vGizU`FPz%qcDk;H-jougG`@!4Ayq zSEYus&ab_vyZb8q*FKk3*9ZCi-7PD_&(RYyC9=C6#XR=93qQUh`q-bd-%fHXAd4Bf zc!KEhIKwKWBYsfq_dLwMm`P$eA27+13OWv8rX}kb#qz@Nal)lcwf|8YB@Gl1MYKY_y)W`;?0F?o`lU)}aKUA)ip5z3wC;=L|P7TbKw-&9ALnkZV8 zDSAdu?z+-85{dZb_NA6Axvq<**Ut?527)@q^T|h^i@QXh%|?Xx^s(v2ZMG zQ$B9%a|cGqC0-5NNj>A;`K@?caJ-=x-XXIx&Pjr%;Ar#|A@2KiwH7}o6UI!@lwkAL zp!X{N?Tm?Zk`FUL3rU_;q?vZ)2A9P)nOm@)ie`zdnS7Aw0JsfY*5-AQpT|xFJb^u+ z&L23K(Xxmh%p1W3ycLA3UqcP*FTV8&wM7RQp&6YW+2h>G;`0rbtGus&=atmRpIAA6 
z;eKQ2_aUI!QCxzZHs00}C}r>V;75H#UE&7Br`rU?U)I7_HNPTzrLxM#mh;husd$jX zPus->4PqsqOv<7E?8S@vjwY%%dsKStu*fGo-K=llvT?;EcuIx$MyvCY+BgvyY&a+gREcioaVyLYE~EFLy2ILOne z?@PrBtAE7*zB<(x=*Ln7B14Po*Je8ZEz!?G6dD~i|BY^#s~aB4%9g zb&n56dQay4@8!QT5p}Tha`4ef<&zUbo7bA8uzLwP&dq7ZGXL zW|$ZNt3+maUtnyN3BX6M139G+l%52tAXFF}l>321!K9(JZOouR$@o4?KAa~NYOsnu zr%0Fx9d2+=0bFHb4gw(14*CJ#4DORiJpOBa;GNXyB%-GB_Sgo@Eb^RKeg`1V6#D*1Zy=MT7IDnMrz+_6P!EiedEA{iLb(n^Gn0BMmM zFml}+_JM+Ee+EwoWjef1Chij+OpMCrt;176G zPjjz8ddrWhi2m_EFF^Y*NQ~Ii6uqDsd5i#YTAka0^wNPk`Ik3byk^nD4P$y%3R+tR z5Ft+rY`D;sH?M^zxR@2P@`bg=98@y&^X=K?@l}%}J)i9Hq`kpB=0i7ONYgTqf)pW;lZO?RSa@4NY)9GppNF!YpJ zso@Z2Sfgow)b)+mwu4dxaB==o`O9!4spq|dR1{-dtYwi+(N0%k`4f6`UY1B+Z8f;% ziabhxf9)H#Z*rb9n1y$acJ}GU83UsL+mfQHvs25`3`tQM7*o(0U0!t9#%^Ggyosyr zJNy@%*5iI5>?op}W2-BOfJ~jimG$)Fw* z(xcceL)FJxS0H+--xEs+(Nrr&`x8VJVmjCoj$r$AEwY^0D0$kKVWj7RDVLUCUs}eW zK5^xvMS(rqg_oD1HI0LZm2+x18$cD`RlnkY9zjr*6D=^03<~QrcahpG1xC(TSg6rM=;jsE3V9m{*0nA>>N_d%gCYX z;Bc9-XUw{ypT96?vnL9Tk3x(sbsEYRwPQ_&??o5;lg!-XP&4gTN$h>+-U~Qng1A9Q zHHwC#l(HC?{dx}L0=t3~8AUU@2a{Sg9(ph?t))WbUIgboBoeYCip4AQkM|OnIJj0{ z7#7}7@l8u3v4%jf1J`mhqzA6H^J3l;$e<7^VVM(P;4bkp;=C?LKB5bsVG!DfCAXPn zlKTUd$X*7>YQkmE~CO^QKh& zSgTuck|n)Lu;oJb9+5Ijvbopznok;45>_Kw z%z_?)ZeZxiPr5pHCevY5McS48p&YnKz`;lZtT3v0{M$OC=-8nCHX?#<$UMX?&CP@MK9-tMh$>f{`b;$!#BNB8J#dZasI7*p3F(| z$KS`|qdCp|;q%xW{m(Fw@ODuSm$s;;1N!qb!Fi+?FwVoPUr~b79D(JaDsQnQz+l{C zu3V#qL&Ak!(Kcs`9{e8?2ZbtUKgA=cF-RCbNp@pbx88H62uI9X`nC)=#H}laf zjInfp?lJrrZ^)l8UeE@rQlc>w`-Al$FzjaO-_?}cyaE1bCYv>?$W^IJh_^0^7^W9C zr{vbOKG508MfsQ^6L}m38s)e%Z64s`ADL3lX;O-*3c8j42$0lpjAQOB2O%NGrb~WP zix4cJWvI{m^gtxz8namX-3k(`eP{7SpI=6jNeit&rIJ}24%NBbBC`};gGQmn^=wf_ zaf<~WBa2g0+p7FWQTNWTiuRgkpc^6ncCg)gH2_+rkT9r8t+P(nTcozv3y}W` z<{nh101D(+Ui-tCPSXsEr>D8?SKiil)Z+nP&PpzM_^{Z0#7dYf*U{?$_L@|^f>j{U(g zMDCtZi+Q>}jOa;V+}5Br@y&Dq{l$PslqgK&_WEvd$0nmp08l`$zw0V|3`@EwR}vS1 z_ZD7YvM6+r{s>AaYFKTwmAP#yxA;=b_D3PEJ&zO{DZRY(dbl3n5H*SF{rB29nXn%|N@eT2_K9Bc1Pk5B3W3 z51k?0w*4>B20+5dkDM6`5QlEqZpN{Be#h1m5O%sH?Z7CosbM3EHgnu5Dy$e!3CaN3 
z4e8dE=ou0WGz0XlJVSLdA>jmJpmckhhDo;TM-xJCEC*Y9T$T&2ntE?uRmx_(I<}E# zSNz(4OT{VzeDKSqP0_@5uCL?O!h0%fGAls9N3^sCuSd4de8CCU4 z>zMtCO_{{Lo^Ir|=u;rk+1OpWkb|4M40S18)^&P*7*1gJ6C%lTIEb>Pemy2YtUy@W0hv7g>^^ zRcTT7;=+tnQL%{>21CFB43~v21H1-#YP#Gm8ik_2;Wrw!#We}(G=RW;;FX8Z+HM=NCU z6}WH0%jFjmE7e+|NYXc9VNp^0?=Uc#74|TJ;y2#^F4QbP1 z!~Y^qoFp*NNN!RGmLq~2Avs~&B(%~~)g5ZEF*q(xGSZOwzHiBWBf}+W7Q|s8`!K{B zIP`G)qb2q}T!q$SCN432;sZ!y>d8To3DkTF&aFn6Lua-DszW0lMYrkiW79|Zjit_9 zR#fYF1Kgh^HR6qbM&8SC)w}&tM)>CLuw?Gf3~2*ttj!o-P+27o39de#JdcRf7J#$f zX?nHe!MuuZ#V?)i;VnEwy5Yq8ZGv~guR!$V5#66jtTnWu%cV|3-;h(IHhZ}6Ke0+@ zl=gJ(A3elt;K`Ek`|`PGIiif=zs?( zZfg0E4J@niF0a#?21TwZ<$$xUjdJv_r0d zTrK;59eG^_B{M^)nH<`|KE2FF-zvr>pxEI7SxwOIu8w$~XAQpWvgET5C;8xv`n!K)u`#Q+7b zU6-vc7yT7cWDJA^$4nSUxTvWer*>?R9vU|R4gfTgIDfiPK1c|5 zMT@cCNHgyUL+Fg%Hf}Z;oW<2%SIlThqhOglT|3(o8(}?2OX+EMHUPtDbM0sb_691?%mP~&C6POH{kyXw4C1kh>qw>|3!Y2~Y zqlcM5(7H~2NPomp%V^e1Wz;-4hj+Y zGw>C%30UR&%jBFDiW{jmV+?Z~2QsTZp=hQSaTNWzM7&X8PUpD5qIfBmMFY1Pwq_2w zO?R9?<7%h3TxE+H7jJqy?wEyCiwqp3iGSG4gF&x~|LW)0`<_ZwX&l~FIIQ!wQE?4! 
zK;QjF0oF$V9WaP|ZZ2v@1YzN&As;exdx_;Wdu3h*aB|>IJb2o<82MIACzq#tW^07J z6S-@bLb$+@o)K675{d+iY@n}-Q4Cq;B^jr0&5u3jP`dhK!c zwaq87Dahi=toAqmVt-g@orR8cRtd(07b&_=vTE8HDP z*f;&UYN5m?|8@2xcJXYjhCgjzN{agGkvieWS77`Lv09WrKym>DJll|tLwC0=*&mau z*1$d)nX*TprEpfIAdc~v%?ajyk43N)3fC2vgaE;Mm3+J~k~Lr5en+Qplx(-}+0DfL z8oVM#-Y5E)(i{(Nss+}>hc=3i2rkRcivNpe3sp0|h}{W`{GhN~TQ2X7fI;}Zq%kk8 zCkgvJGPDK0(e#V7es|@O^ zjb=EZ3K}~|x+y$A;C$qW7KuyVcETYB%0p_x^#?NMDpjQz3}qDS9b!3{PX(a-y#wD# zr;s)G@sx+?!m)Z*WXH742lH4r?HtcSupoCSzRQLSuzBj+4^GZKKAMe2Zr=$(j5%E= z+gn;-eHn8;JQ;qaqx<+nWASp{vl;Y(f$RW?=YaZ^hu-mGZc)h2L$N%DZ7OWVsAGyU zz)G6=g!g8325YqQ3_uWJLGHfs0?ko{!gE7Mk?|(97MT)q+jz&ReyTe0)ky@Ge2hxs z04W1Zh-qhVs9p78zr8^{OnQ_3O-cHCFrr7%FYE?=PIU|9phr&}(-RuSs^rMRDe|GI zQ1z$0`r-gAjP`p>P&I71JM*aIS-=^WS~;Oo8K})6+eE4X+gm_j;WW)M@I2dR@pQ~A z#?P`!n(d}4`k+gqwuc5Q4mOwGu3pJSOP?M6^8GTva=7e^jDegFL>%LI|*8($^v z);j}{VORMs+WcKVEYT!w{XK+ztv~@34XzMZh1Fi#sBwOldqn^?Bpt$1 z+)FQpVljI4w1iQ_?Snzmi8L}$D4Y@wfrcSyrN8fIH!W6C9!n}#)T(6!-;62{FVx2a zQr*2k$~2wI8mThg;$XG*YP-~1oY;R)k5SrrVp&mQX&(2eTmC8v;7^`Yy~Rgrqmclf zr`FES2Uyo$#)bsp@#E;3=#WIutw1WjA_L4%OIN93d8v#|v{SyU2*!cGaTg)6KO9;E zeuLE3k|q`+$##$XQTHAoxGgFZe8SS01k%nA@L-%yCO&WnXs$iXmcP^%jNTwrDpTG6 z8@M6GSATlGl0)blJEGvDeHDmGSd}R38;Z|AAAZMaOPp5u3nPTPJEdj}D?C|bYYF( zv-GL`ek&yraNenVC7X!{yP+OV-$8!H!O zTue1xvKgLQ#t$B8IcKanZZo-%I_pFxN}mx}z46GBUmQ(ik~)$!JN~!K9O`Sjo>n`# z6t~c^AOe#8tvTtsnkI;F5ygsMJ?*qH%5Z?Ro+q1WDEu26K*o30U7F@;5509Gie+r6 zZ@t{c7Jt%_HlVXo%U`Vb<}(04$(?q00~0v#v*@oG^j2DaY}E`XZ9!ScO5DE z>LZva6yJdwhsz>Jxa_%T2Y+KfsubTi2vlp&>{!KPd*0?LOQ~FzL^veo0|9pTyu|4> zVYK3ndH;#uVKnogx9!jRgY7T>9i&@-PAOn?^Gyy|cx3DbJK-)B0`rf}3OqLT z*#isQ*YgPMa*39ko&3ZR$K?VVwfF2`W0mOY1u^Hcf}Cr7=g^$%H$z=%`}FA@JyY9F zmA4aBG=znJ&~G4wA?d1gg_R)BRy%uhG$-Zd{rz^sV^g9Ec1H4=Y0?tLtN&a3%_Ri^ z{gba*<<-lsf#CM@?t2@{6uKYc8fC1}1uwUDQ~`-578_NwsrdkDrKP^pidYPV<7rw4 za>l|n_(CtJ2AIchCDz)T50gOl$A?GaFjuv-I9Hs(*@l4h5jiPn)onG}8%mYdakwkE z3{`F6oNy)=(2NlD+?z=nvgW+w&I{o$=vKHkj(sS_V2<^x%Ow6Eol$88ZuU)Is0;aS 
z;y}^^-*#{$NgHd$h+XbWb=yn?pA(bwPC`>>Qh7NHPdLZ|d$R@w-8?v`h&<_c%UR~U zdx6tyMfN@ACk^)EZDxEboQt(G=EGW%^@=EMpi=u-zPitt*P^}m?d$L7Z?N~0>yQ)Z zItK;-rlxkvt_}T{7QH2uHTd1Gf!vUrdtT{YouEd(!QOM)nt0@ zL6#QpF@A-EgaEy%DS|T-xH>35=2b=#d@vl;o|HrDx!QbW9@lpAxo#a_NU-8>x_6 zcYl%>K?HpGn2ALK$Ur8aGmRC7Q~&;F4oz|EoS4@K&1ZES@EL5#BpuxE-`A`<0L{0o zvO-BJ5DFmmQ`-w}9{@RDc{geU=z(~N;jS_~xx zUn3ou&c{L9#5J=wdtuJRISy-u%fQU|%-|6l01Wb+ z4lROtu3^I0h4hd2*d0dU01oxTnT+}YLatcQ*1|<>pNXd#vq}8pcbh?gSIMHD+23j0+GOW^xqVI>>ZCw|tTKXEThOd}% zLA-tj7)w#)my0bu2muN2iK`kwg*o^tx6#xrvMr}cL)}%aMQYWZv!{^NA*XHuJ5Wub z?P_>C)s^}?TW4d>op*S-PJY4^&hOqhf8nU9(4LiI%v>H9M z6H)KX=7uOvh6x9MDcFSX>C@bAI-#C~!{t0%xxO&BuU`WL;L&+zXT!_*7Rnq5-n_~- zp@?z>ZQ~S5O|OoLBb4KFB=(Of6ySE|;rE;!F4~oWhjWX=q&(L|{v9`a&U-qb?axU0 zXtFV*>eBCXdAou3?urkReyXjxDSy7MLuxPQVdFnlt>aCjPZjFRFa$ zh+)Dh_d%Hpd=Pq^i}%pY%CXA?pCl;jt1OU3K!;_h-SxCZJZZ$PM@JG>4@1A}_CXt= zUy~z2^o*eubf=IPIS$nvOP;_Z6|c}P!#0x6Ydox1_Qe4iT{K1u zOto}s8Jf08#!o+Ru3cjjSL0eJ^Vd(=@OR5BR#@Rw`htNTUGL0Iu0N(bM_p1o&;@~z zQ7XKsJe4hymIb5sYIR6gc>nifz0Do#rv)Sxoa%ScliHYXNSxBI604x< z{eW|1DL$V{(7>;MH@{Z1>=>e_OXdYg&`6T=11o5ws`?NLefRKH0HzGT>j`vkHPm(M zAi9tqUnahW3Ihxws~aULPH{`^C0QuYD-1?14#bxVtoZ0%nnP0T&ngVCJA&k7JVx~o z<##j2ZAlFoKOrOU_Zf>)F%I%?Yi95hKTxzffCQF=ykd!#u(Nh4SNd2wV}1o*>eo%Y ze=}T2&W6gOI4Bxf@;bSm_>CD+yZSY>o*n=3nt5su3iu$_=t(WMtovRu?B$p;*l#Lz zfzr2Lp#`mRc0D*C>PvJ&E!MEF!~S7>i?&<-;~ZfCLSiH7fl$DKO+G|;`eF93`EG7t zKMx=0P9R{5kiX`(4UYj9PlHJ0H-oU%i_sLg^nd;0SkgpP+l#D~d!p8(q6rT|{r1lz z-}R>`fuy^3Awm(THwnu0%pvnk29!pGxqO(fbMvpMrV$5Y=41o4S*df0-56c^y2Qd4 z9oo2+2vBp*DtPWNrGIo$7GPH?T9!9wa6xtEQYHDa2%NZ|2t`%RLUFm7JxbV_rf%k5 zmBR|68qfi6CUacPf%FK7n2lOVx>GP_bjxNtv8-$ZfoWsaD&I=FAXZ7rK;#OU*}EUx zw_C!kZcLv5ml4bt0OCM&UdH$m=li=^q81VX_p;1gg@sl@%P~~2n2lNR;)N!XeCTXD z?EL>hGz2t{(K`9CNdC<(%H*4yx!m1>-xtT$v`Jmw54_20cBub|9r=TmS81I_*K2Xu z7|-5--tdu!v7FmS0atDAoG$&SmsvoS6l|;1gBR~(IcGI=^2fj zU)Hm)KdO7C|Rby79_)Z_~QThrH9Cr1lO zRjxPubjdI_xkhBJQ7WUu15|srYv$l-)s)Sa^TKPwA_p2(knLFF=^61S%0hDQN$Hy5p<&lf#QJ%k0NGI(}Z{=b5( 
zbF|Ap4Ta=b0$!hkupXImTnOu*U9u8yK+CTvGQ=05#Q(_KR&Y?pr6ILD*^GL z0|oGir7a+b%4sg^DhWOK{3DS}&crm4o~W70taZ1OU{4N0t=w#z#0GF4xEJ0yHJoGH zA_3ZW5(Wn@*o9x6cX&(F1Fs0U$BRb_m}HIh{Sb0k37I879<5@}<@6#u&q#B%uAEr) zNJU-dC&NX#5g7TQ$GgNNV5v*JqyT!LVq2}VR&BM?V3!g>pjs)*rX@_c2Q?*-nL_j2 z(pzrGDAzIq9;TzzBP*E;+(M*Oc%@uSeggOqU0us#DRE$sr*x+Mk*=>ZcE^|$_km{u zFq{g%29g*k!PYVL{+W{=yF+@x8?tEcumhFb(2`6cX(As3o`^CmTx1vKK0us%rLo?| zgax)UXFq@OG($#1zJ^=_W_9iec7V4}o5I3RqqQzkSL%8%sD}A_Iy8~ya%(*kE;l2u z#c$@KMVAa(u;0kapne^a_dLq(=H zhF=(o!?NT+3;c+k7RL%eW|AYsSltkkvecvF#Wq%sM`CYy3*%(9Il2Tvv)G?Z;HG)3 zJXm41dVRL~`6|aB;_W&{wmaO&;A|YNImU!{;jZ`eFeo%oZATA&;>ibgxN&{3I%(pR zWSq09tlRx0ovrmi8g|1>tveM3#HSmXF;BhtB0$DAT`kSd>G<4^ZFjAn&mX`vaI30r zY#Xno0|2%wTtv*yc~%E;ROD|)OaV^W%A^?t3vfYOg@C%3xr|IMbl{cSpk$Q|#i!Sa z?@Ct+^mX-xT)av5J2h)1P8Y<0bN{$!b2!eX;cL z%tMsrpx`vinnsP%&@x^wKtAz*6ah(0;ZgU-r3DL)wAV>Q+(^ow?UmOyX&Q90C_m}j zy0N>LG{A=dVHAr!b)thx)1EXy8uk3*GQGD$HmVT{2o zU*cs;7ex)sT^dAeD`|7OBS?|=mz3+^Zm~mVh;N>8N?Sj0n}Z2RGGQ0{wok&2cKr3H zJ2J5mf4~|PCDo=phs#CI+p?$ftwEsl=c`Gq1mg|;F4&xb0t3`Fi zd6_;N2+H6z0Nn<{Dh)XwW%!G10>%oU5M~rApq3X>X+b3xXta*=1cV{+a@1>n{4sA6 zg3X%BX|IwWIo#DFeHTvL8Ho|v8PE<*-ubA@Sx^y0A|3C5=tBE#+QOD;Zcm4^PZD}Eh)p&XIL-o*NVMuT11bV zQVykC{)rNr-m*svS_1{uwntMmLq&s@QyRHmgk1(#3hB~{q7TBB*>kl2bq1vyGqwZ@ zGvB_z6@cH<8tL3R^b*#~AHAPW@EfT<}xCaLw;OGgd$gM!`Rrn{Hqda3&rY8NdsIo^$EJ zm{Sj4@QPP*Cp*L?%!FQv4-7&l|6~WNp7^VF7#iRyvxg^#X}f54Ou?dK;9>d<9;#B6X&psDL5K#sC|`?Qesor7kL@)u?Lgz%EB-N9bHZzEbMC z`I?qKpBm&Z+z#j<#B}i0QxouYOpS&78>!8)jlXtP-ve)4KN9Ujdm~8|T?N~%kX@Df zK}R-{HKz`5p3?#N*}yB&YRHCt*|3)2ZS~Fp=^}x5vHy!9;vT1n(k0N|xUWv;7+79B z^!o9NwBnl??Xt5pr|S@PF>XqQ`cz^O0kMh8bCRk31$>|C1>6VOF@E0|6FBWXR$O5s z4<%c!M3PGW3A}fzVQzk zd#HT_^~U&>cf&J1s*_*ZOw6xEK{{dGdOz7WKbmk=&F3vTWW;dcF>uN)p2;o!bRh3( z3)O6=SIrp``7#jsh3NM?I?dmB5`=3-G7hMqEY4 zgukAv!-0|&zthTER45_kGKVXSR71|WAI%X?6q$9uE|u8|@?qS=EpyKQqu`~A?$tH0 zA`ap0*7p@X(pg*x{bCxiGgEV+Y`N0R4jOBGrT@5d?i7myeimlKVn-2Z3{JkDe@%^4fCHZp!Ro z#(msdkyn_WG=sgjS-JbV-by2mnD^}6AHP4pYkYUr1C&|^*SzZtc{s=qYLATn&jDp6 
zj`D~cLVvbCc_2!iMWwd|6CuKR4oQSrW#_d9fR8SpMw(AZc#LVSv=$(a9RO#V zL1?M!;};|>DSg}oIRQT@l8!-FT&2Q<-&k60Rb$$fGLe83FOoc7o?124L#cK=)|c$a zN_W%4YHE}US23F>2EgT#s>y-e7mOJ-YII*NSv^Gw8|QZ)3vcE#>CKtP zv%KoYWWESL#-v208U1cAr`J*)eM>*Y0?ww{fxHeoCh8_P*vWurRDcf`I}6rgp9Gv^jrcPq@sac*uAqG82ez#86Wdw=#QnLVc|K!|D|afNTW8W>Q+5_RiL@k;Tv=s?dYncR*Mdl?qYn+g9eh0 zmk~e~%l*b`4Huu3AH7Ooe7^2PfA#kxoMmVnQj$hnxlJxHBQT+Gp!j;L&D1Vxd8ubO z@+k7x+5&!j)x0%LXdle*>#sNW2k7(}9eGFHI+S4L&t+pB4uBm{*MGM{ovAO_P+v(HMe#!enJs^ShhA05LFk{rYXVJia)F zWO}CbeooI;lZ%abzZG>$Ga8#?D5e}xF%7;%RaHtJn#g--=*iVSIjcefTd$B26A=eT zh5|#QTV5-?NY!$*P%7*d=(9yhG4-;>yb-0)g@3lrDmCDu1UvZLQ3-2?y2j9l+6fUX z$Y9X@@$M0RNSPJ1>+uo$!&j0_iQl#gpRkDC%Mb_(>F7qwqz56&e`2+@F%~B{z8j9m zC?FCWqYV2kV0S-8ud%9|%r=UE@QJovM*H0~;U07lW~-^&vY zJc%~aJ#)e@Yxku3n;PCZ18aR{s)%~8WeXbLm7X=6-{Ma(Ot$kZ*=JRXk)&*7e-NZe zUIB2b-VP>q%EV%=i`UkopJ-nnCHTFdrIq73kc0AE{>|C*r9XcQ(LWCqjEu9P^CN$C zBJ>9RlL93R=Y<=;a@@hE2xcyQS(*wJA)y1%7WMuPbZ?Ny2vNS5Qij1n1Kas zV-__gpngY822i?uWu)?`GKvk41W;>bjO{ux)fJ1`5+710n&V+rc7_eKS)b*v%t7_o z90ntGvWa0UcRR)JRbi9$3fTp^$f~@dr5|<(C!q{t*~nPY-h%KPNS&<=pYIDKhz5vWM zyy~~Oy6gR zpA>H|GoMa#3ML(+ESe6OgtW{#bt|xC-$*=Ue`!M?=HC#u)*Ih=4j1a;HdH9 z9KbkijU6nL(22DMGD-$`U&zt9e9VMgw?T49S;UhH5=Heh^w`DA%9&MAG6Xw!Z!azN zFE2?nnebzTjtPYTrTKq|%yiXNL;I7uStzSm)ZjDmd8ACVjfGs8H68Pp1J+h>F8nzF zMR$bKo(&k|?g`6hl>}`3YBD?NOgnc+>mgAgtp=S05_R1G4hrcAf@)Zlm1eet6U~N( z2OqGdJ-;0O-goaL-`Am~Izf__W5LPy8@%hDMR7pj(!n0z?%(WigD<1w@?HR$wgDHy z9hKO@s$}}Cc~Z}ieXW5qIMs@L133DAb3>*M;M&P9iqiMlPB>yDQs~OB8BXXeisb64 zhI~t4b4B%DoDRs=MWCt@XYJSN>iE61p6@lDEec`LXTFZ6TzJbnv+?UK0bnNdv&9^G&J5uabNb5uJ3KioD6GZ?U6lieqAFw)AmX5j|OJYS2 zyVMBmD0UqdG9IhY`=ye-qgb)1Y>~92?;Q$C zW4I3T!t>Mh+xJo2b%l!ec~ajVzC$_uIz&Yi;9wtqBg$E0<{s}Vum3V%p9856PR7ET z*Qb5sU@fH#FOI-*^VrDP%d-H&-(fLqScAEb`vF(}%x6S}Oi%4d8%!Cuy)m`4M?JoA z|5beSHm1vK>&MV@iUZzXQpGqrKI(DlngRVj{4FpGJQm|@ z6AnUarS#osKD-(ju_$7fsW#bCIaC{uzQ&fvT^>!l%TC7f8+)QbzYmZ9v&p%Y8P!l}-LB)%9( zpyO}Nnp&4AMw{maEr281a8JCsL+xRcem5~CX}F(@9R??kfYn{)0uVmcCozACBb!0t 
zQvN=27@;`M5?<8{4;Y6tmM0}Z=rW;&n16#Z)S9W3i%3DRW5Gp-uBguJ3-7`6yjI*U zVh>>tZI|K;HpQR1*<;q;$uPac=p-RQgV#NIOEMQN@)bUvJtC(_je;*9S?{U3apgxi z!~aZQ7(H7qYrkPK6#hDt5L6f`sw8`AOGVUDpj(f`gwGFtAgM zZrUpmUa_mq8hLTGI8oH`f+LAolpdI1aTX@|MYn4b>Izb`f{uT_8nw>`rmb-CHVXpr zcjrcTRjc^}?#qZLID#64i9>4HQDrkNk|>q#RQ49Ktg>A@oiTz{a$kFF8mhf0&VwwB z-wy=vOZ9~3vW7F_LE5wjyN=;sTz8+T)okN}k{N>1Seh(*xfIzaby zTPKN&0A1#a&olAIb6%lq&J71xE?_; ze6Am#dRszClLXw^rMdd5;=;ndg8IN88WG0y;Kn%wk9a(+)$>^XM1l8P!}n)xnhP$e zC5ruh#xF=R!dEDQuG-7`!TcJ;qEI-Yf2EJS#`t)Y1#OVmOgjeQU~WbU+|>)2NU62& zE*||VfCKL?0r4{kV`5`<0jMfoBMAkwY-X~g1KKvWk3Ylm$H9TfjT zq>U|eIg%OsT__jwW~XomQ%}-dO$PtJ^Zq*x`0M3scoZMe#w17GR zjqw~WZCTQ-e!q7v4PI3;mWxO!5`2108XRFC7zfeR+1jaSJ1SKCtvGjpaBB=j&j)ir zlZh=He@oo^#>0`wd<;_cC3;LrxezMBqwI7&p2c4)c>7B~VP2$zS@I)o5W-k%SGy-j zpQ4ccy%S9@oA!Isdf>stbi-R}P~2a+kiTrfvL!>PU=ji0Xy=< zQXS3diloK`gOFtv;&kMx{H)}t)gc9Oq=Fql1xxV2+&y9&B6ZoF0d9qNg#0tJ@MxqQ z`bWE4yIl{aC3-C^z9K_AbdeqH=6`K+nj&!@Zt_5@YmM@)+DE;H;4J>(`QY-1?5cIl zW9)|70nWnsN&ic*sKSK+2RjMZf1URR{0W)eNWMC-`DrDc$?n^yfv2@0g>62Ys)dm{ z`F|nX>k5PXW()6e-zXgbf$Z!3NhYUdSK%OKkj#``)F#*hphPDlX{S+0G`c3{`M&mS z87-6^xKU>NZgM)7n9Ldt<4*4HReQSc@_hf2N?Wqx{boST?lrmXGpApkZMsVUZt~Fj3<+TTl|PrIe@TuYYngAuUC&K z+p5v>Y`v|L1g~s9?&S=Jec+=E5d!F%&+U&9vaXOMbXW$2uS0NF-Ib;1W`wI#=j}o_ z#{-f1M6ITX6=Yo@fY`yG*2TiTd6I()#dhUZpwz!um{1tW$G%->=g_u=39QhG?-JPt z+7jRQv~E}xF)M{Cky&6NEKleN*wq!ldNSk2%nm5LDgNF-@XwDs-Y3#yFzlCLGID;i zSjz5|_TSx2`c6H3*Evi8?{qkCBJCb#44|SZoJEkH(1kNBhgf&Q>;GW9%N-LHX{J$P zAzwy`dHi4!pgE?Yq#eJ%xn;;IVrP+y2_A`xxu;ym>MO$7$oAwjDRl2kK$9jsmy9XZDV5P7BeXM`(A|H)oy z?}gTm7PYPl%4ToN4qaQQJZUvhGX&Z}8_e))F=l%f)_f%0>qPthLtmYX4pKQ94#qBvBu~d!RXzW^LSou z#{dM~wEzx~q@)tce2-s4lHk>Fx$N!~f#xG{L==(Ll}oxvyXQj|QTD6K4~=Vf>MO1U zau@{${jU``^hjDN>Q!MDcmB_A-r4t4Q@`kNy{wU&kGe8FSdVdp)vw0_Kze-)=3#ri zI#*RA@WnxHCuIQ5h7imrX+*v1C0B{>Sw9SC0LSp+QC%``R>?p)Hy7Ze&GN#ogf|9nh9Da&5)S@04pUe0@QpcFNagsWEcTjhiR{~dj&41DUoHwS ziolvDzQ7>*gDiCLtB3oYRPH-$7{-GO3C*1=D?VFAN_HB0m3O^CBF#=|sUiUUdn;@x 
z?jxxzd7x#5Csk9yw=0p^2be1qUPo&ZG3k(^mH(-5kd65&Cfjr*AT3~xZh+7M(GlBimnz>u;q@~v&WT|lMqw#)_~P9f+{FHI^#;>GXgqk z;3Dua@SeZaXL@aBM)|li;@k++MKCEfE&tR*Rs|8yf|$O@Dt1aH1O;e^P-jDikZ6hs zusw^lcIkKzmq9LHxy{Jc1Yirm0Kba{y0J{dvXkgsxW35}edXN>EjA(s5YrkQfzmHM z|4&*n*9Vp0Brhdc$^x%7nHkrlgZctFhiqNo!sTn;B2ZT#TlTqgO$Q0?!$&$ZWudA@ zGn`A2<$Qa*HY$;+^EiXFPBi-%PXn@D4AZj%V{9mnVibL(4-vJ(du5W zkyHnlaWe%9bFF~pIWPS`mY%?q&iDbtTeyuxF`v4SLh9O@>5+uQ^u!T*04}=|WPs;X z+J2mj8(K|-=p!EB4+i|&fvPeVgB~%BGSoQ)nXWyu*eKiqI&$>5)e7|HSA8-X1P`UQ zS$YX4cV$U@dc5Mo+-{xjrOq*H#71M1YwpPPoL-?hb7?x0SPW$MxfCFPh1>?oaM43E z<&W*OeYdjjvrBG-uipcd5srL?bxnly1s&m_LWqm}$JGSM8A439Au5;+^WvSE-u3=Q z#q;Z_rdcrn{fsIx^z|Wrc?(S!mNFGiniE)P+fYi4n*G)fD?u>^ccY}_Q;H70(EYC2 zZOveCsG$gYIsE<7ckg@{q{v5RmRTT&KWnQTFQ&6GXpXE!)LeXV6L4`6cJ>xBVd`>% zz&J8i%QkQg*Igl-cFl%#aegzwg@{=%`g8V(ih395VJ87jzn_FqAfk-lw3kU9{DJY5 zClY{LZD@IprMM;M0IoD1q3<_$l0I?DeTkKXCYKE+r!f92`sApr50;-M?jSMoG{Wo6hlAUV^4 z?t-RSswrF|H1o`8RPN@J$E$?#th}g@*mSeDg#rRpU`v-zh(6IwdnHH*6LssU>mnMc z(A)?8aiup%vSdSTi>dp*l@PMehpD;Q*{uG6vkh<`&{)QU#9AJFSm#eay>35pW(vAc zn(KjeR_%Rhw7E*c?*vCk-2|~8`e~463DdLE75=zSUKT^+07mF?`eT+2KBLZxlsvvI zLIQT9pwWSyj(YN4jvGM3D10$cb~**a$rBT=VfVB>HMIDcMf1TuO_$z$0!f<1wm^u) z)xM@|%?t79F9R&jYvj-a_K*H9ym){UPi@;Fbgn^zn6dLH09RK*ELeLmAr~A{K_*gy z^H4vZ1W1FV%xs;7CBRW#wAOE_c0B|2+GL?sjwylP?SScWoj2J7bQExA@(I_rP4n~P z*~0GKVhgsFfIr7%hSpE@s}CW(I?OWsKTzS7(X{NtCM zEs#mHqq;ZlW{$CdI&1e_@SQLydR1JK9x!fC*Y!XCS-n#zKz8o5+$DO>sx&zI#_*DJ zFfSx2{ezLMtbzym<>z^DI)rbG-{E9itRmG>0ss^aleIK(PDUz5;XlWXteANs;HBxs zw+9PR@lK(cWY}c?d{(qpM}*5d)$pp30VYnHL)4jEY+4hU-@?WXF=bfvW019hGoHh9 zZ_2u)R+S?26tmm#jQ<-{zwJ9=xWo}17u%T#CU6efl)K^bw}+Eo4tjDf_88q>g*=Wa z|HWK>G`}02rkY21{77zQI#&i+d3`1ndp0SU*N%eE)W5eu_aQ)bZ8svFn1=GovbG;( zEXVj<63QHR-|*3NFzO^sbKcZY*Ebs6j(DxzzO&wB^xy1N;IxPR>$CW(qyBe9qVt8Q z)d%}?fgw+VvWld$pM+NZnGJoi7cO$lFlj>8;8i{gLT>+PZX!;b3Qgp1dYIORzo|up zIN0ya?o4T~{`ipxIz^hr^@sYJZoR+kQR$PqyZCZMojSIh1V2^=S>C8`Y$VEYlvWHl zEeTP5IB}b+26m(hfiUnMb2@HEk<& zJRsBTmm#~#HvgI)e*Pi(S+a;a3XB5QEvP>L5XWGI;d#7gMk9z&Pd!a)iQ!*757S*L 
z;v-NC%NGxStCtKeH!=QI-Y<*?>G^uVJ|G3oQq21{^>jGIV1rs9=1-fO8Svg(%$vz= zvXWDOfRV{^qu{zCm6c5RbW4;f{_<~;3}bn=E%R+cL0i zw@*e9QE7jstes0{jIQudGT1g39Bp|hu)o~?>7k0sctz8SMOx;}xXustp@?TA!~;|; ze~ep^^&whFq)%b%A6XciF}L4633|BjTzo2Emt?8_zYen@$4)pHtqyd0Nr@(3(8R#Y z(__4!0Zo*Em@QSfg{oryc5f3J6`ddSi@y%|d_&1y%G52jeEY4IY1`Cty4q*r$q?%D z;q?{HgIS3$(kMbt=;EgR5Dg^W`202l=lxFKQs>06eLyHwig#Z%ovB)1}J9!e96y|vR@o#=;~JRqpJ!pyy`x?Bs) z6G;q%jZ6phrgBF|JW`d^DkY?%pk7>XJmDXasfq2a&>|=3_m{Io!122<%`2iu&AIuF~z6)+0bvOz%^Z~!CO;xQ# zKpHAm4zM!C@-PLMW8;kBCK3GFAL<|5cP;X6BkcD|o0{UXni!n?dND@efmjMfks60di6etKjC{WANB3D5Q0z#;dpjO4UPywZ_Knn^eqNsEx z3B92Au61XvtTShy@B7Ytd+&4p=F0Fv>$`oKY_wWaI~2z*zSjzL^pCX^)=i~_7 zC-&w#GrzDrVv@{IC+$}k84Ri~mZWBWrdBuB#(r5*LVTQToilPENHd}=Qry>d8&2*Ss^=O+ z__?M8UyVI5eaB$yy#dBRT!*8x@u`nPJ<|;HRlj81s&fep6ChpQA3c$=8NtV=%TCvo z3vV!rD$gblN~y61|5A+P2ECf}TQMNb=H#;^wdsdrFMN>r)IFvDp4qaC#H_YpcFe=` z-K7sb=H*8b9`(E3GVb2p)c#%xJfHz88r^>{KcnInRv#-qchn>)qoLVOHEH8=w(v|) zOR$i?h-UOt%JcrF_N9siedMe(@Wh%SIxj=rvpp-(TX~{CU}J()LAb-xN9}ci+qwd+ ztYjVE<{+FG0vf&W>bt#~M9n+I^^UvcR8UPx zHb$hgBnKsn$9AY}UeC)L>E7JxamdWBfY=^f+nIhosXlUl)b54HYivK++2y*iW9k!) 
zLFrmiN>``f&0iswgN417;Z?JP#?g*j&36+A>KF%l>l$~-dYhUiD3?yn_Z_b>vPipN z=>EXlYUkJjcU1cBx0-Z&r?@angIqh)v5Mm&XD{J5i}OdwM@EezyzaC;&$@H&!KIh| z?};*te6wDyk6-X*UyF!%-hZ-YljE=HA&&*)mpr6m=K zLZc(uA|TRm1u92i3H~PA5kX)AAb^hm06+|oL7l-_dfd|!Ap0UxAu0kW zAcpbqNb*{7B#OwfF%m|RH3(l8ju2L@BIrxU7jIKti)aNG`#gJ{zO_7~?=y3U`FzL0 z_Ka1P-Dcl??e}pdviwt%=_ESMejmy8(1>SUD60`7XVuWk?BD>m>tR*5=U0r{T6kv% z{;6u1C+TeHnGE1O%z;d6uR;AcnKb*&MuS3Et|BOGyy^$rQuBH1diS=6-TQ#io4=)O zV&s*X>44y}s(xXX{b}X!%Mf><%(Fp)R&^)*(q7$k=!|-WMdA&$%9_r-=x^l_r!KpC z*4mjrWO(#-pv&DOJ(VGrJ9gTJ)Kt0tdD&=ubalg$ZSOzwVK1>OFiWe;Em=0|SMW3k zG%wwkAaqbpv^5TB)}uWqeST_lEM8(W$in_N{(ffs(~Ui2onLr0ybyjkO}vplJ$Cti zmD48QCnec0J6EP!tz>%$x#E!h+0&P@OUfswjCQXrb$MGJE$4cDXcTBh9SUWZJOK$& z2quM0impKve7QIZmC1D~VGtLVV2fO_WJipGCzHTZu8ifYTaAQ;3IO^i(%V}W4}<`2 z6b2J;qcSO&-ZTopqC&tnZOrE=94Y2|NrjOJ0BR901s{>3c!+k2?~c#$WG%pfqt+o} zp$G-&pf1B7MI>7Q=6@#ZW+wROW3EB~cdf}VGwBUV|F4~D>-)#-K()Q&4CuD>h7|~2 z>VLE>mMc(>2$t&`@P+kLsxEsjhk%x4o#6w;;iyP~(SvmY&}cO6NOfmwM;wD7XwLi0 z8jZ;U+_AXX>-gGC4LleBX*mYS0kX|fF@Br?`~dRsL#5~8ZN)diLPVh@VuLsw|277m zqF0L}i@{i+%q~Ek#Uuy212CHaj(k`wmjM(Klf(oZMJO8OK_+W4p&X79iTMhWOfGbC z(r&X!E(?$0W9R<=&YJc4v{}3B9Ls?*N6Cb`(s(Zb-s9|^8e00Nlr!G9irL8sH` zfB?{8bP5f^EP_9PRF8o)7B1af3HSzOZXewNQ3qO diff --git a/scripts/proto/examples/viz/rnn_1a.pdf b/scripts/proto/examples/viz/rnn_1a.pdf deleted file mode 100644 index 8363ea3acb8e16db6785571b3e39742a80f245b4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51937 zcmZs>V~{98w=LMVZQD9++dOUCwr$(CZQHhO+ud`%d*6L=XJRTUGHb8eRV!EKueCEt z<%LCQ8R=M{NSm)~KB1Ti7zk_)ETFi#3FxJbZA_ia2w46>ickau1oWcjR!+ta|9UHZ zCu3n_Lt7(bC|+JDM<)kkeQPMUtZ7Y|*rOK2?n~7(_^7D^v3L+PUKj)rhPc2(PzMGt zJC{|~fNTI4xc5)@T5-vXoAeYkuCP&HTPfNEB3@{p>#*wUX$0OBUST_%r%mmLcZ`DOZj69Ju-m(4ZR6OHU}7uPopk`s$xXnD zTi_uK&(`Y*xmCcb<7of|Lwthp7n@Dg+!#QONqn-_Aq_2n2@H^fU11g@T*q`ComT?F z8Td1(Xv_lP(9zdKl+J!Jai5oU+CEw#joaK3TOW5jcsJJGU*nJaY~)kzw-bJ@^iQ5rZzSh_c-kx40I>FVzz+w8Q?*AR z&$8oVb>FuZ8MzTB-j-Oo>%&yW%I9asj^R^=&!iM2ii$AmGsM4Q=+8`}ICs1hUgA>x 
zk!lx2@@lt>NX;|&6q$^>)>~AO)RKp41$JuEyx=a5k!fy*NC!*qp8O^ zk$ki^G9Ro%*-GrxHniuH|kFy=Uo!wK9Qu91p zwGA|y7(R%_0gjO#3gTHqKZiUER5FwHf^t&XMjGzBR);w>EbjAtnXB5}-mbN#GlZtg5ZO4d%jJzIZ?_eJ}Vteuq86^L?Bd>7)c-U0z!6 z8^|@yAT6F@iIfN;8#$Y#m7>I-r7)B-Tp#fa4@{G)fJm_dg{Fbb%usfEbNhu$T)P9d+}r~$Z4 zI1-p~{6L#Eaqn&KTYgdTO-37tH~Mw=sZf2X@AobxjS+_To3Bp!6sRlyH^EY1c z`aAPoVoU0AtWr_Fw97*Ojt5FA)uMQsddbuglGZb{!q~AagDte;0^Tuv#e`j+=4`+} zrZf|Hdjo$H@5F)gjz3#x{RM?2P1+Nhm+FqmZcU(KzgRZqta}}ATfIUp7NrrvKP0P> z2gxs^LA{C%ncgirS(sA^oGIlByazt1Pm*S}`(~(c@7am_VdmQ%aGVPo97fzNnlNUW zv5Aunh&VnB%q$xKa+2Lf~L-X~fN(OnMBP1bJrYz_z$ zbySw0?TtyUhiTXV7oJh}xd?+N2nEtf4`67^yZ;R$gO-f3zd$%$hqQ}vm25QAX4FJM zkC%L!$kg4sk%CSW*PIzzFWCHJnI^*agkh?HppA?A!z)}pi2|)Jbb(oUp?Eo_Lz_X> zVJ_7MIAw0@sbh3e(76;=hTiNfbcplt__SOo~pug(2Kj8BvwQsyHW^w8LLSyb%;JLc|R3 zgvA`MAL=}(Id<+%-DR3L=iFmJFe0ZpPSE#p0u3k+)X0U{8Z|}UEF!x}+04B7^1KK( z3Dv0Di_pAM9VT92OpTlBM2&Sp1@$KOcvE(+YQk~xHYd97)f^jfXp(PQ6zT~}dftiz z>&lPAU}awa-tOoiuiL2)(IPvp2oO{Rie!#mMd?x984Tr)36PVL%)G2|$jrj(B5N#W z9%_L2e z_2@UiYLLpsNK;E|oOQ|0&iXBixWZZnY;ndzpea)4{;K)@Fv%QaYlB9ADzMn<#&cko zsCc$q!&<%R+(tByR0wHoVD)REh-fKukVq*?(sOd8DyUv@72rOH>jpbmNTXhF#={7aeu7`PE*0#FNv+uG?32z*fPdoJh2h?-|shm3*6( zqxUk&uK@7 zQrv`=D~4sr#P{4O9E<86$0R+m0#B4U1#dy z#N{5a;i|^^`nT|Hk<(_3UjhOeg-d=X@RGPcd3NN$hSfiLbq5lu^WT7pEMVvKkKi~d z!JGP*@-bR8$!lK%fV=Y1mDiarTTfi<1db|PY6l+38~T-8rMMcGmS*7XBPY;TbXGWI z@6m(uTrM~#{3FqYO}v4N@*SSC%sDn$CYHW+fj%k?TLQkw3C5^DRYu-Eu;VR2ZtD`M z%Qh((p{1&;++TN3P}HX3_mkY+)&>r-b)Wa-hH)yl8j#H(g>l=}*!P|1&+PUq{h*b6 zYM_0xa$uOGbfXHuVB~RC)Qj%HDi;P4ScoCA=8`+Uu7IGGyDbnBAd zODSA&{C%&7^Roc^ypGFgK;l(xS5k8Lk_`NAM{QBC z000G104nHEA!5AKfRZiFiZf657)(z1wvYn(AsHxX2u}--BDpbvs0I#>f1SjtPy}qN z=9gQ5xEu&RV- zr@WY|!+uy$sY1%bx!-4kEo?LdI^V2FD!U~;0lu!&eQ@E9BxCv=7NLs!hm|QEW&uKE z?Ic2iRPk=ZGew0@ymVBe+onMEiYi4hp3&D?h!_QCbxRP#ub6F-Ov(K8m0dOBrxaLV z&D8sxb4L$>Ey4&9sd?Mmk&ms~r;<-6l;+mcRv%Dqn^H^NvOT;bR_|^6D>ylS+EmKc zh^t5|kX+~<1MuOMRB(xx&RSyy%t|KD1owcqy4h>da?E#s))JmV#>?Ja4?b>Z@ z+om?b%*Ry}RCLjsg>>hP6!}MJD_9|cFRdk04N8R?z9sj8eLwX?{r4(b#*|4Cj5HIMU&}XY 
z#io?&W&7GPHg554KCK;#*|hgfpJPeO0r>=*oiXlI2mSO`)(V!Od}XR!$N#m?;oNM_ zAil-KMg{Bj-VNKt6_X*p#|pT;2G*IpY}+3hZWcl>|XA3?J%s6zT z*`jveRcvtQY%{Lgw`+#+Y`ITr+}#h!cv90vp`#6*H}zK?o;003m-T8ZTb=}$^&T{^ z&l85)t^bxL{Xku753GlEK!Dx_F~+sSx~~dRc0nPCS$kWA{RJX5lK~y^rs6w%O!tX~ zZma1>QtL|iH^dtmsZWDV<*W}{z~=7c5zQoIySS7gru_xBd zsX$t8NMlc4^OdO~?XiEy09!nAE`5207Gkb7~ zn>oP_fh4#&!LCyaaHOq$n3ok?jj^#eD}qHA_S#RL4S2)e+#hi6p$lLrV;iIY<7NLn z{l^9W$NK)K|7U)g+1S|slklIA>HnJK|FXp*ZcbuKPX8=10XO%5fPWXgKEuC&fL=&| zfRTXSQ2$^1&nW+U@!u`IsI86D|H?L@WuRps`13zT`(M((`~Q%!{$DZz`oHdW#su{8 z`liPJMJ%uHVEpetjQ{f;y^OJuxxS#S8-eCOh=G8Sg_(hXjg^H!`@hnK{*nI6cO>|a z$^UOhFXmwDZ1=wg^4|pih5nBcrGK;z`ZkVs{~2xPDgG}ZZ2#i_PicYu{|@B;ue9)=dHnAh1S0_>D+|YeiTH1TiGbw~ z1M{E%h5Xa>|Efh~wHZ1YTQ;)dIA0YRR;{&IbKzPJw^(nsEY?@YSa0IGWX)`P@||*j za_@TX@a<|CR&msJR9w_lxNkc~dB!7BR?I=BuVDfXUt49Lr=4N813sX!iqgqWxUfFB zXf-%Z@H3oNz1=s{H@TUDeS4+#QOXCY=T=4lg2qpy<2e1Kz``h&7x~EHw-0H{z z{Id;B49*Pgx3IOoo|Ea{{{(ORfi7}uBxwunWb&4nC_S|zJT!AsYmud={|klIYzKn% zYrYe>@0;G9T%Da+z4|+nrm>ds)eO)#G@K(ay)w`bNP1%5a0$Mw6^)&9cV^>qf)v-?i%08H{N&xE?meaAey&Z3R- z_YHcqb-!R}VEj=y$%S70rP}=6`=#Ca_4W9sP94)t|3SR?RnBjVY<}KtEc!~#0`Pq@ zGJ}32ru3uPqnA;~=KR?e2w_EA!TYVP`<>JVeyi>N_?09-wt6eYCe&oU{Kez&>mQKV z8o)IxFx$7X`q6FpM$b2~1afL>sRO0j;86d1#si#VWMumBl~8tsHl)_fOI*Y^{+$N2 zmicj12V>MU$M6$s0K|~?h5dd1 ziO&H5Q}PQoHGDAV*CsL zad`FyZve)?{vG&3bNwYp=-crd`oRqS>RZT%eD{X^HQE-fjf49e_ImsMc&h%z#@hV$ zIOWgpn#Xu0HTOq+8Qq7lFttNo7ydg)=o)sDw#;aa%5m2h83w(tzp z$X!c@z9Y8*bHs-UK?&-_ExiXr) z97m^6E_=|}vO0WF+`D7)g!qd53DgXFI4+k;xN9f;%6IVAaM~dqzpzvzmNR2h&mWgH zIBVx{11&(#@qF`IwXNfi)+8=(JH@9%-T9T>63m^#a63Zu^QJB$TwNLG9)&WW;R3qT zA)#7U9N1)jVtu*emCtsJy9H^A7Npc(;2wP{U=Mke z9!7df-Ok9>Q&r6}eo2#^*d)8zHp&TVFQCGfvj_#i1yBB=HsTJ0N0WC8`#J~ zb}L&K8-?d5(e!*>FOjMAG}>lvco6*Z&+$tgrjS~Vgj|WDU5aQQXX7eW+qDJJ(6&w* zlh2Ua32>fr$)&X2`8v4_j0}TAD<~EZ1vOf$*N3jNOxW63!8w#sUWiI4xkneKz{WS4R$0XLnEn( zUgrWPIewo!Z4O5X020s{TK;%sGT_e+5?j|R!=m;6#Z5)`1QW7SIslkOGN_OPa(5O0 z5)M*2MRpp#zlo?zlzFbbh1~U#TerwG9)uU#=5c$XzVYbF=c$&u4-KLRAAg_d){@)R 
z55@O%|KhYg6S^{-zg@N<{@9nhDEpo2HN__&O1B0~Xf>^{$0-~btM_E;Zk3<)QUl&O z;S__l7be{iN|vkd`H5Lw&W%P#Z?XQBeP7U7e^g97Ar-rIu@6%RO#*|;GDcK`$uS|si7=;Vybpw0 zc}!*a0YU+U%1$0vl8ykCfIJk?{Y9VxxVEsp~(kCKQcm9f|oyy)PELb4{ z`qVmo378$D)|=%SfWbl4B>SPqe5=~3O5udG)l$+|!kS_xga%Stc?j)KD<7R^jW2jr zvlXGyhRnkfXIGppN0c*z?eQrvheI6tEbzWe14gwTk{Ojp=Zapldd}aLO9n~H?9`Pm zc(8w!Jkb$>uzUk4VzAx6QKJbOV&F?U&au<4dPRfxQY~2la`_E7&5);BHM$N<-9q27 zexN(5eJBw_Ot_rsINFgdqs?^C_WQJS zge*WUB4g>G}VG?2$3p^;cTdJ9Qe z{l*7C=HNG|E>|S2l*bj2S0zXmBeQ14Gr4vwfkZ2@989(rNlT{E{AX%F8SRye8{BV( z*O2e|W!RS5Q!8!t*vnzY5|Wk1Ub{Pg%G-@%0j`+6AnTncB0ox5#+WxQ?iyKLAwltg(ycUxN`#OG@PZ-7V;!M{D;Nl)GcAt;(lZ|CH)s0of&RX ze_;ZPwB>7*Xh!X0eaUnHs9A0a1MS&=&X*`#oxV+?{PNg!gk%2x2*`jN*OO8*^mej0 z4mwi8mJKRPP2%)Ld8U?(=1KVq!&%+1Q&NhAh2%+H!c=y2iH7sAzS;p4YSdKI`FE!L zG6K7!w8~FZ3a}y+x2iPnc!MynuIe5yL=ciQaxV z(~bt%7d16n?r^tMUQ?e98}X;}HN=}^FRb$76mxblyVAU4?m>7t9@f5}js6pQHP(#9 zeIhvjv9!iqD29VqzQSP>bY%jqP8Glh4$vQZ{oqw(Z98|GA_%gdQH0vHye#PlPt_}{ zYkPKKVvX2on209yf?lqmwg}+5Hf8q5nwF|bP<2MBxLMGW+_!k26bPC3nsGGWeR;+$ z@gw5v$F57xHEcOIdEf{pPy$9m%=Jrfndyj7drt~xKO)`}-~@x5rbz7J(awBv^`8CT)*(u1 zBj7s7mS8vN)jKd@FfgN@WIG7~x!d>k`ypK9Q>>e;hQv{iAA=tF;b}meAK)!lycEmJ zI_;iNgW`}If|<-4!q5=z!VwAQ9IB4(Hd+me2{l^r;> zIL2@{0qA>ys&s>AxMVCbl4;JEh{A_PHgD~m=Apu-!AG%1Z@j+Ed?rEOGy20gw?y|V z#^5X_C5y z^m`+zQM0ym9Nd{gkFsH|bfP;tu{B`G-@O6ms}e4B6_eVMy`^MNXR#sF1Vb&)MGu4j z+f>8MDK2Si1yNnX<#e80tfsG<4{g#B_D~*RX`NsZ@~LjjL$|FN0xE)=Vm+C}pTNug zm|UHx*+yn6Bg=Zj=#Xa9dcMcIU|(`FjJ6QfhQ30*J9@W3WqILQ0-(f1y_P`s;j^so zAvNbL1C}J7uK5c1 z%%KJ|oGX6+d?!Fb-W>}i5?Q|L-%`rTTV@zT=U!HNyJmIEFxzC54h!GbrJTO{nS}5A zp>8W=;hZ#s%XM;>P`+7Ui9;WROe zB-k+@G-uLQCvsHjQ1PDr`!L9u;v&-XTMMuJ&r z#`Xc)-|LvL8*ae*Gfglqtg{ug+>z_PLy+eSi)M6DFG_Ng;}K9K^nD5Z*i}xuOzl^<2r~T5IrNIe^hiRoG zr;~)VeSM7BZtRM~CEJ7!p^6ZLMXZ2YpUD+;qUaf>M?^WQnry7Vy(D9BaaSc(zi|a^ z0bPdT{o4gK9Cl3n&c_cjBUy;~sm>k88oF_K?m*CyPeiMe!TgQGLmyARb4neEL_ow- zTt2Ya%cg%-y0qi9H*QS`j1ok;`c)J$#z8EsMXK{wSmT?j6vXg}hFW{wS8*l2kTv&? 
zfwM3(eg#uI94#UybAIZ{2K43v^S%pPBG)={NmKZ0xq|RH(Tjp5Uf>*8DW!ca1fWc+ zO&zg!(d`fXH~93?j3g!9!ocY=6Gi;H7`;fw64JJCw>?{Cs0#3+vCqu=3rHuPQ^dQL zq(3_fnkm93Zhf>uh6vHmX#bKlDG>mnjJ76fJ?{RCSk#@YiAZH!oKUo*F}bp0I?mTF z+1Vfw9v;&)5FP3GS#?? zsFF%DsAvsKB*(iOvUmd!(Wz@EBzVH(z>x`tK^pm|Epzo$p07e0atJJi93fIebO@c2 zSUBdCkyZ^2Lqb{nn@Ri{+24*APcW)_y(QQ$m8t1d(FcT6e5Cx;_w5LyzV7ung`O0| zS71CdD~&vFei{^9$FI4pfmle$$IZJe?>gB0i^z(xa9aY$RnP^(g}ltFwj9;{a!h`v z;($t%O1G!>LYj6KhWM%92GDo}meB^Ty|`WACbdp#2m7i=h}*?d&X-~mHy|;S2Ov)J zRB5SuG8Ti9^j`P}US>(uMK(&6Q*~pl^^+ht>CyW-GHU0=%MNT?FIa-=Suvl7}g6YvhySJ};8Ni&%n0d3muXfVeFL_S5P zsZGj<=c6HZl%G1nw9C-cx&q4U{6I%JzQoQ@hUxQGgUNhOXg~LUdtGGU&(0p}Q!zyg zhVz5YrvM-#$Bda{SDHWanxsi>g@1dx-FuOeJ}aYx99C=W;hW4J>-KVD?~vZxw=*F` z6ic0L7aX%mnU66NLkP@-DaXt>_ihB)imOeGGr>4l<5J0y zc^Z;D(B*QNbd95*T;;6tN;ZLlGu1lR6D{Rb+PXMPKYp*67;CVof*&fLH*JxUZgr$Z!Mv&2IWif~I7IXik}j`GbacbIwB5(|@Z91H<4 z{)I92L6%jm`1fg*7fAcS-yWHj&3Rqxs)eEbRUlQ zJ6hJ!Y&_Yvh1M+hOHh1Uj)fylURX5X4st=D6-yIiJ5E9Tp~j=i`MRf_&GU8umr4)0 zcW#@v<)Re0QI}%7XiUrj_L2I*WKD}eURxZDZvzbJrk1+8%OkZA+K_VXF1$eLZtNxk zyzb#kn2XV@&IF~YtShtg*Ze#oS@^y<^0KoNs5qa^+Ep#Wgld_JEL%kDg2G)i0-Q9y z7tE&=yQ<^=(>5pRia%$ocp?hHrI3yTU$Ey_fWZC~{)+NU~NYZy(E zc!bmc?ll{lOJr5UAn~N)hi&9%(YH z90hur4~6XV5UU`*3i#~c`ry9mxtGoWJ{H`;HoJ|ha2Ev|$`tmX$yp%@K>8p@mj2L} zo^5NAB^v2h$LnDA9RD4vlz6CZ&G?UW|YpMF86KAi+ zSA0WFylhD}9=V!?T;qr-mu0ubDMj{RI?)?blA-H%ozr3Cw*N=_!AM=AF{LDdVy!-d z;(aF2z*@lY36thBPT@G~q8Sl{~jx_t2(kqjPdPUZlW{r!;ZZm1Y z3{vt%fyi7sWaWu6agjlTcpB%${eGgx3W!OD<3Qoaz8K`igu24YUl!47xMK_A>+Zc3DW3o9R;z0{z_$xeH6(FLyT)_Gi=7P zFG%HX>{|o4*X$#Os1)ec8rUT5^q;)HCz4jGL({0|TQ{Z;#NxdHhI$m^lqW&-t(wc= zYL24b^o-@o-h2K`?~UaYuXV6vQv0t94<13K~ay260T-VxXB&}?ZZ{juWby{Fh8JOgF1-@P!5sZ2sDt9iz z5Z?LB)>v3bQj`YSHw2QH-=yW<{k0=BvlX3F(E>)=%m@=8Z-yF6?sA(tvrJWC; z@a^Q|s>Ddih3?cYA1mmtbZ#EMU1-_R_P=Sie5Z8nI*XmC>mv10Qe%*N&g$ug>-c1s zm@%i85Ax`7dy;1|;tCS=7$PEa)V~h|h5C2md2B19HUSGLIVadEPsnS{EjSVwYX)6h zB%rnJoX{3(Y;XBc0YVEwxvjMLbA{rC%kwWi7;@EW1^cD(EX*Bn+BH3O4XJEWwpGP6 
zig?Ca)jx?+(Dj*VfR8^H4}srr?$jT-6w`gl>?o|wzJGM)Q2Ng;Crt>v!y;L52^TbA z0>XutM7UG0ZbK1digZ+`(W-*PFC0K{*)a)06EFK3uA}HYPRJYpc=!`tLt&ew{v?hc z)7ivJM?2K8I0|~Vlk2v1qX25=g2-Romvd8Y4O9*SFzIACl>&IaA4aIF#*}9{0Syz6 z;v7C7S7yNZ|84^>dwjR9241)zlSM%V6SI~hgIpKzf9QNal0()E3Yh_@O+}uAN>q2Q zsp4US($92HqJ7vAUc6%!$-)wDO5ItepXS&pp_axVui;^Jp~EFHDHtrKY`~dV=8q<| zJ*94bW8Xy@rqw>_i<~3n%42zYUR&ZW1mNjC@pnfK8>Olj16e;i$rKj!A?aE_?IJz? zO}U4ek*7~&cG^Px^z8oRKD{I=*3vA0k+1j@2yNjmv%AwoZnz@a`d7H<&Xu!WnVlVU z+IC=q0@w1%PKtE*4d3!v9qqU$vb?5_rj(??txBHGB80OLI7xUM=?{|c!Fy>Zkvx;D zPhiJDYLoatz!weH_V`;Ei)HVM6ZV@LM~pE#T7#4SP(HmejA#6TG{-=D#z*Dx5wp@jO`LT26EbHuCBHYGhFm2V1e6RZI=o{p- zVeP3Hq27*k(LNX9fJ2}xIMRN!`I%s7NMnQ`H=G z!OLm1h4esf5K|G;DJ-8u-}Z0rDCqcO=?|0fV0wisPXV(d-BkgAV%nD`Efmijx9GO} zDs_FbGwrxaf?2yqI}8weaH4}zUsKl1g5EKB7!yvsqPUDTU7@>6Mz7s-unN?6kjLai ztjDvq%YNylk?=MJGNDdDVzlOKN4ZE}pJ8r-z2-;_ z#FPfZ#pjV)N1@cQlxVKRTpThIu6^V!yP1w4#Ud$|IYya}zlGrfu0HGvBOf|)6?xs^ zzwf_>`v;+}VG>5K|N57zCWo<5vn6TDG#L)Ttz;!0m3EI`f^74Y+%)iTF9DX_mAlsM zu7a!kzy#(f>R{q(j|Ytp+wG;&NF0Q(#WQWXkf^??M48!h;YTStaOmf3F^ZAK35%>3 zAg10Oe5cD6ageDd(%`7WU#qf#QOY>qxkSS#nSX>p3i5JRLQ>y*h*yUBM4U;?OyNzU z**y<#8X=s@B#|4%#}menQIYwNX((#(3-ce`!wQ&t)9n}WMUQ9}$2e+8bdh8z=LRse z{B{BfDWaTLhrUZ$V{}jYeN88S286Ko1CH?PDUOv#=fYh>_VUk3dw_o}-l?hg4fK#= zX%*^0XG@sv{rz^@l>s#SI2v%uR{ey^H8=>Cj7l$A3M zHtJ4im8j?j?VyTv{7zqNcRnXAY_JqS$0wdkG;GTCzspVr2}5Y1A33EDd4}GP(#WUH zj9S1Er6qTj;<=tnlr7y0E7#S-I1C-XH+hpPt{SlYAQAfzq@-p=WXI%~T2|m)!ItHd zLF#?f*{S!|Wc9yFioZHzk@t-P1SKS6tn=BGbNH>q+iiWG7z8)|TF=E_qLcBC|e7s?* zZfDKSFojnnl(W^XEZ@GheipxxA)~`Q|IxTdrrBRbsUw(6=_cK@sg+* zTQn168_ysFLS{qJpCm!lZ;cv!ELhwI;g@Z65rAqEttTM&8tYqi5051?`w4C%uSIt2 z<$ci+0#Vqf88BIdqfWxiUW}~RaDpDfMxB`8FZ$|KJppoUe{ty3ipxUp(V}rx z=1IVu35da5$~fL{9`K6}+MvN6p@W$p>avS^K@|7#%bMy4Z(vvzn`?>t7d6 zl(b2aC}(jfh#`aIlWb+5fw?kjFN1CLGm743;tmpe?nQIxjk7i`mE)7RC)Z*QbJOUp zHoWkiOaw|qKXPHOG!oGfE2-u+2HkFND%7#KJTt^65BJ^0DPYuQ!@(4H3lek&FVeMW zl}sGIInPtCZHk0GUPg&IuS;uKFUOc5yVRFQ3|u=5G&s|(L6k*zd;iRj71Pk^JEKkJ 
z)*B$lqNlD1sZ(~i$SCjfW5~ztDP%tVB8mSvN`A<=#ho9@%7UWJBXQe0{Yp#hqQQ4v z2%a~i0AZH%P+)$K6;$hpH&6&s?Rd2Zx0pLKW=m{LtJENe30Z2QOk zRTs>Se{f7jJ4*9V{Cl3uw#>M%%28R%Oy;i*$t`?l-KzPhFon#w`A~il)D$;SntUq2 zqK2~XYVtaub^K~kLovM-#d1m}wo}*L)c-qjex9R=pW7|Q>+|V38|Y?P0?IMUG<&cu zVDOHq=;STYilC$TgQQb~fV3FZMMSyef}D2UDxwSi(SuL<+i3$%3)OY?2OGvY&N;(j z8VQEYOLPO>K)u33S)AyGY@Mk!8jxc;T8;|TeKu2Pp4^i#VZEP zs{5yufZ|;4mf&+GLw4Xe{viYAwYk-16b|*X!3g-Y149@ICg+d4+!-mwE7IRoVF}7q z>Wq9%=i-K&g7@+mZgpUfhJ;2rWA0UsfQ&u`F5=lHA~gZ?$|Djq;&Xw>jjwnj`693} z*L^H4SsYW|2@Z|URa5%XfD=ONtKHS&jnm{|TO?amO3v+)n)gZ8?fZdCfWg0Q`|$v% zbY$d387|V!qm&`{1+;4V7-&SxGOe>xNEW-;;6m(6V2%vJvkX zXt&5>V?x=gf-^rFboPun19XaHkNs2D(rbmW9I?tvS!PY^pHt)$X}cAqZj=3*B-T$q zgN6Qn=@+kU5I!f8K;d5y>&bz#ljL*AS>j~{{cyjlA7;96)TVq^wm3F|TAa?B*-GNx zjUZQC;3evl4M6~=7jlnq!FDGH))_BRot5$^tzUWV0q;%b=f=`QohqK{kGL*N@Wk1m z>)jyz)l&!hw-SNQm3Mgb{q2Ha0eSYu_Na2y7-~YOMoqP(4CwA!(UL=y#At$VXNF<0 znSXj14WqwXXL`wEWqik~Kd-`HnTW>wp-MhY!7kN15Df&lW`-qLIV$tsf;Q;L3^)5X zL76XjuUuz>MKzH~LI{(pXU63>*r!sT#L*L_(x1+CDmI=t6iLzJEf2O?t6yb_u&4Ov z-OG#_)j@}9rhuDD!nxE-JL?mRmTQQjkC$|Ao8H2saeU(Ny}>NR48&uy#Pv0}8P0^v zwK`-L-6CU+gR@NH4%$(w+;U{<*&2K$>Wm*ZB#*BqF&PlRYUY9n9h@usp_9ul125*R zt`v_4(3`qHtGT9j(oEE57cH_WM%3(vO zb;JT}noV&D)k!83Ivj+wPi|Q~NrH}X&o009P_QnA>JI%=kb9Tc<^F0hH)(Ga30t=|KQJu7q z!m_x?IX34ErpjF&eik_UL#G^&sm|lNGR7w;d*4v0?f)8@Z?N$qFkRMsOF%j_0L>Q* zSXKKuA@MrheT$w)#%$2!|9$WKOer9W@2d2dUhe55?w<7QISeon3Vo#Wix*xMSiJ9A ztM@=F#JqBS%2SBz8_Jp0(%}cL@EM4m4LLF$zUlL}=5}14E$ys*V^jNxiRPNTR&TXO z$xJq9bD{m{`XVb;}7*scHht!AjcYZvx7+L9h z^LKhrf(RJ{Qv_ZbkC>>GQO;^*5x;w&bwdBes;3RR9> z$MaMctNz1)-tWo+do#v7h~cuKIBs_f*~142E*VfbjV zYBZ?wgUXW(J__~SmG-*gC=1(deP%h36TlP>Ni>Du-~>;-6Go0m za#=|IW)&44P9-)`UzuJy`n{jkzsy(~{oByoXh;?c?b_40+fntzQ)?19^|xaJh}4Bt ztgdcVY2ae1Q?CX}Idx~JLw;;w;LcHBt`p&;$^=b*s%j?^Czs_nji}LU3qrnrDBW{j*ie(CWQ}r3 zX28p>6~*>To|{D!!JC1m&#JVX@ZeN->53c&h(A@%2q7f^KWDpU3BpkK*yoM`0tIwq zXoK@xrA*#WMMAX%y1YHJ)3g0S)aEl%y1M!ON8U8OO4;)61~YViZAnhfEt~GaK&EI3 zqlJz!Fa>1mE(&6C5-4&$d)?P)Ik}^GkbBIhg0E@EgbJ&%E20gBD60sO$#`CD?m-HG 
z79;JKf@&@a>4kRIrtCs?Ie5<0*Nq(fW1RL8b~GeykFeue_*QPEvX&Y5p1J|m3_z|Q zq>_j3v*F4(6BJS!Kjq}Ed93cfWyJdx=KT~HJ7BmX?p90W0dBYE|4#LWDKgbSt~0-n zJ>hV93SJs^hI_7wtv}t4{yzXXK*zt0M}%d`GL!bL;cUxyaoAgH2AbdiqD1Llg>E(T zrzWmp;FF#Jr?}|$0|t3S@v&(rL~0P0b-#vjMO~cwv|ek#j{ys|1oX2NLJ{KlE+~p9 zk320Tr}L|n6z?^H-{P}4h$NLa4iKf}f`qB}Szq~2O3xRlPYmb5jEsuK4DtNZRZ%7h zs3AVe5%4?^?PWqK}!W@nfzDBSq-j)ECn5gZAW4F$Ow#TY6 zvWfjy!6fooM!wtgp<<3SZnf0d4tyCSk&4zYO7{|i=$mb=i5Ar&yOiwbb5(E3DpH- zWb5J_QKy#TDluxibQ6kjVUk_%-i?@3ykSA@U$pDA$45VN#U|oCs_dDCKEKdaa8_w5 zc2N))sd)BhQb4yb6NVF{Hw`>$4yUra`Q47+ewf(#%)QNl^RtDHvrg~Od%xz>lTXgE z%ftELMC}d1G&o>AvkRss$wB*ek{KM8oPj8YuwY{8rIZb*QvU=Mwcf7ynM22hZgKjV zU+*E*G%8SpTw501IKTUIUm^&{~@FI_a{`hn8HbeOk16HhBexRr&0w1XnzgsAwA=yS7O_|GUSmvwO)z2F_ddR*c z=ug*rdVg0xVnQpX3I;mx6q0^o>13Izcy&R{^p^c#L7aJKkEU}$?tJM*L_aGfCb}mgN#Hm^nMz#TRl$R6}U~)O{8lNr@}cwqaNy$Yh|D#+k))%}}(c z&Oou=OK1Z1k+l1429%DajiJ|P_<9r)ssiwJ!eHunNH7->qe4JP!u()?3VcJ7y1?ZM#1Yv_L zz>$0D?<+hGN1J-eec*8HU#1GE%`m5PL9ar&IU3Tcw;>1VwL^kyKvkT3c+#AFE@S-o zRI{9vh0Av^$?W!JwnY;1WB6Gi3Dc(iKxo;Eq3;WqK^&i!9Ei@9rweU$UoLT#IW!(? zsVMlbc_RmY54KZ?pH{;|&_vTyAUMmA8W>>bThNRSLv=K&X_KpY2ZhAj93hr+K6QLk z>0U1ET1Nx_YF4md8g=w6G?;@EcINz>0yii;iJA7!&PJ6~3;9mEdt6vyLL^7YFFS6< zHhnpF%d9jbzqtOp&v0D$t$~hKU438XE>=NjQ5+TlApW~`n{Y1eHU2I=N^aupVXAS7 zb7Au)H0efSORshJ#hQB=B9*7TFzqo%rb>6DmPJc(urd$)FwQZ7L4Klzi1=Mho2s%%nN+mQ0_$@!crB%J9ORYmfg{f2fq=;DVvWKh98VWDYYw( zi!>UbpU2YeU>4K5T7GkSRzt@n@%gpG=hR_W+=KI^`#>H5HUXQgc?Uo?wC)FOyV|in zwLbw2-fuMx4r5)Ng^p3z6Miuob!7N9rs_|Taje%-m@BiYY5kdEs+qjC50wDeeBPJ617!zb}Q=J~4oEx6(&L^vCn z`{fk^>GqFjQG8hJ>#=dR9y2hDaal9ZUpJEhEL<9$1UO8xOhJto^;H;s+cL9aHXtuV z8x}fxVJkRWreuE(%yy$T8l|k-7awKkC^wO>Ot_ji`S(SXu9i zs3S7*k5c4>2>N{0IXkjARosUuuL`j+c+!kDPJGAm2xEsdzVdgvfe=RvhPqg10Ftc? 
zxS)OI(4b!hiDnvOgzls+=79wDF8zhE#-dmLJWKXN?Is9vY}e4*OtvHy>AM`<)?^3Y zob=F-QAs;?Z;Xky+^&P#9-OHfV_)>=x&~O+-H&Dy8JW|9RAY+uXVU!7lb#j>y>H;p z>Ew|V9-9)heJK1Ci!k7YD@UhpvI~5dsMNpFntuMC`+8Y{blJGbCY8fhlH2ZLkx$Zx zelkf~4m04l-%}$q+Wt|&k(v~?N*;ld)u!%Zv|&{%WLyV!jk6bjJK|dig74+UG&P1;=#3m{gtOA&nIE zZmNy$NX)n(9N~$FhQVP2>?#{6-&0GhM75%_c)wD^t~?dN>4RIj4j!|r?jg(WL#S#A zMR>!h&<&YoLP;)@MR#0J5(69^NFbrcATDj^dbMrH&!qSvTGTPlXxU6VO~Oi(FUo;F zX;el_93cCH@!%MTp5zVt{b4@pY)9~^;8R3`4@#*Ojd<_Wdxa^!a0u$9^(g}>@CIYW zY@aV>P^arb!dB(|)BJFh@$+icCF#?^0g1p5 zB@6dU7L@i3$F3)c5-s&PLDkqz=6-B+9sa&}*AIhb5qsheWSh=jXW=cuD-+A#>z;xv zJZpx}I-1M7>-STYaOAUQFITnwS>-gkrl}{p8V5EX(&)9Mg+=X(6e6y$IId$EX$Q6A zngd;GqCyY%e?$ivZQ&_5BHt8FM)XPk9_75Nc$L9}m#Of9r*^F-TakG(@L&&}ueO=l zF3>bNVc{AZpuLL&#H!?fo%v_*PMbO4j-h$rU^rBU~16g{V zu*`x*;hwJ5vMA=jqI4Jc!P=91CKOCyr_N4VDtap-XbmtA542zm5uqHrXeuTs37(-N zYW+Z?U~4F~k&Qrr zkXLlOu7UvTZTQ4OKDKwehPp^NV4DyO(8M^`zj6`SzjVx0?(dcSGI? z$BH<^Dz|3s;Fqc|8b3$)<{hh`gvSC02494+2}zJR2ur`06lKAk5XlBfc^C~keoN#^ zTo>~?bXC}b>zWzTv$LR~A$VUbuQUW5Z*$3UKXp3JKgFA

C_o)u(MzuvcbV?+i!*^v-|1R?@mVpsE2pPrOT*Uf={Q34 zvgV%^5Yf0mB6-K>UAgth{Tp;4yibUoR3Xv1D|VyaRL;2h34M#+`k)NGV)a>tWs%pwKwOM$~ZLA|!8)Fbj(LJ9J%^ie0v<9=LgfwG4~ zQ=@{5R|OoiPlX$8bFKQvsgQibv!0-VHqWq%RGk-lM5q=|38G8kK3S5dXMlcTBJDQ0 zdh#C}J>!3G^Y% zQqDJjS+>TC0FCLBKX~m#amrk4&)az$gi~VNvbq}BmrR>X1&-l@m7PPVn7TgZN%8-+Qed>ebB3$RDY0^AheEwOyKZM?9!Oe@wNoZPP8NNl}yi5$=U9Q zUKW0UcNPsZm&4!x!E<ZZymS^<+=Q({X(?%sLo7lUnp(rsg-pJu}`d;_h`|gXv6)?3VU9a%e(yiJ~ig=Q! zJHZO~mG_MUjA^aqmN%lt;M&|30HpGd12q}>sxR-r+Vf2YxMB%3>VlnLA9e)R!yBF` zc0C52?Eg+sTB$<&dIlK{PG5ix^Bvj<)fW8gy~{zGowQLC_~jKGO9dFf5vVsIlJ!o! z2cA=YOJz1zk`|)s+<0e&eN*gap? zKy#iS3Dw4ckT_Ibz=I{b&HOv+%K<+^bV55URBqel|n=0o-Kc`jqpNfgFB!;#xhyEZ?S3+e4B=JibtZx zK6Bs*xe`d%#wMw|N=Bgne8#rsM!P{%{rP)HYZyKj7K#k}5n&BLV&k)t(#-wa#-HTa^dXR^Bk>{BRkiW~BUW$LGNZ+6>> zm$7{uE*Ds(w#aTo)LC+1suu{dSmAZO|G3M7FWg{Qd}Vu19P4nr5*6M< z9=IrkkzpnUe76H@!4R_$k%4EMfGJ+EGIEh!-^;a^1I)!;0#}=ii`QgXfEP!xme@sB z#d(!x+j*?N^l#>yvI(Nrg?)SkFHlr!e2Is8;cIB<0DY+EYnRN>$r^tA4xrx0NKn8C zcF?>}07KJAw<|&$W#Aq{s||D&h9Jr=#^}_6H1g|r69(V7MNf;NDH_bquO@!rsMzd) zLl2*XLKst~9sj)&F5;)BO&e!vu*p|N2U>=vvVCRz>yA>c&DqYLEAfna(GODNRPefZ zio=3QeXDS3k04TZ+)4pmheDa+?g3e!S4s8#2fUV_!jsG2b%e}<6RftckTi*_T8Dfv zOLV`hh9TqK7ZN)i^2O0q^4gtF&AbGIi>>()GLq-(U2;d(c{uNP5*BU0Np>bE5^k48 z0|9Xe9$b{7gYv_+D(`k;Vy4V5PpI>+cejGvdNjrY(DynC&Z{4lwZ^9cKbws&njJDP zfEK^<3+b`SHA4OU&ZcM69M2s`5qtsH1!b{kU86k4+19z@c4?e}Fz$#tp%PW`xgW+$ zMn(b;SUv6y=qS?I+n=mIi=KA3M82hziue#cu$E*VB{&1;d&*Q)K5RTK;Yo;gSH}Tf zyAxV;kXnRC*7-Bx?z6K?Ntj!vKfi(|(7BTbx zud#^FapNOY`0bWEi}-!X%3SU1?6rZSM>A2Ygk||=$8kUmON5^#pC2TY3xudLIufc4i3;Si2xELRrey za_J}`kUDGHv^P+BgS7MK6S4U=Mxh64dkjVY@$!;hm zaaNnN>G+d3YUIW_V4XDaZW6}*B#GE-il?dzCy$B&$4H*=saEBA3uW%y#m)3-Ew*Ns z2OUBz<<{nxG9TtROC$G2zNa50hYrzR$4kJjn{q`UtW9bFjonrsc-Q=nKg1gzQx&sl zN6!p70XLWsQXRt09fdgx1B1WkfZ{u|A=*@b6HOTYA30+&&_+$m9EDPTGY&l}&x^tf zFW~Ea%quUe4n?yNt&9==x&wK2Jz{MUKF4LvIW-x$ctg3^&SWr|!eF3vnxabcuNj1r zrgrwWIe?x=@3f@XEN}Ywpf5J4BHu~K;20!T$b=@1QlNr^9zjyU|Grqja^$Q zDkl&r<0x2ENsn`9IL#s({e0nLkC>3_B}L0^rZ8-Y(uts42I)I>#_Bo3UF_9KGt&K* 
zuJp$f<=WWT=7{eFc2q*@;iq-4`=!bAi37bC*=y~n2|c60?4VT4Y{gD;i{m^Nq{`hL z{BjKDtAx;p5~o+_TvMF#|JuY1Z}33#w&RN+Y6{#Nnap zY|emKu%MYY+|&+6#*3FCGRMkwnTg#p5Kpv{=KLb2#V z)u1VHNlbRT@kDWMDqHYr{>!jh*cWH&q#)SYvSxh7>M)YHgOY9xdOM(d3~oKeMkZP^$FcMd-o6gG9*iBVE0&#W0L$CBc*_5%k=$Kx|Egp5!@EzpXYZ>gB!z8mxhiPYg?_ zwUbW-&h#hWgUA|u&xH$fs@;*9K5%?C^~uAQJK515RcH6fa;~ND`C=p-y@W)x&2(^g zu*_vHIvo<`5uwN+BPEgcH;t$gYehrQTlC5ffx=0$22nugW>pN?Sb@2`U1tu6OA|$= zU2qzQSY4=OA%7TA95fO zx|ig@m#sd<$*;kIXs&$>r`EY>8h0LaA%_I-nwHg9Y}mKBN(**;}(UWGGN-6~g;~B#iR-qUP0-L3;b%I$8beQG6P$n+4vDl*n ztt~4U*cD9@7tOf(tn5+QI5a?d2Im+}@D43F-GKAb0wfk)dE<6f1^9cOI+KM1^`i26 z4;3AcrX)~`H9hWcXyLnq0Fu&ow3~}yoXB;k8$}K_3lydgT$DQ8HR?#X2ZbHFn56Tb z-vJi6n^0}b`K%2hZFc4i{^#TY?97oqAp9(3lfNLdJ<48ZeKX@stkwifYq=i5P?0xt}U&! z!J8H08KbkN64uS&J=@^QOp1(_B7nSA|3r*$P-&o#bXafAYH8#j_whh_o%bhMkn82w z1N>oSE`?h7Db-jkaqqi$W#-_UCH~q=SPW-yboMAoW$+JKzhEsXPdyZ&F8Tg^6CcKG zMdGRzYZa_gJc=d3jP0Q5D+a~5oAn{afLWB!%Lp51jaWZ&&A*^n z9(Rd69R&cv%*vrQE(@YOYaxB$#AtU#HCd7J^pKORK=zfowvfsL#;~D%h+t*npS=F! z!h?x|Ff$mt5zYgfl;q^vQw9We=sx#}aDbLbCylny*<^#F4?th9mL`sXrt5Cb@69D? 
z7R2@E5YYyql+u|(evKWztkS08&QF`~d|Q1$bu?=Gwm@~5M3)LLSpSi3zUDhTca{Vz z-zX>9?`KQM4QR(LP@Tbug=r+?jCHtZNh=#$C4^&_s>zO9_i7y9TS_?Th!k$n7(Knr zlPNSJeA}%XW{m4uOaJ`8K?W>5&6Dl`o*k$G^Lpc=^S;-wwBNk5SA$#6DDPAYY!hZzIZw&H z3Mnj%4FmSS^P6P%DJXR5?s0t`$TcoY(l#Du!=_mrm!7KHsqh^b19MYLfP$S;x_o0J zz?Sf1HO!QlRkI!by5sO7>Z<~@9B)ZUNxfPv@~FLVX+RCWW}1cLB@(^bM1?oykc)Ra z=%9i7Xn&qQMV72%O||TWEl)sHF)u6{=;y`O!+Tdll#wgW5R3SW{6jVvVA8{ZCJF`_KB6=p!8RW#Bxf4cqf zm^T!2JYnPd-`w-OkjtnTiiJD(RC$od`rUyp`*7}RLAr|}hSO3UQ~9Bc9!4Q87jHDX zP@7PNL|SIHtZPXa_;6h?1PBDYOxdU$UXTl>T{J=H0_$zE;@WIdjS-m(3C5>M!)9uY zYDpSBCrIF?KDd7a?d40$@Es^vGlq$!Y6q+g=4nrC0^=^2yW^}uxn+ELq~$1Y5;3Xk zvlQ79++5>t9Tvc`qUtv-u@-6-w}bJDu$q)9hk6CqT5wQ|t+)Xup+{)+s!@!fKWO#k zIKSzGHrdxme}c}DfGkHWBJ2z^g3hFJjfzq5cFs)DUD=W@;f>s!pEmz zpWiv5ZKuM{BYJA_!CrFNglVbSZk@v$xU%tbS}5>jA6rAG{J+g}>0&H9l2)h&g~!7D zIAu4ItGr97qGB1Rl@&K~`%qQuJ8!8ii=A7@o7$#!?pPFw!BFCqW%f(F6e!r(~vR8RM!i zZjC+|2b@2aLoeMLhq!@h{pC(Vw|P`hnmk4UIri3r>Z$kbNKbzxZRS}}u*E5ie3)nd zBkQBwR4{CumKWjy#U6y&S3G>Nik`C4wt5d-gC%KmT|e{oM~Ju5?Y?2R1wK?r_{rpN zAv<`QUV@kayj$Xrz~r$x@VWGQ@HIAV=e#=rIF#tVn_r`W=7;t!@aJ}J$+$U4cK*4B-sg(J*G{r2>%S?Mh{u>^UCP*wm(#Z2E#yuit+nSRtLT^Y)3pLqP8i4IaL`Yr zy}?Lx$qQ8lDQZ~zHE{W>^?YAMhm7^aQGykGaTcU0bj-p)N>&TJ51zafqDdJ4E`X62 zap`*FkR!10u}tQH&%w;BLSYx(r_-VpE%)+*2>5y7>u8rh$v;j9CnoLJ&H-M&-oWb! 
z5kO!K75!$X#M7%R`ea4i&}LNxYXu$IDh9lhL>~MzqJ*wZKiUZV^6Key&Yk2q(DZi)=C}tCt_=uU&J`W+!>SW$<#Ow86L$soPF{ zq;Z0lOMTZU`b|;HizS(A+E(q=9#i%pp9OUh}xG!@{5NrLRE9sy!K<9lq z%<+_bBw|y9@@3RRln(&i00$&LK$kK9+EzS42ylo8{JL>sZ+i;v(Jte zKFpdX6?IAWe)vuRVvI)5-R49q`D28ixo71+Re}X)@<6nE*D*JYQgxQbX4+XWXklY8 zJuaa_?t{{jIx8ZT+*qyolWerXYD;KYF~%nyl+&N_m}4&isNKw1r}Uptf${zdwAZ)M z_7Y2zP%;Zs@HDaOB8X*-q(uPlkDjtAv4E7^O2Z*;F-erq>V}{VC4g2W6mbwkpf8DJYwBeef<+w<^gSmYWP`c&HO7#we_xJG92(tZM(# zK=mxy_+=oDb%3PWm&znuRQYZy%@3aH`h<<@^iNfJ#5qZES$9Di}KQxhvogA-(*b?Sz+BKXpiQ5>MhZ zQ61&r$el}mS0kL1mo+f{W~0U~;JF(#X#uMWh}B_ARGF+5}+go1$ZjdwyzpP8@TBCy$Chzrp;yt>+^TnW+mI!2Qwh#yhddO?)QLYCXIXCA9bjd>B= z0Nh2JZ!6<-ZH1ACPC{vF^u>)$TDGRQ~t3hYWM+*B<{o)-gIX@ z+*{7XicWn7-l=}S`~kGE0FfyuHk*cC=6r~2k(HLetR-RSySm8W*XXjM3487E&~7XWRFM-7F)q>8 z*jFb#-J+iyi*qf2yltU=PCQ&Eeg>};bDp)>iSkxh*l7_W)J?F7FT|SNJ90KYVdqKQ zKmeTIVsjp?-rLxGZ0B3RSPylxyJUGy(r0`8HqfSQ`o620D&TdRCC1(_$@5e-KqrV) zNc>xsXUbT1w|{ojP7&CK z_R-;=ccuHjK>Zjr+37o7F!bAaU&lL{Vcx}Dl;qihkosFM=fEC=v2r2hqLRU(wB99N z;W;sS1|nle-Jra*g~W8|_G z&a3_>IS3E5e?*s?aedPSSmme4N%E#Py@nXTVUrkXN&C<;CDW?dP8W2=7-^a=Pb%V^ zp+3^C0{J9U)D^hT9_bFF#eXjGEZ9>TVEijd$;D9fMdyt)* zHg-?!Eaie7;=x{q38^VUwa+Xm_nq-#@!x2?pZdS%8oi@CkVrsU*GO@jghL~PnSG`Y z)7XO^M7bOLa-EzjqjhKxK(awwcPag|LLvK11~X{QRifeA=f$b7Vh6VcG-M7fRV!i` z2zens4v7TUQ2Q548yca1b<=Ngl*Aeob!~|6n9q-WJlsd>5z$CO6h8<%lDvhlHXO{? 
zsGpr7fpGVKMJIcE)*W+Zu?(?>#Ootd8_BqEy0GY|T0qVbL3v0-G4kRlFjkE)`gEVG zQK{*LETZ+tyI{!pyP0clhh$x=_!j(4J69?pl$tgG} zNDj8Pj^4K01DHC)Fe#n$DeTqMj_if+mBxMY9$AaV)oeXJbnzSo(??BbxFW94zNTcc zfiVxJW4Tt^OPQ3nOh3nYDhXl_(BcA^#OQfYyg`(xsu{V&JqAH@0}F_?$ng5{HzN3~ zSi-z1Cjvi6R7a>4WragnaH;gOup^gUuz*j@ep_Jab@S$W(?EWi;yC(@}nXAH$~ zA#cq}q2ge@eU5kWX=>>e#M`>zIQx5Ay0L~$g{tkH4=5ldxF*_CD+PKeqfTZ4%EJL$ zm=UiAvr@PgX)S4!NM#=cJx>pbf`iwMc-tyoI^OYB#cyPjV31b|fe6`sk`CeDM|zuL z?&1^dhH)VskFx}-6;J#*wLnFK+w@27_l}ER6;Vhoy_sO**`LI=>O3gDfN{xrnf7t} zf^DNtT|G}JUXIzwZ*}ZI=#mp9+%TlYC%XEq*}2fo+yHK)`UB%)={xLkM|!s1St#l~ z?8VQCZU=n>rDh$wrmGQq7V2es-ZOSxG1;REz&Dn|?FYF03?!z;2WT`!A_6StFFz1G zBDyy=iXr2AG;XT92UxCt9mEuCoBn64)~xEylMXKVqV4>-u`m;T&Gj{4Ltai$I|uAN z@=`^|-WST}4AUr9c98I=7{O;P0H7fqf`?PO3XKQ6#muAirAtLvDG5B#DKV2P=XL2} zr|s6z%2X1u_Oz0DrktUdxr$?6P>d~ag#p;{B}e{xx)<5 zq;W(kwQMty3>|qG>T-SLppE5kk>l9E>?KshV#F0G3;40wyOYol8VbsM-qM#`gA$w6 z@GvT2#2z!`VxETb{DSD=Urzt%3Y@I@VKq-}i-@3>9u^F~eh=w8_rC1ci;Cm5qm>Ar zp6p6F%*-h+3gE1Qcdy8J2f+@^>{q3Rvd*u)r@Q+q{MSC0Ro4gk{oO4q!_UzZG9|LR z9mPEMx(h$PBKp{$v)@i~DoAp>bp^P3CJm?e7n~A!+PkuLTCG5vBcC0W z91oCqqg&2*SUyz?kRRkq?EZfTkVtRU}7v^D0hsxp{7FhE=^X};29BKRCtk$lJHx8;GJUTTWV zV%;?1rjT{P1Z}n3@Y96lzMHignANM6OFjJH;;CMu#su88%7Z(fhO0M+$=z>-x3$Z9 zBgn(dy3kPnpElP832i0W01U6^$gEt?tLlgUZh?VI9JNWE(a4z`GuhSz7O|>j+$Pjo zX<=oSW2Uijq;YTbK-n28&6R#LE@BL5F8i{%3u7qJ1EC9mD zd*ikHjTT#qumLp_LvLpC5B0ghm2SIO*=HIa54uoXQmpFw?VfAt0Lqrl`@8< zIiTMtlW&oo2V z^AXCO=iW<@TkPEV-_Wrq|C5`v!uw9gIHR zNjlyeHRR(hai9D#4#Y^YxvIQ2$R%D4+etm+-TAF}TX4Lg7v3SWGR{eY zrr>Dw6d~^Wb+r~hClkg@(Uf5G)}Z$){_Tv3bdnDMy?a3AIHB7@--R z9oggD%Hs14maDw4f9I9d$e&m_f8l;(==UL@*->1Aoi^Uq5-4Tw_TWc-L|x(r#HZT? 
z#9!9JRyDsOd!@3<#+LKZhN*av!%y4A1r1^)pG?Z3|Lnz!`i>^5KOmq5M&R=g%}n6x zk=U`@utKmWA3cYbUQ>YY$rVfy(6>?SnFlTnr&26JeMoHIBEsWn5Ng>Y$*$Od7{+RH z*w(^#F|1HtaK@=Jt zHvf%oqIW^^ikd|$gzK+NJR)XX?{$w4M|w}@{qN6}GJ*P;R2pw*4P61qHVh#cz&<^?m-wf`PNId>)ec+wc z=p>@1^7hyU%;Vh)Wrt-wwcC%^pN>(18W05dVxePe@{X5D(>eEJ^`}7|a99{~bE(gw z>|5R&29&6nd1re0z`7#7R2jUoqgX1kF3?ArfvQd?2=$vSO2=8Fo{-S1=hE%UFk)A;s7VJi80^5+k@Vk$sq z7TmE#YArAbQz97{(b7tUj{s?r8!&R+8}@;MXnzJz2xU6FPbTgY9!!kN=B>k1LFGtn zjYY?{L;_Of_Iwbdi+LlJdKJ+ayH9hkKzhrMs)+vaKQBQ0FG!5o(-gg+8F`EVaax_* zf%MXWI{BA3T)bw{!VP13Rtj2M1`r`n3T(L0l{c@2Cb*atvhszs#vD{K^z-f6!0FGsZDpHXYafDo*bM>YB2PaS*hU=W>}+Xf7JDj*S3RF1aNWwQTfYoBdO=Tf>ab^ zTdZY~P0>zQVfhn!b6%E6UTrnF<%&E?e}C;8wr_HtGnj>Uj&}Cx#u)>n0NawHsa18C_m<*v4*Pl)QnVq}feI?2q1(dCBDOO^rbG9R1qJeo+sEek{4Ne7!qznuNEl>hG@;_c2( zx;B9|*^a$$J#B4VqRZ3DqwCqxtq4M*9;)6=FKr5{_W| zbS<)+*eH40mtmymf+?4lUte0rpFVNrqeX!|+J%>wp*4+zhm~_`I2%9}-&Mcje;z?l zl@l#6j|>XyGk1~NECoi+gs)Xt2uV4ZC+J;=e&I?R!*Wem&Ko*rd9$)I&6RN8)Gerg z&CtYw;&|?8#h#TkUE|OF+cA=JJY-%p;y|&ov&Bbmu=aM+hghi5L+Iue(+Yqfndev) zi4*PlPxwDjwS#GpNcZsg<+O+G$J&@LP5KnxLlkPX77~u;h{{Skmhqu0xbjnZC zxeW19$2G6_WEOP`4WSx|NkVr0q>@vb_Nwk>dIZ>A@cLp-Xo91Ot*utq9=5G!d^nEm z1vl+TJv8IEwEm2uG3*>t%ge~2>fmshv1iP>qMyGoXR{{?jgLZ%Ep-~o7PVtdhVMle z`;*Mv<4`m0R!QuA=iUoAWP-RsNHvOvqm;53m;HJU;{v;a6d6S`y9bk6H6D5}F0G|P z^?mE*BM%ci$mGb-3-j$kC*eur_T5pEWptSL5u zmDc&JN90f(4mz@?3V>rQ2?MWf$eNDd3nZLWZ&YSWKcg&v$xpgEcP7(eR7KjA{GlAUNWj5J1FSHr zc>LQsqv+V6{x%|lZpb{uEzQ!neZu%j4 zRsQ$VcEdNlQW>2vJaPW5e4fln^2guD;-fju{NeN19R1HQk??j=4wts5rUUx(Gr@VJ z7ckDlt6x!q(;R{2pek>%B*0+YW3F7L*^8{9JI%cj`#+?Eq5XZ#4{k>Gy@M04bKU1J zBB6z-YZ);9xvu8CsF>|=@;CF*ER3;qfbKE;7;ng*Fka9Gs#2mc6#IkqATaD^>EG3q z+q?n(XeOIAs>oHTONh5FiWsIBHmBs)v_8<;$wm2?ArpBV1sdhJGi@H=;~$w)&S_GL zsS3K4{s@rNaExQ_EC(SW#->YtQ;QHRpk=7f{PaL1;~KMA`rQf=t9@tjMW0_rl1U4# zK&6sd8xGaE+#<6SUV}!V#r14aMsbS;9wUoWQ`@TiaF%!u15x+RuZs4XXP_G){&uk4 zdNlxArI0YFNv*R^)?1{u*9(yU3g#YErvM7%S6=(Wm`>9Sil?W!?N{E`chutnU(QM{ ztU1;C+3_W^S({m@uh$A{Q5bDnJmrpO9{||x@41!#5qDln%DFv3|KGutj>u)+-*_EL 
zlqdi-boQ0czRPw#1LHQJf*2;$48)*26j0->KTN0rIwgjF^&`U8&moBBZ({3oD06#m z#K;7@UsBIB3glfjYh_ruhcPv>0Wd;i@ACGj`t5(hd0M+Zo}yIYj@MIXumkf;M}==2 z=>1L$T6&vqnf}#M?ed)XZI1oHF+}d3QHy!HK8)x|VBFTAHu24L0R6>)N0cZ`AaYFKRmP(ZK0w3WGSD!2Gj%=Skiu04+w z8!5fX)tTIMlG|jRnf#bxM8ulCXK`~x1*S?G`cFwR17NU~ePAI;php)?YliVxS@3cl zWi6;*B?4p{FyorL-!dtGaf;{t2#?3&xvszYyDqiqp zZ!8B}dR&$Zu9|vpURBCwygIg#XIK2%e@n$G0(|hxrA^VqcCNLWPuQ*2@_D9CIqE19 zZiUf-;UL?ShYU7ygIJ7rb{SRmO6!>YiA|ZrzMgL6wdhkI(b?Etx{!mLy9{+HUDkDa zei%+*_7ft>b2x~yrG7moXRH3zj+XJ|211M3!yBA2T!g`tor&F>duWdTEGZ_Fxi=pw zK`TR!g?}C+GkkUDEwcLii!bZ4?hohiRM5wv^SHQ_l<~EdaJ55dAQK|$t^oRw zfE@m&>3bPKXnTxfk7pGqwbE-%I@DbV6$|y&6%_x(9^ZcluT>8`h|#8JE-+~H<^8*v z-nnTvCO~}UvhTio;!Jt7TT$VA2ZsW-IZ)X!JukJh7W$86Ww*fv1t}s?q89@tZVsvB zVtxLz;LOoR4aG(omEiIeE*D7tD1$se0avM&%R&n*cw3T*Qhe%?&@v6V1 zMTbup@3wF9)?8CMEP&FI?gJpPwy#rKLd&XG=nJ-YW_r|khirg$D9(b|MqnGMv62LH zYo`t6Fn@*X1XVTUN@n~8Q%5Ug@fEml!pr3s5-ZhPb&22OKndh$XeHkA$fLS!T&V}& z=%h!4Iyt8kNxIhQmz9=6tRP{Nkw)Fr_xNNJ#2Tc^>V+a#sze=x+iL?4p-FY|u;)J> zZ@tHs;-ULj(asTg(wJd~gk4hgP4o;;6;)E0oV-f4QZGV zIYz-ywE@hqE zZbviTAjZuO&$Y%ZLb$NEoU}u(fLty6e;s*U1|>5?sF@tv!9KmrM&ByNC7{^hLw>Rw z+N4&aK*cvzGN1jga-~n2GYgj~ST@K3j~$RRjKWa6tOIGor*1qMAPf~On6+61^tRU- z4N}JaXe^(T=o=GhnZc_cZp8ovuU(g|E*N|w+@GiK`kZ6K%-^C>DldshYl;;rFRkyQ z$(Vqd|DtoFx4{%6Q@TL`l$OX&Dm;sNLy4FDE6#;JnWn~zgzJ!0Rb&i=1jkGmN4Th| z9jA6|kRBR00S*8(k~n|5Q9eirc14S^-bgd=2t(+M-8ODE7@Wn`URTU$Nuy=?tZ={% z@M64cbI9Mi|yHd3Q?!8N$(U3X*l5g*BPru-~|I7H@a4z zWfB8B1P2pi2%!@WWdu(&6%Gm!_%rYovI$t_`pe{;6^a|FHe(EP90xM1KA~u)7I75) zxkS8CU{2?_z@m65mPG@%8MbB)xlMPRK;vqsw_Ih58RbYJW99XpeR>$$m9}r&C}*ks zZ#!+F#Bje)dZZVYh{#FMZne; z3C{#&Y5+jiO8PTIt2JZhQoQoAL_}V@Bh^S0exVeb#3B+AcYY)$Ww)e@jw`6pH)BA) zz!SHF=BRKsXZbijgR|?gFTfv&29X^=xTILocrZ(u;1_RtJMNf;RErE8q=|pn%!5I% zivQ~8*ZZDIRcRdFRXD8kwo!2nZb0AtMgi7G039%hd~PmkMg(Eur6C_Ob9;&9HG5@V z25@rVPCR(pxfuCYOedG8duD5dyc4-=mqNI}k)9D({}PG>i)^5;ict(%<|P@YZ_STB z1DE?;bbg|h_F7fU<@>EgVS4Rx_O;C?u_?&n%B=P`|6+ewXq|XAC($5&wd z3$a?1KR|K;1U%c2jzf31E!iKFtJc6i8JV(2pQUhCr67*+n9T|1evd`46bjcBmxKVp 
zdX;>aFlGf@7c}7{TjR?M&2j-n9>{%ZmI>=#D_MDjtDNx&WitwXA4y` zzKGoki~OLlTU##gjetS;y`(WOttSm;Ts=F+qLzE6_L?BZFVt0vL}qAISI77 zP>5-ISL*7)%}zx834X?fU8@Z0tBqzjp$Zy1NV+LJKj3`ih!%-U-gd$v2FgQf!u1C- z<|m6b_m`??u{JjI;NvDuC_wkg6=)$pjR%FMt%?I;XH|-qHLa-orDZa~w z3$S_W+Ye68JwBR^MsD8;L5w+FC)-fr0D*i06R% zm51K(Vs25$&O@<0hHWZr#i(P7GQdij`Gogobp~s+^9(=`VL|S`@dC|Jg~D?~N0IR+ zv=*5Xa@%;vseYwdN86#(J$-< zeNJ@?bt zA=^Z%0oz+ZVBtAMaXBnF)B~$U5*Ek;j-m*6da~cS))x@KfzIV?!DS=A3EScP$`(od zzQ$}6Y1y5S87UT^xq8P7vphgJC2>fyyjVT=q*cE)74kilg0M*E;(Dz1jVGHMm2n^X z`-h^cW+9A29jB?~-tauzXYq8*EXL2WN}BDaD*B*HqPB+yD-Jf7-mYHBMN6L@{qp@X zz;d|ki;RJs4@3#Z;mZV(;~QTk?$$d4kzrjIJ(yjREunBgIokYPKP=HCZT&rjeXT$N z6b-HrSB2GH+Ng1UmU~42HY6RwQrt@~hGH>#^|XXh#O;GY(up)OP$--d4uOUtXr;gJ zXE!ZYQ65VwR@ACx1mBD*5HHln15(|+K*}_o${MLM-r``j_G-J-Tb$T`PmfXBd16^n zVrd@ts9XLj3gAzkQ@zDUYNL?=o~PE%&Ief6UdDz5;ql|>ndp#2&#gczzaj(7PfJ&+ zVR@;HO|(yq6BUq8bf7fONSPa|?2kHB)eOQ$!>>G;DKOcU_X-k||`3ob2yE~<33@bcY zWNQid+gH&ot25*&!|bSITY3gP;~8$iZ))?J$BX%D@1BZ_5gsc*g9#uk6lkv5>SX<|eb*HWZEHSv2&ddpv}8;{qQ z_jzD=GNJ%yi!RD`5w@g6=k1q47mgv9*Jz5w`V0Q1@=KvtU~`syQ?C=}m;8i&gwNVx2|Xa|2|KdKbpI0#g0&+J&m zV|(7_DNCtbmP9xt<^utC_q@dEHRW#%iRWR4o6D~6x|DFP(HKN%RCp`AuM8)V-K!M+ zDhGF#Id;m>Mm_pM(pqhKcbd+hRutd2N@29(jd}lx-(fWKpttSM`h)E+{~e@Te@-c2 zbn{IPSa@XY20P&{6$0~*%?dm=_1Oap+}HC6>~e{go1Of`5y#~M8nyTAU}Kf&>IE_9 zvVxpzeCN=d>o-GPY5Vl)9X(UqO_jG3RWyW!f6#9rgdypwbcK~5&sIBob2KOA<^BD3 z!(&sT3U)^FnrYG!#;gBZ`^_Z<0sWJ&S>@Hsu7Tk8^X_{a%M`jF;u>YF(FHHJc2oh0 zCKel2v#I$2X{DvU(~4LOh2v>j2Xe;3HTXg=s0NtFZza~+n-7yf_Q!`u;xJdWv^ZCs z!P$m@^bt8JXw_{s+8auh)^WHixC~Wo;hbHP2sE9o2cgtDkynBJuYen`wu7TW;n|ogAUY(#u zzQNve+T?peslM9f5pl~@VCyU!#6gx8?=gObgMF z1DhoQc<}~EgD6LGGth6C=C6}HswI_{bk0?$uo*r;j9w#p+SagbWo+nZFztU^ z2hC@79Pk-z$Rr)y@88#~IsnbLtg=E~-|%+Ya6WnEwlbPPxu|NTeXWhD^eW_+Ul-GO zXLIF}o*S{$#I*?r74A~yS7jXWJEZQKOrtGI-5d%vf}S@Fhe3T9^v3)LNn_Ws`+P)hF6Z z8nT|2{PMX9)T!AQ?*FBan$(seNG(v4QM^yq^H|l5QhJ446o}QOAxeiz1;5Z#Or^|$ z%)>PYTnosxkqGTLn@Ht0u6CE^(9K{DvYh&@NT2A?4;>tRKaN zz`sX%`u2}xD8S}iS0?M7a-T8xpiRNIfJLI$7}U})<_la{F!hL4Cdgwj-J(I9nLc}= 
z6J`9p?_ClKa`8+kV& z>i652WfFl42;RKPHlc`e1a0FKN=>hhi6fNbb0qeUDHPy#=Hd689WL6H zfroR8!=yadME)H&d(L}0pzY5{`Dn5+qw3P{b9uXg_U?)gl76bKxha3Xu0v`s=V9YN zRjuPqqfZt74Jkyv_k$D|u_{$@A9!BktO{Imj{fSUiU}xXuOG(#(rhjX)G??(5K$jDS5e}%j=5m~C}!L_;Jkb&}5oi&B$kMSQG2Co3GTF&ot zY2w5Y()ecWJy%ey7%G@^YA>pM>4;&%DfdB{3w#iIoQwC+&C0RM1fL`*>#Hn~ML>sT zsNMCnMLcQ5u17}_RS!eI>-Iq#pFhw=rCJ>+@XQ;Bn{RkL&vgSRy1Oz342Fs3seMM}ha@v(M zpnVonJdY2qG}_LqtZP{<^`aO<60r5uft;mgtonA^aV{dZM@O0B6(wffK|!d*r%Zxx zVim8@F2gpG&TBlZSN6pL8C^6+3rw|iY8jfgNXAb;aIRfr6IbI}DD&4(+3!$@I6`bmZ z(2C0|<8Z)_lg|&)kXqnO4LS8(rY0meEuCNx<0S+C<4jQgFpt;hBLVQrzcHDNPE|zF zA5E*WHkhEMV#vnvm2NZ@r=`GKU-i!}GqQGF@n6EPJXbiEq%y%swV+mF2Ge|PL_W4h zPVSAq)gO7&WRu#MZ%CZduM(@E>ivLoWGOzMO3=Wse>cBYv+Nk6r%UDqNYF@<^8+hr zqpJE43VrwRRRE?8zv~HfZ#C3)>ma(29$zNDh6)1=A*&lDDNb=q?Il?#&?^i^E)K+( z3at3(U7ABu?9VC;used}WIRUo59N0=#%)Or89yN-@AnytQZWwlZ);}o6F*S2I)DV0 zguG&jmawySDOdVfI%9qXUh3CPyni!XNX~}JqBtlTS@Jr$p7@O!QM>vzw4NRR@tS#R z4+{7o*62wswygVJG3@1-GT3h_b%D~iUZDl8adtg8AnHqWLM_&?ufzUfe2cbQ{^J~B z077CT>48wdflWR{c=}=Xula6nU_TEZ=T0DCi;%zOwhfN~7EgmnD1oHAb|FF$s5c49^vogiOa_!jgt>f}uXFRS zsiqMJW9DQ7wOOfiiQO1o`ntrz7aiKTl?YIC&MJ8BFr|NVQ5IlVDO#2{XK+Dv=29j3 zvIv~Gp9n=&%|daxnLSF_nWk>$UX{ZNq8iWvZzgkG&4Kg?h?tF9NxD-oW^~JDJF%>6 z1A%E{)hgdgx*%3b%0T1_nc2G^+qYZ7t!_-80GAQW7XacwbY8~z66gE7S)vva0r#@Z zU4?~KLCY~zu$YZm@ZyCgl6>fFJM8@bK{Nz3kI_2$ut@&RF3RMao4MTGf!`O$*0f1o z-VeOVYIdmqh#mQZl~-wxFvb3v}}$&En~@iVm4U~*hi@sxn`?UzcS--;aA17pZf;*C$1W?eogQ~ZiUtavx6 zZqkXHiP~#!5s|Or3Ot1u$LSf3oL~nphVt<1(wDC{pxc_Tq}tjWH&uZE*Uh=(*fy>r zAWH9d6m?QH{M6(N09(`7S0_gcNmZ^l{B+4MHn~P*u2CwZ!vj=%w`=C$Y1Nd?mh-}E z!XgJ6Rgm)_O7B()M9E`<#P*cKXiQ^LCk5y$JQpr)z2j>biv1YWV%-syk1WHnb&k<(h;{b2G2P4_!zE_Ce(`pAxsGKMjTCSOQ+3gRmZ%a^%LqR_bsM&DY8F9J_Kq zud(uQl@Z_QAU{5{8=#-Xxhnzjp#uf*h@~wchstR#>naI7`1~W0OwPnKk)EiT$*gs^ zlVDE{Lap3vo5Tii9=I3YI5nJO+adwlcM=8%F4%=%op*Rk(*v&vxW|h}3YcV#_5BcX zSP7XWKOU`O&*k(YJI_dSwXU34^+-is<|o5Nxe*xop~t(#Bw(pay`%topkiCCvsP`j z(qNYoL7-YG%cdnvxCb>QkeNdB+|pZa$SBt`0v@KL)FUgI3*17aRCuLaOnw6R5M5o% 
zV<~Z9kf(H}{gJM(Gj_+A6!(E=0x+BkzXp;RD8be-_5PWY9=k(&!5gw@@2~@v+t89s zA!#BX1D=R7EL>z4<~~52d!@17#)Jj7GiN`4@iaq5L%xPw17>yZ2zG$CPn*KRPouRi zQCI4EFQ|t3dpb0c=5lL26D~I+uf=cXqD7YsTCm^9%AkR)dA`r|-Ozad>3xz}my7YB zIYDT8N?x!`E0ZoBz<*P^L_y0T%YCu*@XSM$<)Gj+%bG@w($F$qEfdm!pP+QJ=P zQO`AWD8)c?vQB{s5pCUyI?xC52nv+dL(cHNr+IF**|8JfEBWxZjHLW$@*&ptcOewG zR;;=Ujx5U_caKArTrx>9E@6zpEnnhgOBY2A%v~BpY%6JVx+6%D_m`CG;BK))W{7W| zaY|c1aGQe(NHSp;`?gQQj&}U@r#mvS5r4oM6eZQBQhYI)Q((%%SC&-U52{nLm7#sP zv$X$2`*|xdE0*h)OAg~1N~=Y6#d(=N8wkqaGyvTO!YU0pA7%K9Yy!p#pb%yhDxj7Z zQfWaY7HG7N^8|z;@p9B_e*7_S6N1f}%4x5XA35CBBYhkuHe6GFIG&iD?k+50imZITn71Uf-cgqG2M0AK%B#IR zeXp849&IW=gxW<-YX@IuT*ngp``T$UUnLTq#6E@arH#4jzR_=jCQU^u+2QX{mCybV zc4c7JY>Jw1o@H!CvunSAC#1_2ix!&_5mn3qf{x#oT4UARFl+pBb`(($1CVyr=MTKb zv2td9eX7{?$*T$gd&_oC@+0MqWpzCm??0+qJ4Qp~X8{-=t^j7hj}l57>&BOYEa6d% zne(2Lf-Nb-(q~vN?bnLEZCXT+n^F#?TmFd>n%=TU3t9sO)wV}dG($y$mQxzJUW8o+ zR|@IUilPs~mf3T(|8)kX8#A^93Nzonz!iYs(;DgAI`k6Oz>lv>8=FjA6%jg`s~Fm$ ze0E@hRfuyft_OgeQl&#?;mdSZe%KSM$<38B+XLPIi_P1l&GHYMiHz}io)&yD;)CFH zcOrC%S~~#CeE>L(YDhnAYpH0*CBh)(@f-CXcfz;Mm)KQmT9 z3r4{|n44~35^yFS7#Y9|f}V5f!I)DIUhs-nawj{)CCr3gi4P1yC;wyztDg9)b{HDq zDYJ(shiSWLc1*#dW8h)>3?8ayUC%VJky)jrOZ=D@@!5gFo*aCJX8l<{R*QT_!U3Uz z$&FhP6^sMX7oQ9I!RE>kj&5oEEu+D2ixcOtisPHLv!H7ivBRM{mg(07PR0Nm!|iW_ zrll?_q}8Zu>%cBYW=H5;LB3Mzy7`)xKA#%oFWe63AjEX=)l(Djbxe(g{2Qswv5miW zRo??|Tt5=+Lwh4h6tmVh|(p{-ng$$<``IBJoNhUinQXJ8SS#OG^gtjbun&Ah5A%t5dpD@%X5;c z{RMoV>IK{f*)e|K7!x?{Jyu*{A`c~7u0)bb{t3Kys$p(^rBT^|po*bW<$MnAB?cT0 zosgE#Bh-@zH95FkI&Ao?x+_UT=zFMr1NFxEm3PB4J*tym+Dy!^ML{}Y-FiRSH$R$i zRn6xuI%LFf;W2Q^EuP6O{d6GjX$#eCr&rAx68SO^`Gx5BJ37tZcoKwbMKTVkpe{h< z&y(Di!9ayOetHXhk6~EKUT2v?@>|~KCJ_XEAD54q zaFY8%b_aoTY>%ESk@DJd&~D1?Va9#jTaj0oo-~8Kw^_OSy5347j+poC-5#0y5^$}>PLDxF=EL|j7kEbuJv7CA_T(E zpSW8oPm3se(AIZc$YFVJgdG59nn7r(>f;wAEGd251UUgeDUyysS6rpSgx^?NZB=91 zl`@fl6fcrIUY=St)kCRvJl2=&$Vzw9!)j`jHABvdE{g`j9+J4y3ZsSJkkmO;rAwTC zd(Zlyd-EVm4lDJX24^ePd(29&qa~m~V&sG|$`0!4o{?z*@s3w;HUBi0#vh-G9wdzd 
z>N(|n>hjl{a%)xe8LLIBghTkUO28Jl;84#($VVxIfx^RMiB~b3CkDXflB&sp+!u@) zHEMKUE?GTA2^;5k9}92hGwIEl$FscZ#$>(-KgOg)r5XKhFQ?a19eqnb!~)Ky*@3(c zJ0|KTH`vL5XH7vs@F8J;T#S$-6FX+7S+DW-O<@3c98V^6ruq~ltIgCdYI&(=IPxg+*V+PpeAT=)O=us?@awNP_Xp_o869~?-8z(D z<Wcm4WpxIDf%hGcrC^nOmyR+Ed3c)t~OOEVgqV<@H^P%#a@L{(Kv z9-7E|Xz0n+J~^vG0$Z<;5fc#yM}`7Jqg!4pyhzn@v`{MS73i}?Nip@Z#=H@w(1m}t z&MGzFq69nm+))W@hPuYkhuR4dEXZKc{qgP*en^=WwCnK^`@>g~Oo`vN3ZJlu-OCUN z3hC%Z%cKV(%70?DwJ{baIKCT>$0#5Y8>0;SEns&)Mz6AsfwmL^aSMWO;*Qmt##L9p zpur=++r&<52*&q>!rI^fmM}R07y_O+_Sm!$s%Uv3kGaJMlz{UNwnTaMzM~Yrf6ALD zQs-G8xjgu@u{7=~djLk!=HJT`4m^oA(miv+FKhRt`kNZwIRk5bWvYmJuVo7w-<6&< zoZsS4F-*4eEZJvOijkyjWPcE(NnQbPs@@JJcFM$Jt&7*zqMvAAA0_y`prw`LIgo?$ zT>j13^QAw33(-Fh6pW0sq4Ohubt3cz{gVPE3+II!zvUVYx$3mg5G$ySg;DcW^gDwg zcFKa-5>F?hsJh4}#t?@jMVNsFY-1KRCZK*tOa@T8d}XBas4|KTj|5O_WsL1QG1V1| z*%BX8Cz|77Rd$9Av{|3!ugpR9*c=8Ub+UuXrB~sFEgJ`a|$LMqb!;Zn1r;Fgc(lhBE^1~N(pcwflTxqQroT(?1TNLj>_ z3KB*2GxXTS%gUKmP%;ENcW*B(^)D|;G@0;YgpLV?0HyhVh|F}=RYUuex>+czSk&M% z@p+_7w2g&am^B^qmjl*Ta4!5g07Z9%(w+?%W8ese>n4&d6! 
zE{f9k*-kiOBvRFfy^u{t5_9^> z{yRK5EGVqT23?f{s2}nrMjC-CBUcLyma$s89<`zoLQL~YLmvg$4q8};!JI=t{c(*8 zztt4b{c_xhTpzGHRhEvr9ZO?n2}7BU{I_iJ?Hiyc4|B#Ak3kWx!j1CXwp zR!%5iXPA>jHMx@-TnBkHpQd(-#ZqGpB*3y3h2H-c{Y4P`M`&yvxm^ z0rvhjdN?3F^l)GAKHbnM=J3<{9DFE zTuxvYT>GVxy`xyMsBDq6r0*RHN@KVV^1}1e_1pJR+;xSD_jyv^9==04{5nKM6X0MU zek00RW9A<3DzE=CU!Mc14^GCyn%Ad&<6teN3@?trar4;7*vqp3!rx&rY*>T2kNW{v z{>*1Yg-lQFNE=KUx4kj7v`0O@asO3(^fso;YwO3*bL7VnN)5+a$?oHEu-w;M<%$E| zUsA<5IzH-g>6!ukKKv~(3p^I%Y!ePbYo+wvXg<6e7_lf~m#H?{QaMx`kG{s1$6X#x zyUR|-@*8`iLB9`=|Fg-wG83h`8Pai(TtfKop4#dm?5Y@g-Hb}mBX4|!ZCr5Py63-o z7V&*?L8+Cycy ziya0hj)2u& zl#56~uw%hRhpwp3>Um8J(6)-@^;IjpIArY6Jui`JB{5^>)2fF=`JG0HQ8MzTUeoloRIVyIx z1cHR|x%o-gSzXr+R)T|{6)><RPEs`je?o{>` zvaGUQJDo9tRdQc@Y#OS)D9(c{jNcCg@JscC=dy+~;z8Q92fL2pUtD*fys?M%pSd$T zYU}PDOxJR)y-u0fx@Mqs^Y@JzJmI|9~u$H^x(!h z1dn(;tkv^a{zQTITf_HfZJG-%sU?d2ea0_HGQw9Vg09-j`oa7f#G+6*p?{^1yvF!= zlm%^&*GxMG;b3k?3Eb5SnMkR%?=BwwDu4s;E&=f~2xDSnbpfa^gIV$;Z4km(YgfA`NS~sR{k;=SFPrvz(t6;*#B{@3YEax?xsbnX!LlVo zs9+KS;b`ZZK9niy#@ULCoG!#X#8Ms2>58Pr1%r@f72iv+!u79QsGQTf1EkrX_kUEWRQ`J9Lp9?dE@Na+)G>A8ztM zt80z&t=dPuhTtsz;rZb5i0rC$%wz0^+5yhO_(}guu&Ba?00%n>*MFV&2K)(`-AKMV zu=!~voyqRor-7%nA%$%|o2rG8I{AMg+v^I0{ALU9ao;E%0DM4dYJlCp6p#Uph2YTIT+B zsJ|!i5O@feA{IBE?P~eEdcOnT*1BMmYCvH0!QJyOg!t4D-Bo+K@A7>Al1f{$;{9eo z&F(e1?K7ueo^85I0B-Wo`V0h6)>)eO0mD|ZG<)AsbTMhCm2rS{n*|0bv<_YzMdlad zd9CMl5?lO;R5^gKcxw%>;jdSZDBG&h@@&1Wk_4}8KJMiVhkf9q3=sn8n$PWz5wfn3 zBy?B?gs($zR^64Q=VpYfQ|IkMH^&2!`9!Uzh!tdAA%NJypVq~~y?K&@3dMHiR-n|s zSeQ^4$;ZB3XXntig$b@^V zyea*vR(eGbwcMW?011#TQ!`Atk(`VJm2-4pK$?t*6pZS8oZkK{vSzGT<`eywD~X z3+&?BK6+z;j>L4LDwoQk9p5f?sRtC+Xh63Q{*y@O?-PMU_}o%Kwzn-tMq7YB=!J2u zn;ooP6dgIo0T6km_-BMGNdL)RY43&Bjuy493d&}0%MM*zs61&kP%{MDK^x5QYcXbf z7S?oJ-ww_UK7>Lpi+?^!<#q-#f@ zu|#9aG20XvMyO;aHddi1yG`x6?#N(ag|I6KN4Zurz9QwEloGdRj!7O0FqD!64!Io# z$8i_9(67jVI)mKoXo|lllkj2X)%xdY=h5BNXd-%RDy`b+GNv&9N}w?3@g?0K|1bo( zpIo)~W70Ev3@jXpV7hWA)uF_(rpcF1RH)2FxniwLPZjp4=DcZzX=x+hUv4{fpcMBT 
z%UUN`H;(>F)&qF~)!ELh5Rti+c9h_QhOO@?{x1|iz;=N&m!p8Xv%z)3^rHd-2_J(Q zqTk>+3;ae;?nBH5|B0wAk4yPI#R$_hc{n6NtNhc{&*LYd=eJSUwd~51UqSI{ZLGHx zSa{)K(I|g|DOKX`d{(`LNaQ=~U21<=`d>^%IhHi}nLXY6Q&|Ko*UdCG6YgBn)m?(h zQ=;lFl|5v=A2D^X(HVLjLnOODJ7g^@nCp%3z0LB%u7o!RZ-yWnD-sU=HV#u)@$ii~ zS4idfqb&BCZHesLe~xZFeP1pLFN(mLC%(WS`hzTV@T-UWomB2SY#7Fa3kl7gD=R)* zMM`!WdX;y*K_bmgX{jOr{Cg{GDDESvEP0@1g(p>0!M7`s*$0>_6kbPb5;5tJqLu%t zaFC7pDkufEhbk3vip2B0KY>5DfTZG~b1OC=v)2%;zqsFI3P$M?bBeAOQLyEX*|W!$ zmXi=u^45UWF@h>6JUZh_;xhs|Y2YI8Fz}wg)Mt8aW=8qAGveF`(?u{TH7)}3N1Dw1`yL49D&j=J^xQyGS>%{;3O|4SjqygHJKULq=WhbIfrar;KJo= z-Xc&}AY1mib4>>c?!!kqGi9NwMl+mCk>z}Qz?%ipB9x~Fa*6Q=)3JRebny){qS3u4 zJN+2S+@A@tz6$4s)UIp4e9chHE0W;~H%j%XoDm=H_j+*GCW+a=T=?vwvV}>;v$H;o z;t&_|fNkjIG~Yx-!qltUtbeA~RH5MqM=#O;zsQ(~_AA+f9%9Mn?_AY^aUN^phAd? z{KwS<$r(aSwjnB*4fEoincnsOM#b~%sis*m0R4-y*=@~WaHydOdO7_4(s%EC7^KKYW|mnXhd*np951G` zF=&pgMbun;aT9QH5q9<#GGXd+g1|U3R?9YU4cA>En|95Hba8$&!G(xfFZy%#h>Cg_ z=wT-TPQRaoP#~g=-?W!W9{hpvlqV8^TWx50j-|LI=K!uW9-;3ycalDF%6*BIgeI2_ zCZ{m|aKQ9|0jC|gD6WK2EJxzK(!;jc3qo41X|sFz zc{?f$UJ31EGSM&(lZc7X_8>Xaf$oB)S*j^qA~f^NXjJazlgF!s@vOY4kl1vywuJ%$ zRA5V&Pl!IzOnW6r2NQMcsp}#dsL6!^sm9uVMGJ zJvFrWm__r!Jx!P1d;&?D#kN3*#MQp0Y|RVt=r02-&THh*1NM*pF1&bv6Hjg1A#|=m zgP5`NC;(SiK`dB%Fd-KlQb8tCg7Z*6p9DyQq|9ucg(bjIU9{G3sdhaB_1a{iRgNiv z-|c|ua-BEX19TK{X7UNwwoUW%6(#I|J&-_fxf*P54xx7s3 zwX@lPdNGmMIl`r|t6-~+ER~k}M6NyFCUwVgE)p9+HbVpmX8ToUN~hMjSH9tFpzkM# zj4gD)3m5`>xDJ|vrQ*5d@6BCy2tP%;V*9*MPkSR;_#K=F!~l%G@`ul`-T9-{Nu@AJ z{GU26pC*Za9ZTLw|Gv`P2>j!hoh^__w4=H=?q-g$fI4gUT=1PRD0)>~k{&Q_PuKN7 z{#m_KC_r}ZwA>|n&Z;yx`o{2*b1*L?DgA?yt*n9v`Q_(%a5{uel))uou-;cc>G9iW;#~}S$TaX6ni!)nAeVi z&(y!SLH8j*c5OEzotTF5%Cfc}Wh}?|ToTG0ci-^QbTH~9Omp7UP}esa+>Usy-M+Kl zWc1(cRp7LT{p+*%s-ymQM56PBsMQDibAcgGg0hOFv!8@k{h1AYvKKCL%rI#}*5Fk> z3qo%HXl^1-n+i?jZ+e*4hQFyrggDsm&F)NTum1Rv2RcQX#r22!nr^+n>{02Hy1V#t zM4dXeoCH5s23g*yZ)_yWagKOs|I$Y3V|^2Q7a}f7BUV#DudN0aOWyp z=XX5@GcSg8eLD=b_v$SjZ8dExbUYx_?3W?C$~OO+9)A8I`B}1vItq*e)-9+%01(Gu 
zgyDI-XGSB4P)|KgYKh@rJP*@dD&iwh3(FS|f2)@aE;lj$Ro*X*2kH5Gz&;=a&Qi?# zHuZEk#9)J3Am&e-n;G!lTFjftZL*S6e}Iw6a--n7B9)a)_;gE@D*p0skql#bwk`8* zLP1{lOWbw}J8;ocrmUSyW{j@zP%_vy7aVPQD6qfW{^_BL z%6LW7ibY!H%(%`E^`VGoBg6w#EPsq!k@X>3NTg3;>mOMdn=!ZFJqdca@LYT{&sH@8Wo)%^ozd^_i%|^5OLr&VyNrFVZMNPw3*N{SXZ#-sEPJL3XTT91w{- zb989cV_43psufB+Z;61X8@`srJij=NOa>BUpsLL|2$Y92}x zioLbdTb<~Kn>-+>xWdf6t-4$b%o9lrgN;lF^rmu0M?6xM)G8&UqM%+}a6I83k*SI8 ztmu^?)Dr2CV5YU*TYygXymYuh{^2yoS4}Uqx6#5 zHXqClTfPf!9d$SgH1q+#t4&p{L_iuURt~T-#qux(m}BFN;U*FM+8^p4+jlMUZX@jX zN}HPE+L_EdQ$bUVM>XzR&PPlpnL8LnA)x&0R%R(7(b)8fF0o{$XSet;?A#hl2lRQ% zv?EgP{-N5B7OA{xu$`72&ReD=uPO1sIW9&HR*fgwa5d1d4qgbkRXfS;i5k}6j6Za- z%(v=yKb+meAkE|Q!I`-Gja_S06W1D50WF~gi?pCr7;6LJmCWRw6h(+wtK2971qr$s z!{mV^1Ih3bu%L(*h-iR`S!QZ`8%27{8Yh#u^svD+|TUyS*5Y{sk`*WW`p{3B^f!N==BXX@n2Mw zkRPTk$Q#-n+CG$AoP6D~yWc-kBWqj3eB$n&=`Ov?pHUb?deHB6&9Zx2W7|6=@IX6I(ZKn={De-p z|Ky}CX4f}4>&6{DPVGMt+8id6%wbskl>W58v2DI)Rv%592_9K~k0r|1_iWOp z_$43d4_=exRut(v|3O=A$i}V^2M4s{n>`Wi7ypGn zjz#s5wlaV8KKNXFI@8t8=j@=}myTwxdHbZA=9%G79U=4YSs4X?xMx`99~XOh_VX5$ z=0AM&*R%NV!sf)bHUDt=VeHD3g0Vc#m6~J8;_JzZgxU9<#vg}E%hrEue{?79hqp$X zt9Okw&3`)7)nfHZJNpK5e-S^urK2xD$7Pu=VF`VMl-knD{3AT|xvhvV%c+nDB^~&;+Y+di2*xdWxbs-wMHPz9AdW!0r%oF*zRyM5?6%2K+YvJ#)aV{dah1GOs zok^{W-W9WL_Tg%$kG6DquIYIGQM*~`3VC{0XW-RenLJlJ7YFl;HlJI@Qa0FbBM;QF zclXvdY(;w;8^@sY$7TkMR$JI*o;By(@pITRGK+($-u|XN%f&4r!rm<3*?Oenu-x5O z_VwJtA=LvPH}+t<*LM4U-UK0$!CJbtJhL~%@RM9j-KbM z6qcKoeEnB<+M>Psm)D|uNxv7xU!{lIzZl=X*CTJfqi~(0veb@!vgz~G3)b#V!$lP3 zcgweopQav)@2l*-xw>)F$+`cwdlDWS?K66*X~&bjMRF3Niqv3g1XdDX=;kOA7XSfb z1OR}^2HEsUoM*zly#ZPPqLN{900MDLL|D>RD55b$jgQeVhAc-UXe2^fx|C!Jop@lQ z?ow0>IM@HltE>&>*?phby3UkP2HUciR(9Kb`&HnFmB^xxttL`g440i$&pkulwc)%5 zCQVz-D07B`y)H*oU7uMo>}2Pc8}g^FeulEMzGpmGa5s->U2}=qf0fH{S!XdQ^Au`A zBSx#fcPh1=v9fnZTg0vR*u918%f^OY+E@=rFX-xKYh8{f-+v+I0A=3wO023o>6f;X zjeAb$7u%&=(d!nlgy(`B_zC*5i>DU2OYWH;d==txb6-!T*naaCCvkO^=bslWMu(Tx z&)fL!1Am`+&P6tvwfQ9rhXacq=Yh7RJCkIt$z!bz1MRD@o}>OhwNehua~$O1|Bb($ 
zllXW|&q(KIzV**!?@y4gWKE1*xLxJ8HsDc7?u*VP84gQ)_%fkFyeoI&d~Qkk_;ZVG zD@r}yY>$C+Q-2r`bFw=SG^IVkNihiC3RHzHM>G<(A_ha%#xCKI5LV)gLWOd3oJNEy zVU-Z&1sGSOVVMSC{$uIqhb97Izzbq?0WUfi!u!pD03MwQY&6(>iowweiLXi)jR2ql z@zqEW6-JmCrbKpPPBL$ryw$@ftV=Xu;?@m4Y}<0C50C z#Gx|r@Mg!?!ZJi-AY!#5lGq!Ya4~gDfX3l0KvM(Ir!XnN?*LpT07U{T)F=Q^xl}Gd zkz-g4pGGraGBq3{S4cEeR4sFJGo)FoMkC`S___bTy{2rRF(tcEu>u%Z6e=@@CTanQ z8mHErs79nD5Wsa0{__BA7K_0Gq<|4)K@2)xH}L=}69(d>uLgtFn1Nk99tR|m$^L6rkp2dx C_iM8N diff --git a/scripts/proto/examples/viz/rnn_2.pdf b/scripts/proto/examples/viz/rnn_2.pdf deleted file mode 100644 index 7509de3d9dfdd478ef26579bd7f3c6fccda7fb81..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 53587 zcmZs?W3VVem#(?J&$eybwr$(CZQHhO+xFSEZCi7`?%OwdCZ?hyv+}LA>dlq^@`)@G zd0|l+23lrFl9ub*Pe>+wdVD)WOGs{Rd^%|pTQg^KeCB_WA|yUOKAotAwX=!izuwxw z*+kgH$j;aVl9w0K$=T7wzy{Jidq!I(?r1cj=TYqr4mu|xFQGpl4h9_za}@L9 zZF6_*7vq;>eB96PbL=fFZ|g12?{)6?o9yeBF9^%8-|v^}>uq&j#`cRd9P8K08j0u8 zj>h53C7ImCE4-;Mwh#KzB`!Va*Kh%EjN4Q?w4$)C=r!`uRhegWNwHOoead5n>X*@) z^pCjFne0H(1^cYQ+Y9`uP2}-L6h(V`xY9L4p{3r} z$@thY!$wM)Rk?xmjuQ;`f;HQ$qr;hx){x`BWRbBeoU*-yOhWAmw6L8Pid3s##&!x- zm;OL~On!Zp#p>Ji0-+R&m^|X)dnM}OnQD1xk1t04>Mwsj0#1KI*@2EiMqdxypSwr0 zSBTS5IzBi)U)O_fD7ZXg-`ro}G;Q0k5sIIhw|L&U-S4HJrI%AXIKS_QA$$kN*?06! 
zn>0Cs{l4^4?C!|ZZ3i(Q`0uM+T=vt|FgacaM&Gy3p!T^NGwJH=^xv_FX|6|SkM=j* zU1Hp*CXz1%IT#2Mh!HaEbmZ@lKS%;q0f|EyRDn?uXdl_T$T{OeNeR=8?;Ahd>v;EJ zg%Jo%V-GNe3Hot3c*AZ5e?Sym57cw9Uid&YeZ!N$KMc?YWD1DUYe6~htKjN1NK4XgdAmnBRU*{9-DP|s56pg}EsWBMFTgHXPAXQq->d?C z*scJsQLfIo*Pw2m9Q4)d&%1(j<^pvVVf5)d_3G0c$?7j*uOK-K2kiQ50dH2YR$7$3 zpOi|N!lqp6o#4uL7*iu)4_wNr#N*&*aDyd+KIrF>>f}$^N*MPeTtnq|VYvJ~Ne_8p zp8u5iVlOewe;4S^3G#$t<>x;56yz@C*VJ`V%sKA z$;(}k*+G}>zpeLHh^MGklMTa7U1QZkJb%}VDQTDNl(WlJil^xERTI|K3G|%I%$NgfAgAsNAon$O>gPID?prvNw z?`N9Yk_B1sPFJ(mZ4{5y&?xgrhPlk1tF3nG`3ZNa3_@Z2jAa> zqLQ~LTsKZ z`c!;dH5WeaE3tb{WctyICEBI{{5!y7IAD0cdbrDdop)$iWk+J{_ z%@h;=w{5Y6?2&t>jk^Zty08DN@LWDe4 z4X%7wotOIir_Go6qr2NK^&3uTs}3M=se0+qb+tkQ%Hwfem-^ZQjNDzYF|&#Lo7&)2 zQagxDyG`1@Lc}$_LIx7*{a0kwu+3Pm$yOo}Z0vCNnY@3!{xn7JMtqWsy@ls+EiyNz z-0jI5CZF(1qNniorN5FB92{QpP@cJgK!v`_7Y(RrO(ms8qOm?Jx#8g>-I3RdP%2D< zC!@VU!4-yr7Yo|s*i%e*-5s9~O9cYPXrR2aG12MooZbj)o`BXjy<1wS-e&YNK6IiL zNMyedE^iM+)eAVH(K!Yqy3()R==O-W2z$@;RFW(@O!Vjvk=}1Q)a|bu0>WUMq2zxaYMxXHiCS3WWV(+KOdV&Kgjn_7S zlhV#*Lr{Uz6uQB0_8RLofC%#@SQfR^LGvhdgn)*V2Br4tcFIiCguvd>Slh@iIgOO9 z(igwDd~6~8aA=LyQ*}|L`$Gu!d!-%g4XA#W&U6;|9~1)2OLZJ6rjb>V7NbymZ|g#tyMDVk*|@t@ zg@J4%?}xZ9MBNcKBJW#ubS?s?Rg<{UprO2K>LPEYMd5`3t>UgFKzQhery#^5Huj;5 zUvW9RU;-vQ;ZxWxlU)#ppy*KpKp;{fFE2nht*e&&Nq7%i5?@%XM5Ytt zfrTs;!W#qyV^P&{ZoH-@K=)c$U=KHo334f}ZtZ%kxYv)y;eG>0iy}AK5yM9dqYi0^ z(82^ooC;U5fv1t9GF+?;w_~{W#KQ}rL^vA=P!(jP*`L-rH^FYQ%6iRwLbHVjY~h!+bqK^sIeR^fb=4p74hkuE!!swaOnHTC ziuYa-tn3>RWbR1{DfhO6ngB;F{F4aF+NBvzXlOPzC=TU0d12PzKGHr08|5#sD9+k& z8<}9gV5~2f%6Z7zfdZmNq~JQ-v&OP9H14~#U%FMvrY=GA16)&#`M^6ZIjKPMxjCJUJ?Rm(sRDC=|i2;O?$r2+`XQfPdAhZ}?bTFABLM)~&H& zN%2Wpp6>)VxWKAIPEv{8= zW)mQ@{@5Bn$+a#Bxx0ya5kOPltF5!MCPaB9SA9fTVb3;XjcT#u=AL9hT&1@h!tPLK z-<-N;iQnze96leeyh;7#xOLcX0?xYDF3#Fa?FPqgs9cl*u}L(YO(?UWp}AEcb`fFo#WdUDL_KaFIob^y*s6LUirYAh2ZiZ~17N`}G z0OpeAExVIlxLjYxJ$<89B)pg%rRL|XWR(Dd zI~!mNhoLrwTSPkzg7kzGV4P>Q!)Z$WPXaqP?=)&`iBz$sPnAUo$n?66JbTp3qrTjQCxKb%8E{j%!_x$^_B=VtaGwr{69nVa>1kFA4NYM)Ik${PCG 
z@dGo6G%GJRFLa**ajSYCNgJ#Q1lawKwZ|I_F%)?=mFztW!}3O&6uweuhmvxS7LQ;v zRU+Xn70Fps)4n&Z=CP)P8k%E55QOQHt~HkfbHVNlMJr*0R*k7MRp!;%tH8kKfapLwZ3*Vy$^)EdK1nbMItwlX7SlAL4J@-Hg{#!t zQ$DXmCWtzP>bk#xORXemOT`QFX7vgjHm7J^>TR3W3TV4CfxEs%@5N@NW7!OCR{_-4 zG?}WU;&0J}GE?Q3si4Xy6%C95qTJ0i5?w}-@6Z|0bYDg8|3-us(rKS-tsw#FOYj?! zl$r~+{0&LluK$t$5q$X~_MN!T&mb8)q^ikME7R4`m|BbsmC^yc68H(z*?dh!{ zck(0e>Yb`wV?u5t3~VQ3$U#Vu(AmZiHr{nDaI-P8G^a1UW@K8EovC_>k?+NJ-R?Uf zW_we!W%m>No%HG2UGM9CU@V~&#$x?p8Q?ZP)Kjv#g`9>>&9icgvg29<(KG#Czy)NW zgG#ycstP*hsC0=9PbIt_)mbIxqnl) znpsIojj>aIUiKDRco<}>Ue#Mg;pT4_&`4=``A$|b`q-J=_@xc;msW=|UqnJ1FSwr4NgW@je#x|Mu8dkg zo-&9NYCbBnRYiBwV^RuRIMV`xt!A3j5!T&=wqiOai2|cB6wBdmaXXNR?zdPub})54 zD7x5)*p2bRUTSmWU3sRE@rMns%NDQSmYoy}qrv~dZ?L5Uz(PuC14$nq#+5+Ewo@_O z#BWN#Pacz=|GVI(a2?9CQ3v*^FN@2&&~ZG;on7PK>u%mPbVaR&wx*7Wbm*3hk5?J`0tiZ z)XvuVe?^ojO7g+P5ym{;eYbz zWK4`L3CdL0% z3ERK%|A&mR|KExH|Bs8ge3Aj5b?sHY?UtM_R8Tsbr5&I8J#^IZnH7zaOu+H*~Awcun)1 zqL)uCZEP6{K}|RbjI@OE(`#aoaFBS2*!X9+HvXm%LYQAbzeP6p) zb7=tnwt=WKvg8L9b=1JT_9=>B`-92m1=#&-h?J+Z0R?AE0RWi%BSNwwQnFH@`lrXo z@9TuMz-|2_f@%n;Gs0_fx`H|?GLYEzcH00XRY+VYF)=s#h@`c(wE=`){TTf1_{E%* zumIluAiPC&{8d|BSNO5WaQ(;4)+Q&n{^(mC+nSyHp{HjB!~88NFx3Zb1_{s5%Gg|A z?*t$hDgV(rb}Eo|&f~MLHwJLXhxxnTRO|=+Rb^t>3R~Lmw=_6DFb$N??Bv*ZXsS>D zUM&4nc!nc$X5%jlotJ4`$T~{cy5d(eih%l3H?TX6<1d;w(#7;e$J5r_;L>vcwTq7A zgmu_Q3V_bF$xyzLzR3kl()Z1Tr*CEjPR~&92m-{-r?J*&p_Gs0ACr)g5ETLe&#2@#5V}WI=YklyK=|M2I#A+&w~S_?GHc(ZkFG6>WBCnqo0uuFnS^? 
z3m}wV(E)R{dy!|5Kf%{f9gq~%fyEz{@Y7TQFxuDq+q?Mg9N173lk&NH?g+NR4-ga{ z&=GXa_cb9t_Qpm*ZG8!LS8QZ>3i6l1i4iazolok}&=l1DHw}Kz@3Wtg96kMa?%_vB zaba{Awf}jJl6Ui!F9iTy`{&#i;8^db6zHpsHz>O%sRQ9|k^Q%|jFkxMu#Ab1PWrdX z<~QoM&Exm?{h5v?p)dJ|_2Rchkim?q%I_)hA&;@AcwWN_JsluL+X?OR37Q1-oDWhTQZ9j9Hc5jgj#aZ}TTfzJ&pd zQ$1r70L2<-=GQY0;4B>-4%6JZ2#Yc0q8exJa(=xuKwX440^YBuAp3= z7jZ*^g|M^}4y9U(srQG7V}bk>GH=iK;zVa!23vE4xv zJspDG)G`s*}N{se0!?T*o z7Yf*}f{mEd+f_YgPNy$rkdQz-`%cktFXQC1OM^d}+KevuiW!sIyP~&y)u08B9-6TP z$FC|BJ=mS-jWAB5-dbs@-m{2NA8Y-kY92BTwL%@Lu|D5gvPM3*7F9$#c#)1`jq~fq0jeO#CGU@lX8!I^DNBLA~f*x;C zCkYrVxvpGmuoDMee-L1|wPQ3)i%UhruBH{z1lyc(6k5h0M8{uc+`Oz(&J;wAT(N5m-xhBAeW9bHfteab`;jS< zjxUXZCj#9FlG!Z5gbspJ5Ci}}8AT#|c%0bZhH%c^k3)(n=xilbS&A(E=;O+k2>q+Rzng2UupCQ>FLyiZ%dW188-YbGN4Xv{bwZ?7#A^KGV_w$ZjR8GPAF|_>q>E z>m?#}iaN{8{R)C#?kQfW!vsRB5swowlsoa*^JF}wYMZ7Y3c9*MW8xV+8v)KkCaHv~ zD^C}fj*dQJvdssc@(KPyA^g|e(*mPdzSW!g1%g98Z$#tb%^YV7_O3>&btBwZAk^k< zRr+f~aj9W+8M=9G-m|>nwTG&%Nr8bi>N>6|AF`@c2>ZI6ysVAZ_dqeoug~)o+ zY*e9|R57qjY9i+0n1w}f%yO3!0SM2W2P=i55SVsjE>*M6^kHn zouS9e@R+%X8S|;o(>5a|S^Oj-b|trSO*WN!HqxuLW`+|lCm3;K$~qMe@%#DISV{wN zAh&?++wsOCVxeD6yVp5?I3>c^oDtc=>VL-_DAy2Ku19%t>=;X&Zub#}dVHl1_=Pz&7t;m(h z{N=ng$;Y|aMgHq>pVcWHVS)*4VwY8+FLKTFc!Lvn3zy!Qw>-?w1)~&Lr*74biJ;0ja|OwcD#o^PF@Te zTC2;g>iMAT#Gzo&35V9JsX@F3Mlu*=wh5p*NUkLvL(WKPMw zk#i%51jC`P-7Y>Q&>e8iq?H1(`nJ{K{APKiZR2ylGT)~#YEGw^3}*v$LZ?-g&O$_r zb5{p&8A0*#kVsDSe0b}^%IS}txsL{Y7_mAdxa?zK&w>9ht2RK$v zY%@J%Ox3VRgbsT7FXSN!7N_kLLnbm7_`BBrN7W!gx*GuH`Nuk^4*RrJ8c*p%3s24W z13qih2aX=Fne*k_lDd0tVz7A8`s(@%HwNk%;LWnNhlT|EN`5u#7a!9}XjZ8hboh~W zi_NiHNcZk5!_>3EzTe(_7JBX9#rStbO@W^Gzx^c->?9)E7=B)V4oB?XVxCW91#1Zc zu^T*_{ZAy-yMl2LNEiDxbd*|ceJ34bRj)zV>bfT+3`M@K0HBK4!Dpuk8?fs6R3G&Z zVD*n`$GP`?RQhyu^-88?EEnTH5*785K{c@B3;n-mrf$CG-NbNf4>1&8 zycmaRJQjS%$N4^0;VARm9;a}q)cVrfgE1QLqaQ)X#I7!X3zk%P1Wk#8B);uN)n-E0 zAwjDYVaA4_#^NbR*iYzaTdE>E|i=aufWj$#cj$Qdw3oT zkDoW)XA(r>C}a{-;b5awp+A_+AR0BT<$`3>Zu*nlg(+ipZA_0w+?p6U@pZRijUYy> 
z))vw|w|amE8%Ff^Jlz=V`bd=;5eP^~gP>z^qnjls`J)U(Ku1o7PN%_y3~<LCM~ca>BG2a9+A zeu8zzvFNxZV_E^>8+YiR9lMlc0vnG2UKTX`KWb#Ohc(s80No1jEnZ%Ns;rif>o}M! z7>&K+vHh8ET?3}V6_yr?1t{(Ix#YE|>Kq>wur!G1j}|eU(6bfJ!x*b6r_VhDBlnOX zceNql6!kno%b!8f(|_)F)`~$FS^IZ7#T4Fc zbf){>E<;bNP^#nEIw+d@P*o5wr0X*sfO4j?5hEjQJ(0MTikjjS{AHI-P@4mn4g;<| zx9N1nxLIA6o}4E{u7esAwuiP$RT^%cY_>`v)EO{%djBq2Lr;)uaH#Pcz>*T`o$+P*yAa^c2?%B_KKkN@d%mo~!35!|m&X>NUU@JT+ zOsd^G2=}fyF8nLr^>7TabV;a5UM?}JDgUgE8FlXr{SbTFH>ZL#xBAW;h7LGg7f7{E z=ro6H1s3YuB|Txp@bJ2glk);(_$=rM`iQlkkDccXzzFGw_}X{Jl=$Zmubj-hb6gVAb~cc- zrR$9rDJ7b@dIj+2T;a}?L1s2d=Ad30ru@@7+F_vLm+2K#sawe#JWnY#Nm`x5h4Rtt zH*`+vRc#lC8%y>j=OUQO(45Dsa?|DxHm?^2WS0Jr~%PK>F-><)fA{jgzcUREFaeJIb&zBWv12Tc^V=iM6%p~{G z7!{Z;Do7D-Vd%3oX7BeduPvk!3M>^om1*9SK+dQ$jzB7&E4BS87M#1zxr{uqd&YG| zB1aTv1P3XT%>`4Q2tlSr*AgaF!)u9US*5{%5TK3A4a3I7NKK$K&l@{URgysyJW$ZbSRT(SteVW*~jB5WZrO z8SQG{!bThnMygf3CmcS%y(;SQ6JeGKYg`u&iLNbmuhUMXuwVLpM#$CGR~8XU&H|U& ze@PW_S`7qGdS)3Yi?Y95Sk9R|sl&5HdoOpUY~AoSXlHan?*2gOt%~4?BlmusJeNSD zHD^Voj4k-ZVNH6RC&i9$Z>Ksf0B{05!I8zLc@(`}S4F~C3#;r6kNWXgIT0>d*>68o!#g|aXJWf=L$!u`S9 ze1f%$@Zvac8Hm6IJ?PC!gqN-?7#EKuJ>$>l2_1iFvn;Y zJC_&7(VkAwxs)QhJfhs9Pf%RU8BBAL_>t>c0hpvNZ>`!jHF*L@$7YfI|m#BH~2 zSNNtl{fe5zTa_dRF~RLkyIg=~M`Q`ftH(iqoloWBGTBV5{!q3oeyjxDU&z20vSAf> z5xzo!{J;%act)8T2!7#jg97R{s;yW5pG$q!dT3Z)c|IjaWrCF1(O;uzUgCor+HK-J z?d;$iBQ@2hLdxd85b%gSD$=PsF^mP)E`vGr2dw*aVlM!P9=0iCA#F)|4-pCdH^rjx zq&T)*;zd3~+p?&7J_H+^C>YhhnnXf+nYNW|Uf?!LqD3 zFM?Y}LKS)+M5E@sKZh0E z$>#?A^axz!^_Rgk+ZlBxE96#SVJpuar1)nf3hHYL-;zuKmV|U;~9O20bTXv?5 zV^A0GEk&%4lQ9UKVh0GEDOr`Ypy?t1&h1zp1zYYRaiDT8+V)ne?%7IV>$z-!BPbg9 zsBE{Epk0gjsb)OsQ|NU@zRf!13c-!IjyFpQ+ztI8_Zv-fJsb|>U_-mk#d~!Z6MZ!EgtRx4lzwxWZdRO(^70 z7u4SL9_h=o!1u-M#ZsJ;nzyglZrh$nlr%xt#g`9a2*&aIwXkrt&Z7S6_Y51?G7UAo ze7~>r*hC6dKpx4K9cbo=4$qhlgR`fa+?-!SkUm4}Z$4W|eHh3W=n^1VQBX200STf# zYqtWnHH<970!%L2@Ojxf)yR3JKn0(%KV%i+#cd zj`!hDwejt{8P7XXB%q+B7*&4{i1F?8JWZ9e$cj8cSrr(D9=fg)lVpOxRgd_p3aj;a zaV+u31V^D$lol|Ep1PhsQ!azdy6!66$l*_JIm>s-Qx! 
zs2<_>Bn_J|7be(nY9BAj918(BxE7vYFUO7vyd9O$CfZ&p_({x$2gHxS1J;;JZxTt2 z{F&VRez+3v6AqUbY?i(k17c$Lg{IPSNck)!X%Hi4Ag+7PbIQ7WhuXw~9mYenB>Gh2 zeo_{4?B&t*jEzlw(Z4Um&P?KMH$t4?7}O$Wwns3sLNqb1cqQFWCz{(0Li}b|EYXa< zNEgFnKwMF*jsroiXXx(=YDs_rUnNToTyD~-)T83|e3_&6OLrO@cPYC@XtS|(w+xC_ zAy=7NQ1A}pmT}*iUx0&0N<5KEEH%!ep_#GtG#C$Vzqfpt|76M9CJ|g`Tz;7FoHxaU>?IiNo#<&3&|^52gw?j2 z_)unQEIm8dZOL{BR9-#&Qy&s(E+jF#IGtsiQs)tDzO5mgrxdwnq!QkfVQ=~rGcojz zfQH|a#3%KrDjlSmc&rH`sL_VYxp1p^wirj0?hZbWGIR)~g~K>9#4X}(zVdwta&1I7 zfwn^2`dM=F2F$*mXCt}EbYm8EZX|R5YlWqyyG#ew;Oku*N0EW^oUC}n* zT7xnLU0WyeQ$Ut*A{)B23AGFRl0BR_3g+$Ng)fay1y=yNARB^keKwikud^h;U3XJ3 zN#C`XJKKs2*CmIYBm%l}4zkTF@g-X0LC^C;H)93`9|A9-LBYBj@b_+!j3y}ac-zbvdj)QyEAmgv1;j(4 zO37Ol_Nb8<_G;|GefN1|9TA1;X)YHT?Pace3>tSvs$w+>1@S{`^=KroQ@(oEU-X_2 zsSRW0b+XQyVS%4Zjm`5FZZuf)xZK=`8BnFJ2=Ov#F0*QNdqUFv+$79cFZlc_5o80}DuC@Z%NN5c>GCn0%O*!>qc2(Ciz0 zYEdXyd}8vb(_~-vuM=BY>57yw->UAdxT8PuWn?uH7QMSkGdkKdbvRGR)gpbxfppbCEZZyYLoJ{suL1c~{a!nwF6nvdtdy8cVDx;h%`!%c5nk7|~ zHY3=np(*wxrZ8`yJ=wK!6&BO3s|`Vc3{T{BlB{+I1`mO>T)2J41gJ2h^XRBRwT%u`Sf|s6OW5aHGpS z9||r%txVut3bCf`uPe^$9Fk7P*^D#jvbf9TkeBR3$YNe?X3~P-As#E`*}c*U%EKG* zE`g(wM*8(v+}*a(vPna91}_4?$fYCZm?J;2swlBQM+HOvG%xr?$SZ0LH?j}(%uO#n zgx%e|UO(i}LZaB;kZuT4tiVlIo3Z)&sboRu;cTp+CXDFY9;lffpfGpE(Ud%wuZhwk zyr2A)FB)sa{}fU-$uObd2pI9na1q6s8`J z8%(%j9 z`*cQ~q~Q@BRI9@()oJB&g6P`untfCMjdhvwGi@=$MoVB`>uC_C$nY#29lt;d0HBgu9I?~CgLjQ2J1-_xx9fumQ)xB?_1Ky zcipgGUVsL4z7Iltn6v#W6lp%?yOm0;Q;&&xq;*u+gSZuF;^CBIbb3seQE3bSJ=Sa7 zMEoVDHU_H-?=*EgV0!?OF{5`3tk*Xi|Zx4u}iiS%|7z?a5nid?@d5qv}bBYIEY@IA8vv>X9o;K6b8b@<`7y% zpwuuEC$2-A>oF3od=)Hu=#C*p!pK%R2kDM}1mJ?MyIDH{s-XDgywh^w7 z6NjFJbBjfS!kHOJgEb#DK5M8e(Md;XIrmfS+c>4Oi8$O#yybThYVo(LUKqa6-#3W5 z7B)B$ZxJE%duh@T2lwgwidn40Xm6;|T~#OErOJ{Jx+aF&n27Pou~u6qJI2_cM&ahtk(>E{Cm!tTwjNO%`1MtazQb*gY=nl9K`#L zx0P>O_>R#kDi+!N-oRYC73TO1noMW8C{Tsuc`*yx@#t<4Qn{c`#NYD0$vF zC*hiWFqAl2EJlbJs{&4MhyEfJHRA!^;h=zo!?Vtbn$3Z^JQ_r263k#lD`JPvjI~8= z+x$g}CEE+9(9=gh3?08OeU~Dp8LIUn3H1^ppJIb&$KaS=lHXaqkmHR;^1ajDYV^@# 
z484j^HNX9<<`oVGNa=?rL)PUq1fpQx&=v0gf13a5aQN&6`(5ZZ!HPF@H0J;A@@ z$p3-SRDY)f9Xqn{w z^(X^ROj00uQ24EFm)>@s#Ez2iN>Yac6*-PS;GUthX>rBnyOo<_Hd*PbNuH@sKnAAi zrdAk3!4#Fk2|2WZfsa=WL&y7X%Si()Qy8=j=<*3KkEDRV8vo1@)5F@5T{Xc|%4)BY z>9Hdt)rukEV5YFlks`^vqj_Ql9~t?fGW8ZsS2Nc_Dc>bm3f=}M(RR&5`XXTnKyT*7 zDeZD|EjEd&&a@J6!tSDBsDyes9epJ_ zhc_tLLj)4MOZxXC)i~7s0>a!#mUc}^u% z>mIg_x5b9-j~`Btpo@<6IK{jmvqJIZafa-b#}!9W!XvKop^5l7HBBm_b|oR*UUwBJ z-qkn`uR@q`5Y$*1lsua&4*Jx|FD_+-xKFx2ab;L8n44wl>i6lesA>aAPD>2ucm+Ev zm7Gd6z756x-}8%u#u@Di5B#%cLlV%BEfF^gj+%j6>LGgBEHp92uba-nY)B0i>ax zc+XdvNT>T$lM0za?sm4685f&fnBr1K1|8z$(CTs!ABnp}NP2>o>DttZWlr8)XKB`T zC4pYAqk~*mq;zbS6O4bl)m6ldTDgj{xYBDtQ^a<9*%ro0>8K4|kSB5)4UytdQkDcX zDBGQ7Rds~Ul;ZakuwK7W#Xq=7z6rMG?p%q=fS~MPGnu-*3XAPxfOTC9pVa>ZK(Ah* z#dVu4sLqgOq7}`W^U=|1UR7u{1!NrZ*e;&C?d&8mp_FEw&0Ccp28- zd9kLX8CwgdJ0ua+ZEC9@`kg-6PgO)sZIIw~y*FLTc5`e;XWJxL{g~4AK7qX3*eH|% z=nCEd>9ztOE+%(#GYp#oryqAo=s|t7q7l57xnj{Ec0Z9oLA&_7L^>`5L9q=#bKSNUi8N3|avMA-5M8_?gg>iWOh5dO1*& zQfNDmhAh(RZbE|d;bawf$DAiV?No z?nib3*W~(6e^N-3+4{6O1&_Si-M{|Tu|!cpB0ao`%7xKn-8~IF6I57xW`fdVpuGx-gD8My6D#^@AQRE)BMonz6Ik*YNaItmkG94mv&f|6*px6NGpERBCDCp6ztWy+}WF-AQH z$XpiArC!!qk5r;eM-*+WqG!wU9szUhmx$*JVlJXD4vi;en7PYzCQzZ*E<5iY5n~FL zSDbXfaZ2ZvE!)Ue?-$W#@VF(te>HQniUvlz7)>I@<@7`^;bv({e#Co+6BYIH9JP7_+$(E~QEG$7 z@#-fBN)A+==z{E&c9To*M+0>&YGkw*U=H~aYT|{8cLAxn&7Pt!1^#>60M+gfn*C%K zL^!y_LC+<^fM59$)qaLNGA!%Y&Io8PTOQc3ixdLC9DX6x$Xa93zXl5UhA+`a&N@j9 zU6b>dQd8`Bj8iV*!9EL;U{qyWgKE{m2+t!FdF=q>)0?egBI6N`!l$0|%iaqRZ}9yv8UO;e9vvwY^kC@NbvRnY7h&Wy_VNHZ#l^D^a^?VAz zp#eaLT{}%3KhNc!_k4U8Nx8_KKrhXA)5g5Gc6|qWM&}&N)^sf&Mr2{SKE>y7rrE}0Asiqp(}k7jT8@Vwi${q=7=#xR z&kAIdRJ3f$_PR<015>Qa{QiN=P8MEK%u&3vs|=f@Gsrd&hlJs8)Sh03u0<6Alox^; zC@P^4Cmepg4h|>LsO6Qq=bB37Ng?ZsyezF2@g@!ns_&W0%Czv?FuNN1u#zb38ltl6|pQi{n$uH6BO z91QyXXo3t2Z}$dA=tJrCy6{6MMjq-pp5`@u{Ri@K^ejII0l$obJ@f@w5zZ-_XPX8a zw$ksY$zuWAtrTUmF6fh%h`ej zJs#qgCtmrpm8d%Oc2)&K7Ul*Y^SfFFFsIcEh_qk*O^3hO4&3G=6Z6-67mEn$U>6Tl zUACG(StjTUv#s1bPj47oS^pwg4)?GeA%M>^>kUZ`k 
zvt$s}Nj3}Gf=u7=CgFzL!q@d02lySdu6;ZrdEnvUn4Ny|H4T3GydOkm`}|QZ;*QI!g* zecQUnAP+A(G6{i53B) zwMy_uYz7CBq~h8hqJ&(KFy$`uEB|rH**x{J!5o;OVUef-o^P58$^-#5#CsV6o=5&S zVs=nwLUa|AUX~6s^#$!l&9-1~8onSkIfP4m$bjf6w1tlrb$wq(1~lnLaN(>CsIdDA zdV*#SOaxiuz#m*!S+rcPg12JuW^L2fXK5H^mb|63ocPm`PQ>aA9SwIrev+&og;!D- z$#>!I?guGMR)l!i_y`#;x$a!eE(LOOd* zO7@Kr4Lod9G8~iyr)l|>!jpL$yle!uY#J0f_F@V0)#M3=?*_l>g8A#Rzq;`!xkUOi z^l5Z1_%wl7NEr**6J2ip8QiTGRsP046>|1{%2q^xMRyv-d^A1k2Kl+HA?)3BIRc*O zbllMN3T=0Knof*4_HhZUVX{DVMUp^`#*B($y2l_8tSZ7iCL;h55F%G5gxiT=D4u(u zg5@((G3aojJ~E8_i&|!FmrhcC7RL}@J;(%aQvgf!o6WgHm-fAu`-&2>vE5g}MDiI% zzMHebB92sU)s&cad}%|G@|G`(cjAKR8?7w~=2ap)lx_1UB=O130c1!ZS-SojOR@IP zB4yC;mMb^EGaBu)ui%-%$hV+EdRPjc=M4Dma)0(du#UBNc#1#?)dpc?>EIkvrc zKsPfJh7qJS_CIJ0rLepD-i+P68{2x%zRrU4vxSVYPHoeBz2wo8Pt3B*!Fl6E?heA# z+haYk3#KH>Li==(=^qxKf+z&DU}EVem-eer{{sDHwN?Hrn~n|L{Ny9A&RwW+M4%A4 zrZlQyZs*sYe9E@a_)nyAy?p;qJ!1Icg^X^qs z-*&lDJmb0;wKWc#cf9y0tz?cOpTr+uyrecNeF?dq2`tcnqH$V2u}d{8aPl~QR|?li znf%=0E^p;=xoF=bGnK=03$vISrcVT2v%~px&2QB5XTrJ2gCA3NAy|cTp7IjY<2670 z(Z|9~y3zp#tZ0>7e_?YtK2igImr#6tlC>6^5|`(Y^jFWzU)QqqkiCh}A1-zDelEVm zgqBR@40PbhBz?qENtPk88FgI8<;TATgK$OIyeZ@hWT0tHAnD0kXoYe#c!Q#GyNIrw3V5Jj~k`?+8{}dq2Z)t!@MvnnsSI`KNQyUD3pTNcI-P1`O$`_ z6TKQO%EX&7v$wU1&Sed$2GRVed(Al#6PBfHLa~C8$v`QMGKyxKplDH@fTBGY&;)A3 zsdrfnDD8{ugD+3;btoiM`QU4WLDX@OV9p|j1%TlAxq*CT_{s>0+XsOMhaYJ@_SlxL z+ENWUM(i3W)!_1>Ep9y*9Rd;PQaG>i{q$KWE?rx_W7YHv{0B3Jba434utL@eQaZ2zv@wN%i#h6euC zG=JVC^6*J$AR8z2)aefeZeUmw@~u?Yn6Uh~NVcMHR_wA(+EUJ@X-Rrs zQQZ&kq1dn+eQnL!y55W(to)9`SS$iS+z+c(;T+m4{2h9foP?Q!6r*CNf~E~<()EPq z9;>eNRku<^Di1qh+9Qq(<*o=#^X8%;B_8-8oFfALyaYpG?F>OGbsg3<_E~6<@>av+=<+WrYL~7>$uS^xuF{kd?$)kG8-)j+aH5cY*QE$Y0yVMi=o@b zETVO>`0n_mijGU-{d=3wvE8<)8|P8yp4=a795zYg7JzJE)d$*oxove~cMKS~+iV;d z!n!;S8Ktfx{9-ub!0>(0QzI5DgNmb>OuBv1S!!E{k>pa+S!l>7_6fOrDn7EXo$x5q zk>s$_1q#3GO4TB;dBfil5%eiVPgM*oElvMN%4y2GqRpkWncOq%K3NQJC6YMy%s^ex zffGOcnp6FIlCMz!9oG*=qpqbXsnOi*wiHGC58^w_vlX`+aD@qoFg7x`i%SI3t)EZd z@L{p9M#tE?O~K5^WK2DNUr+e6aH)3?;4sNB1vZ@5RbupRNzaH{gFF+hn``TaF5_&P 
zkbNGQZAYxtOIWqegVj|)g*H*=$|y4GkR8AX#9iv-9Yrs#yXHL~DG!YUP-K@U2nup7 zYmI|mK2rCzE<~=WjO9SEuYJowPf|S4AFgp^8E$AiJF%Rfe(HZ=wR0HC!CV*c7}hps za9tS$aHfv$ZQ^~@aM`N`BL5gk`nuu9-R*^c*u7F0P2A%+$Nb-u;`BwOxxM*GU4PQL;Y z#WczY-9cT%0}1L?@*887MYrr}hU};6bs*&Ej)9e_OmPa*4_Ua)iFUqOsllHk61MDK z7~`!uo%=Q2IFr>zKIl)i^{_5G?@h+i(kJ;TMigsLq;pGfqIAb}}E4 zub1@}42CIc;Q31e7_G&QRBqS+?P|Q$(pSjTkNrM#j;Z`n$y$X28cFJ%6l?9!^l#j!(rZcyf3q!=55L6XxQbz<9ngwx3UIED3^~~Q z85L-_iKoB;oE3@rC%zBv#a8vim)C+HYtEsmZsTn z%&tIH=>c%jHbhXIV!Xc(Qk&t88e0Q# z6-C;ae2qLeS5ahoS!d~;awG!xbMIMxv!K`XQv*n9=L`k`O{#GCO`1v?oHH3qY_yN? zl!)-nYUxJ}_mw_Ci17FrvrQ%JyFCc3qjuLHz7kQ>$=Z%BTd-Krd~kT*QuKY91FWUW z!-1`H)H$W-#jt^NK@<^L$7&^H<~~>Z-yThL^w7s~MxQ7c4G$DK8}kJh;w{nYeE zaO}+SdvVM|5V?4>Iwp6i)_=ETd@;y(^R%d~nh;zz+d44#ex0uEX22WnP#$Yg>Dr_f z^j!Hx{ns$xoI@p)@Mu8)z_TzmAqf%(Vabo;!c4ehBAGx*cf$dP?+IK9YoeY9F7lgj zozsK5w&pZ61aAwayi-}zZC=SjYf;*-_yTBV|EwPItaHp`6H|&aH8Oew1 zW~pZW0ou`9mX&8fhu=MsiaVRh9@ReGh?YpZTr#2ZGD{GinK&=Hz-E#Wya;#}XILz_ z7=yRqOLe9q2ftiE_ToTu9hZY}Q3lZQ2VIK~J}V}7#nhBcN$8p#9Y?Tk=G>D!A{rM+ z1n=0K3%4%0U%d{5*Dh+Qh?l=iBR^v3#X6cp@Ds!R_2vuUqfpp2-$BT0G4A9R^q+Q0BkA4Fqr~GfN zULL9PCxSZNdJM)lW^Do;cu)6ik!H<39oGE>QUDr*08KZ&?(ZjJ=S$)Iy9>9L)9smr zdoV+fGNO@dq(bHQ5g}hPVDC0hwgS-vUgnawJ3EMW$dkDw!6Gc(v(5@o%J}9kN>^DC zpfSDk2Cf__PMB-#csp(aaf*$aS5^XglW3Euz%iVWTnH1`0*>DQNFF0{y+QYj_lv@$ z4jWlt8Jo|s4|sNw>J5+*gw!&S2^_qNUKsH?+!SNkerpeSA=C9xbh7=an~Cr5l}W?Q zW&e-=;5j_Fh3meKCt%BQME9m08Lzl=by26k$}=Z@IbQ9+r@Ad>OC{K z5%J{hsHgF_=hSniATmU#^Fyb>l$gFdtJO`GD}b#!%{Bb- z>x{mJX}yA#P4vyh;2SY7-tfUx+HTkB+s?E6B`~EpO}F60!nMj)l6ZosE8Y_Kh4+;M zjA^y`hBv%g|H{k-0HpGZ1vMW2swZd9+WlP`xNHG5?1Y_L8*%{F!5bVabUp+g@BN8a zT&_g>dI}i@PM?nr^8?xt)du|Qo%4RGt(0LS_{Ak0OF0<80jN7JlKDow3!YteLuEQz zoEog+)Np+Ce*ir|!oR80l@D+n!2opxT?}XCH^9ANyB491CA7-|Q0NUwO^#Rxz5j8M zjriEB8P<)sNs}P{5wf9b-%NjPnYbFxoe&eN*gap?Ky#iS3Dw4c zkT_Ibz=I{b&HOv+%K<+^bV55URBqel|n=0o-Kc`jqpNfgFB!;#xhyEZ?S3+e4B=JibtZxK6Bs*xe`d% z#wMw|N=Bgne8#rsM!P{%{rP)HYZyKj7K#k}5n&BLV&k)t(#-wa#-HTa^dXR^Bk>{BRkiW~BUW$LGNZ+6>>m$7{uE*Ds( 
zw#aTo)LC+1suu{dSmAZO|G3M7FWg{Qd}Vu19P4nr5*6M<9=IrkkzpnU ze76H@!4R_$k%4EMfGJ+EGIEh!-^;a^1I)!;0#}=ii`QgXfEP!xme@sB#d(!x+j*?N z^l#>yvI(Nrg?)SkFHlr!e2Is8;cIB<0DY+EYnRN>$r^tA4xrx0NKn8CcF?>}07KJA zw<|&$W#Aq{s||D&h9Jr=#^}_6H1g|r69(V7MNf;NDH_bquO@!rsMzd)Ll2*XLKst~ z9sj)&F5;)BO&e!vu*p|N2U>=vvVCRz>yA>c&DqYLEAfna(GODNRPefZio=3QeXDS3 zk04TZ+)4pmheDa+?g3e!S4s8#2fUV_!jsG2b%e}<6RftckTi*_T8DfvOLV`hh9TqK z7ZN)i^2O0q^4gtF&AbGIi>>()GLq-(U2;d(c{uNP5*BU0Np>bE5^k480|9Xe9$b{7 zgYv_+D(`k;Vy4V5PpI>+cejGvdNjrY(DynC&Z{4lwZ^9cKbws&njJDPfEK^<3+b`S zHA4OU&ZcM69M2s`5qtsH1!b{kU86k4+19z@c4?e}Fz$#tp%PW`xgW+$Mn(b;SUv6y z=qS?I+n=mIi=KA3M82hziue#cu$E*VB{&1;d&*Q)K5RTK;Yo;gSH}TfyAxV;kXnRC z*7-Bx?z6K?Ntj!vKfi(|(7BTbxud#^FapNOY z`0bWEi}-!X%3SU1?6rZSM>A2Ygk||=$8kUmON5^#pC2TY3xudLIufc4i3;Si2xELRreya_J}`kUDGH zv^P+BgS7MK6S4U=Mxh64dkjVY@$!;hmaaNnN>G+d3 zYUIW_V4XDaZW6}*B#GE-il?dzCy$B&$4H*=saEBA3uW%y#m)3-Ew*Ns2OUBz<<{nx zG9TtROC$G2zNa50hYrzR$4kJjn{q`UtW9bFjonrsc-Q=nKg1gzQx&slN6!p70XLWs zQXRt09fdgx1B1WkfZ{u|A=*@b6HOTYA30+&&_+$m9EDPTGY&l}&x^tfFW~Ea%quUe z4n?yNt&9==x&wK2Jz{MUKF4LvIW-x$ctg3^&SWr|!eF3vnxabcuNj1rrgrwWIe?x= z@3f@XEN}Ywpf5J4BHu~K;20!T$b=@1QlNr^9zjyU|Grqja^$QDkl&r<0x2E zNsn`9IL#s({e0nLkC>3_B}L0^rZ8-Y(uts42I)I>#_Bo3UF_9KGt&K*uJp$f<=WWT z=7{eFc2q*@;iq-4`=!bAi37bC*=y~n2|c60?4VT4Y{gD;i{m^Nq{`hL{BjKDtAx;p5~o+_TvMF#|JuY1Z}33#w&RN+Y6{#NnapY|emKu%MYY z+|&+6#*3FCGRMkwnTg#p5Kpv{=KLb2#V)u1VHNlbRT z@kDWMDqHYr{>!jh*cWH&q#)SYvSxh7>M)YHgOY9xdOM(d3~oKeMkZP z^$FcMd-o6gG9*iBVE0&#W0L$CBc*_5%k=$Kx|Egp5!@EzpXYZ>gB!z8mxhiPYg?_wUbW-&h#hW zgUA|u&xH$fs@;*9K5%?C^~uAQJK515RcH6fa;~ND`C=p-y@W)x&2(^gu*_vHIvo<` z5uwN+BPEgcH;t$gYehrQTlC5ffx=0$22nugW>pN?Sb@2`U1tu6OA|$=U2qzQSY4=OA%7TA95fOx|ig@m#sd< z$*;kIXs&$>r`EY>8h0LaA%_I-nwHg9Y}mKBN(**;}(UWGGN-6~g;~B#iR-qUP0-L3;b%I$8beQG6P$n+4vDl*ntt~4U*cD9@ z7tOf(tn5+QI5a?d2Im+}@D43F-GKAb0wfk)dE<6f1^9cOI+KM1^`i264;3AcrX)~` zH9hWcXyLnq0Fu&ow3~}yoXB;k8$}K_3lydgT$DQ8HR?#X2ZbHFn56Tb-vJi6n^0}b z`K%2hZFc4i{^#TY?97oqAp9(3lfNLdJ<48ZeKX@stkwifYq=i5P?0xt}U&!!J8H08KbkN 
z64uS&J=@^QOp1(_B7nSA|3r*$P-&o#bXafAYH8#j_whh_o%bhMkn82w1N>oSE`?h7 zDb-jkaqqi$W#-_UCH~q=SPW-yboMAoW$+JKzhEsXPdyZ&F8Tg^6CcKGMdGRzYZa_g zJc=d3jP0Q5D+a~5oAn{afLWB!%Lp51jaWZ&&A*^n9(Rd69R&cv z%*vrQE(@YOYaxB$#AtU#HCd7J^pKORK=zfowvfsL#;~D%h+t*npS=F!!h?x|Ff$mt z5zYgfl;q^vQw9We=sx#}aDbLbCylny*<^#F4?th9mL`sXrt5Cb@69D?7R2@E5YYyq zl+u|(evKWztkS08&QF`~d|Q1$bu?=Gwm@~5M3)LLSpSi3zUDhTca{Vz-zX>9?`KQM z4QR(LP@Tbug=r+?jCHtZNh=#$C4^&_s>zO9_i7y9TS_?Th!k$n7(KnrlPNSJeA}%X zW{m4uOaJ`8K?W>5&6Dl`o*k$G^Lpc=^S;-wwBNk5SA$#6DDPAYY!hZzIZw&H3Mnj%4FmSS z^P6P%DJXR5?s0t`$TcoY(l#Du!=_mrm!7KHsqh^b19MYLfP$S;x_o0Jz?Sf1HO!Ql zRkI!by5sO7>Z<~@9B)ZUNxfPv@~FLVX+RCWW}1cLB@(^bM1?oykc)Ra=%9i7Xn&qQ zMV72%O||TWEl)sHF)u6{=;y`O!+T zdll#wgW5R3SW{6jVvVA8{ZCJF`_KB6=p!8RW#Bxf4cqfm^T!2JYnPd z-`w-OkjtnTiiJD(RC$od`rUyp`*7}RLAr|}hSO3UQ~9Bc9!4Q87jHDXP@7PNL|SIH ztZPXa_;6h?1PBDYOxdU$UXTl>T{J=H0_$zE;@WIdjS-m(3C5>M!)9uYYDpSBCrIF? zKDd7a?d40$@Es^vGlq$!Y6q+g=4nrC0^=^2yW^}uxn+ELq~$1Y5;3XkvlQ79++5>t z9Tvc`qUtv-u@-6-w}bJDu$q)9hk6CqT5wQ|t+)Xup+{)+s!@!fKWO#kIKSzGHrdxm ze}c}DfGkHWBJ2z^g3hFJjfzq5cFs)DUD=W@;f>s!pEmzpWiv5ZKuM{ zBYJA_!CrFNglVbSZk@v$xU%tbS}5>jA6rAG{J+g}>0&H9l2)h&g~!7DIAu4ItGr97 zqGB1Rl@&K~`%qQuJ8!8ii=A7@o7$#!?pPFw!BFCqW%f(F6e!r(~vR8RM!iZjC+|2b@2a zLoeMLhq!@h{pC(Vw|P`hnmk4UIri3r>Z$kbNKbzxZRS}}u*E5ie3)ndBkQBwR4{Cu zmKWjy#U6y&S3G>Nik`C4wt5d-gC%KmT|e{oM~Ju5?Y?2R1wK?r_{rpNAv<`QUV@ka zyj$Xrz~r$x@VWGQ@HIAV=e#=rIF#tVn_r`W=7;t!@aJ}J$+$U4cK*4B-sg(J*G{r2>%S?Mh{u>^UCP*wm(#Z2E#yuit+nSRtLT^Y)3pLqP8i4IaL`Yry}?Lx$qQ8l zDQZ~zHE{W>^?YAMhm7^aQGykGaTcU0bj-p)N>&TJ51zafqDdJ4E`X62ap`*FkR!10 zu}tQH&%w;BLSYx(r_-VpE%)+*2>5y7>u8rh$v;j9CnoLJ&H-M&-oWb!5kO!K75!$X z#M7%R`ea4i&}LNxYXu$IDh9lhL>~MzqJ*wZKiUZV^6Key&Yk2q(DZi)=C}tCt_=uU&J`W+!>SW$<#Ow86L$soPF{q;Z0lOMTZU z`b|;HizS(A+E(q=9#i%pp9OUh}xG!@{5NrLRE9sy!K<9lq%<+_bBw|y9 z@@3RRln(&i00$&LK$kK9+EzS42ylo8{JL>sZ+i;v(JteKFpdX6?IAW ze)vuRVvI)5-R49q`D28ixo71+Re}X)@<6nE*D*JYQgxQbX4+XWXklY8Juaa_?t{{j zIx8ZT+*qyolWerXYD;KYF~%nyl+&N_m}4&isNKw1r}Uptf${zdwAZ)M_7Y2zP%;Zs 
z@HDaOB8X*-q(uPlkDjtAv4E7^O2Z*;F-erq>V}{VC4g2W6mbwkpf8DJYwBeef<+w<^gSmYWP`c&HO7#we_xJG92(tZM(#K=mxy_+=oD zb%3PWm&znuRQY zZy%@3aH`h<<@^iNfJ#5qZES$9Di}KQxhvogA-(*b?Sz+BKXpiQ5>MhZQ6 z1&r$el}mS0kL1mo+f{W~0U~;JF(#X#uMWh}B_ARGF+5}+go1$Zjdwyz zpP8@TBCy$Chzrp;yt>+^TnW+mI!2Qwh#yhddO?)QLYCXIXCA9bjd>B=0Nh2JZ!6<- zZH1ACPC{vF^u>)$TDG zRQ~t3hYWM+*B<{o)-gIX@+*{7XicWn7 z-l=}S`~kGE z0FfyuHk*cC=6r~2k(HLetR-RSySm8W*XXjM3487E&~7XWRFM-7F)q>8*jFb#-J+iy zi*qf2yltU=PCQ&Eeg>};bDp)>iSkxh*l7_W)J?F7FT|SNJ90KYVdqKQKmeTIVsjp? z-rLxGZ0B3RSPylxyJUGy(r0`8HqfSQ`o620D&TdRCC1(_$@5e-KqrV)Nc>xsXUbT1 zw|{ojP7&CK_R-;=ccuHj zK>Zjr+37o7F!bAaU&lL{Vcx}Dl;qihkosFM=fEC=v2r2hqLRU(wB99N;W;sS1|nle-Jra*g~W8|_G&a3_>IS3E5 ze?*s?aedPSSmme4N%E#Py@nXTVUrkXN&C<;CDW?dP8W2=7-^a=Pb%V^p+3^C0{J9U)D^hT9_bFF#eXjGEZ9>TVEijd$;D9fMdyt)*Hg-?!Eaie7 z;=x{q38^VUwa+Xm_nq-#@!x2?pZdS%8oi@CkVrsU*GO@jghL~PnSG`Y)7XO^M7bOL za-EzjqjhKxK(awwcPag|LLvK11~X{QRifeA=f$b7Vh6VcG-M7fRV!i`2zens4v7TU zQ2Q548yca1b<=Ngl*Aeob!~|6n9q-WJlsd>5z$CO6h8<%lDvhlHXO{?sGpr7fpGVK zMJIcE)*W+Zu?(?>#Ootd8_BqEy0GY|T0qVbL3v0-G4kRlFjkE)`gEVGQK{*LETZ+t zyI{!pyP0clhh$x=_!j(4J69?pl$tgG}NDj8Pj^4K0 z1DHC)Fe#n$DeTqMj_if+mBxMY9$AaV)oeXJbnzSo(??BbxFW94zNTccfiVxJW4Tt^ zOPQ3nOh3nYDhXl_(BcA^#OQfYyg`(xsu{V&JqAH@0}F_?$ng5{HzN3~Si-z1Cj zvi6R7a>4WragnaH;gOup^gUuz*j@ep_Jab@S$W(?EWi;yC(@}nXAH$~A#cq}q2ge@ zeU5kWX=>>e#M`>zIQx5Ay0L~$g{tkH4=5ldxF*_CD+PKeqfTZ4%EJL$m=UiAvr@Pg zX)S4!NM#=cJx>pbf`iwMc-tyoI^OYB#cyPjV31b|fe6`sk`CeDM|zuL?&1^dhH)Vs zkFx}-6;J#*wLnFK+w@27_l}ER6;Vhoy_sO**`LI=>O3gDfN{xrnf7t}f^DNtT|G}J zUXIzwZ*}ZI=#mp9+%TlYC%XEq*}2fo+yHK)`UB%)={xLkM|!s1St#l~?8VQCZU=n> zrDh$wrmGQq7V2es-ZOSxG1;REz&Dn|?FYF03?!z;2WT`!A_6StFFz1GBDyy=iXr2A zG;XT92UxCt9mEuCoBn64)~xEylMXKVqV4>-u`m;T&Gj{4Ltai$I|uAN@=`^|-WST} z4AUr9c98I=7{O;P0H7fqf`?PO3XKQ6#muAirAtLvDG5B#DKV2P=XL2}r|s6z%2X1u z_Oz0DrktUdxr$?6P>d~ag#p;{B}e{xx)<5q;W(kwQMty z3>|qG>T-SLppE5kk>l9E>?KshV#F0G3;40wyOYol8VbsM-qM#`gA$w6@GvT2#2z!` zVxETb{DSD=Urzt%3Y@I@VKq-}i-@3>9u^F~eh=w8_rC1ci;Cm5qm>Arp6p6F%*-h+ 
z3gE1Qcdy8J2f+@^>{q3Rvd*u)r@Q+q{MSC0Ro4gk{oO4q!_UzZG9|LR9mPEMx(h$P zBKp{$v)@i~DoAp>bp^P3CJm?e7n~A!+PkuLTCG5vBcC0W91oCqqg&2*SUyz?kRRkq?EZfTkVtRU}7v^D0hsxp{7FhE=^X};29BKRCtk$lJHx8;GJUTTWVV%;?1rjT{P z1Z}n3@Y96lzMHignANM6OFjJH;;CMu#su88%7Z(fhO0M+$=z>-x3$Z9Bgn(dy3kPn zpElP832i0W01U6^$gEt?tLlgUZh?VI9JNWE(a4z`GuhSz7O|>j+$PjoX<=oSW2Uij zq;YTbK-n28&6R#LE@BL5F8i{%3u7qJ1EC9mDd*ik zHjTT#qumLp_LvLpC5B0ghm2SIO*=HIa54uoXQmpFw?VfAt0Lqrl`@8V^AXCO=iW<@TkPEV-_Wrq|C5`v!uw9gIHRNjlyeHRR(h zai9D#4#Y^YxvIQ2$R%D4+etm+-TAF}TX4Lg7v3SWGR{eYrr>Dw6d~^W zb+r~hClkg@(Uf5G)}Z$){_Tv3bdnDMy?a3AIHB7@--R9oggD%Hs14 zmaDw4f9I9d$e&m_f8l;(==UL@*->1Aoi^Uq5-4Tw_TWc-L|x(r#HZT?#9!9JRyDsO zd!@3<#+LKZhN*av!%y4A1r1^)pG?Z3|Lnz!`i>^5KOmq5M&R=g%}n6xk=U`@utKmW zA3cYbUQ>YY$rVfy(6>?SnFlTnr&26JeMoHIBEsWn5Ng>Y$*$Od7{+RH*w(^#F|1HtaK@=JtHvf%oqIW^^ zikd|$gzK+NJR)XX?{$w4M|w}@{qN6}GJ*P;R2pw*4P61qHVh#cz&<^?m-wf`PNId>)ec+wc=p>@1^7hyU z%;Vh)Wrt-wwcC%^pN>(18W05dVxePe@{X5D(>eEJ^`}7|a99{~bE(gw>|5R&29&6n zd1re0z`7#7R2jUoqgX1kF3?ArfvQd?2=$vSO2=8Fo{-S1=hE%UFk)A;s7VJi80^5+k@Vk$sq7TmE#YArAb zQz97{(b7tUj{s?r8!&R+8}@;MXnzJz2xU6FPbTgY9!!kN=B>k1LFGtnjYY?{L;_Of z_Iwbdi+LlJdKJ+ayH9hkKzhrMs)+vaKQBQ0FG!5o(-gg+8F`EVaax_*f%MXWI{BA3 zT)bw{!VP13Rtj2M1`r`n3T(L0l{c@2Cb*atvhszs#vD{K^z-f6!0FGsZDpH zXYafDo*bM>YB2PaS*hU=W>}+Xf7JDj*S3RF1aNWwQTfYoBdO=Tf>ab^TdZY~P0>zQ zVfhn!b6%E6UTrnF<%&E?e}C;8wr_HtGnj>Uj&}Cx#u)>n0NawHsa1 z8C_m<*v4*Pl)QnVq}feI?2 zq1(dCBDOO^rbG9R1qJeo+sEek{4Ne7!qznuNEl>hG@;_c2(x;B9|*^a$$ zJ#B4VqRZ3DqwCqxtq4M*9;)6=FKr5{_W|bS<)+*eH40 zmtmymf+?4lUte0rpFVNrqeX!|+J%>wp*4+zhm~_`I2%9}-&Mcje;z?ll@l#6j|>Xy zGk1~NECoi+gs)Xt2uV4ZC+J;=e&I?R!*Wem&Ko*rd9$)I&6RN8)Gerg&CtYw;&|?8 z#h#TkUE|OF+cA=JJY-%p;y|&ov&Bbmu=aM+hghi5L+Iue(+Yqfndev)i4*PlPxwDj zwS#GpNcZsg<+O+G$J&@LP5KnxLlkPX77~u;h{{Skmhqu0xbjnZCxeW19$2G6_ zWEOP`4WSx|NkVr0q>@vb_Nwk>dIZ>A@cLp-Xo91Ot*utq9=5G!d^nEm1vl+TJv8IE zwEm2uG3*>t%ge~2>fmshv1iP>qMyGoXR{{?jgLZ%Ep-~o7PVtdhVMle`;*Mv<4`m0 zR!QuA=iUoAWP-RsNHvOvqm;53m;HJU;{v;a6d6S`y9bk6H6D5}F0G|P^?mE*BM%ci$mGb-3-j$kC*eur_T5pEWptSL5umDc&JN90f( 
z4mz@?3V>rQ2?MWf$eNDd3nZLWZ&YSWKcg&v$xpgEcP7(eR7KjA{GlAUNWj5J1FSHrc>LQsqv+V6 z{x%|lZpb_UT0o`0#4XLzxP8L-WvVLsiE|1<+(;BQ0fu|YVtn1D>M28s#@HsDJ{?ZY zP1vwyukzxuBR1iPMCW8Bkx0mM**hl#1 zHWCrxNhySaw3h~Lp&55QOy?I9dO)EUOJEEiS=*tZo#k>GKVjZ+PFY1S=Y>WMfK~qY z(ssi)y;2#SFg$Vot$d!$N%F_v$Ks*c|=OFp=eCWg3}y<<)A8Wu_VA?++(g>rrC?Ep*zjJ5&J)+gQ5L>%nxox_Pv7>uXEk!FCw9Z zsB0N8{<*H^yr`J%aPl|v(JYLybb#(L{1|V@pDAxrZ?|vH>tcWAF0zsQT@H!g*S|KAxgf;f~i+XRrhFOGkxo8|eK` z3tD=cZkhhoQtk4b_-&5;!7)Vco>7Z=x;~8PNnqU8pf>T%bO8OufJc-lOyl;{t2#?3&x zvszYyDqiqpZ!8B}dR&$Z zu9|vpURBCwygIg#XIK2%e@n$G0(|hxrA^VqcCNLWPuQ*2@_D9CIqE19ZiUf-;UL?S zhYU7ygIJ7rb{SRmO6!>YiA|ZrzMgL6wdhkI(b?Etx{!mLy9{+HUDkDaei%+*_7ft> zb2x~yrG7moXRH3zj+XJ|211M3!yBA2T!g`tor&F>duWdTEGZ_Fxi=pwK`TR!g?}C+ zGkkUDEwcLii!bZ4?hohiRM5wv^SHQ_l<~EdaJ55dAQK|$t^oRwfE@m&>3bPK zXnTxfk7pGqwbE-%I@DbV6$|y&6%_x(9^ZcluT>8`h|#8JE-+~H<^8*v-nnTvCO~}U zvhTio;!Jt7TT$VA2ZsW-IZ)X!JukJh7W$86Ww*fv1t}s?q89@tZVsvBVtxLz;LOoR z4aG(omEiIeE*D7tD1$se0avM&%R&n*cw3T*Qhe%?&@v6V1MTbup@3wF9 z)?8CMEP&FI?gJpPwy#rKLd&XG=nJ-YW_r|khirg$D9(b|MqnGMv62LHYo`t6Fn@*X z1XVTUN@n~8Q%5Ug@fEml!pr3s5-ZhPb&22OKndh$XeHkA$fLS!T&V}&=%h!4Iyt8k zNxIhQmz9=6tRP{Nkw)Fr_xNNJ#2Tc^>V+a#sze=x+iL?4p-FY|u;)J>Z@tHs;-ULj z(asTg(wJd~gk z4hgP4o;;6;)E0oV-f4QZGVIYz-ywE@hqEZbviTAjZuO z&$Y%ZLb$NEoU}u(fLty6e;s*U1|>5?sF@tv!9KmrM&ByNC7{^hLw>Rw+N4&aK*cvz zGN1jga-~n2GYgj~ST@K3j~$RRjKWa6tOIGor*1qMAPf~On6+61^tRU-4N}JaXe^(T z=o=GhnZc_cZp8ovuU(g|E*N|w+@GiK`kZ6K%-^C>DldshYl;;rFRkyQ$(Vqd|DtoF zx4{%6Q@TL`l$OX&Dm;sNLy4FDE6#;JnWn~zgzJ!0Rb&i=1jkGmN4Th|9jA6|kRBR0 z0S*8(k~n|5Q9eirc14S^-bgd=2t(+M-8ODE7@Wn`URTU$Nuy=?tZ={%@M64cbI9Mi|yHd3Q?!8N$(U3X*l5g*BPru-~|I7H@a4zWfB8B1P2pi z2%!@WWdu(&6%Gm!_%rYovI$t_`pe{;6^a|FHe(EP90xM1KA~u)7I75)xkS8CU{2?_ zz@m65mPG@%8MbB)xlMPRK;vqsw_Ih58RbYJW99XpeR>$$m9}r&C}*ksZ#!+F#Bje) zdZZVYh{#FMZne;3C{#&Y5+ji zO8PTIt2JZhQoQoAL_}V@Bh^S0exVeb#3B+AcYY)$Ww)e@jw`6pH)BA)z!SHF=BRKs zXZbijgR|?gFTfv&29X^=xTILocrZ(u;1_RtJMNf;RErE8q=|pn%!5I%ivQ~8*ZZDI zRcRdFRXD8kwo!2nZb0AtMgi7G039%hd~PmkMg(Eur6C_Ob9;&9HG5@V25@rVPCR(p 
zxfuCYOedG8duD5dyc4-=mqNI}k)9D({}PG>i)^5;ict(%<|P@YZ_STB1DE?;bbg|h z_F7fU<@>EgVS4Rx_O;C?u_?&n%B=P`|6+ewXq|XAC($5&wd3$a?1KR|K; z1U%c2jzf31E!iKFtJc6i8JV(2pQUhCr67*+n9T|1evd`46bjcBmxKVpdX;>aFlGf@7c}7{TjR?M&2j-n9>{%ZmI>=#D_MDjtDNx&WitwXA4y`zKGoki~OLl zTU##gjetS;y`(WOttSm;Ts=F+qLzE6_L?BZFVt0vL}qAISI77P>5-ISL*7) z%}zx834X?fU8@Z0tBqzjp$Zy1NV+LJKj3`ih!%-U-gd$v2FgQf!u1C-<|m6b_m`??u{JjI;NvDuC_wkg6=)$pjR%FMt%?I;XH|-qHLa-orDZa~w3$S_W+Ye68 zJwBR^MsD8;L5w+FC)-fr0D*i06R%m51K(Vs25$ z&O@<0hHWZr#i(P7GQdij`Gogobp~s+^9(=`VL|S`@dC|Jg~D?~N0IR+v=*5Xa@%;v zseYwdN86#(J$-btA=^Z%0oz+Z zVBtAMaXBnF)B~$U5*Ek;j-m*6da~cS))x@KfzIV?!DS=A3EScP$`(odzQ$}6Y1y5S z87UT^xq8P7vphgJC2>fyyjVT=q*cE)74kilg0M*E;(Dz1jVGHMm2n^X`-h^cW+9A2 z9jB?~-tauzXYq8*EXL2WN}BDaD*B*HqPB+yD-Jf7-mYHBMN6L@{qp@Xz;d|ki;RJs z4@3#Z;mZV(;~QTk?$$d4kzrjIJ(yjREunBgIokYPKP=HCZT&rjeXT$N6b-HrSB2GH z+Ng1UmU~42HY6RwQrt@~hGH>#^|XXh#O;GY(up)OP$--d4uOUtXr;gJXE!ZYQ65Vw zR@ACx1mBD*5HHln15(|+K*}_o${MLM-r``j_G-J-Tb$T`PmfXBd16^nVrd@ts9XLj z3gAzkQ@zDUYNL?=o~PE%&Ief6UdDz5;ql|>ndp#2&#gczzaj(7PfJ&+VR@;HO|(yq6 zBUq8bf7fONSPa|?2kHB)eOQ$!>>G;DKOcU_X-k||`3ob2yE~<33@bcYWNQid+gH&o zt25*&!|bSITY3gP;~8$iZ) z)?J$BX%D@1BZ_5gsc*g9#uk6lkv5>SX<|eb*HWZEHSv2&ddpv}8;{qQ_jzD=GNJ%y zi!RD`5w@g6=k1q47mgv9*Jz5w`V0Q1@=KvtU~`syQ?C=}m;8i&gwNVx2|Xa|2|KdKbpI0#g0&+J&mV|(7_DNCtb zmP9xt<^utC_q@dEHRW#%iRWR4o6D~6x|DFP(HKN%RCp`AuM8)V-K!M+DhGF#Id;m> zMm_pM(pqhKcbd+hRutd2N@29(jd}lx-(fWKpttSM`h)E+{~e@Te@-c2bn{IPSa@XY z20P&{6$0~*%?dm=_1Oap+}HC6>~e{go1Of`5y#~M8nyTAU}Kf&>IE_9vVxpzeCN=d z>o-GPY5Vl)9X(UqO_jG3RWyW!f6#9rgdypwbcK~5&sIBob2KOA<^BD3!(&sT3U)^F znrYG!#;gBZ`^_Z<0sWJ&S>@Hsu7Tk8^X_{a%M`jF;u>YF(FHHJc2oh0CKel2v#I$2 zX{DvU(~4LOh2v>j2Xe;3HTXg=s0NtFZza~+n-7yf_Q!`u;xJdWv^ZCs!P$m@^bt8J zXw_{s+8auh)^WHixC~Wo;hbHP2sE9o2cgtDkynBJuYen`wu7TW;n|ogAUY(#uzQNve+T?pe zslM9f5pl~@VCyU!#6gx8?=gObgMF1DhoQc<}~E zgD6LGGth6C=C6}HswI_{bk0?$uo*r;j9w#p+SagbWo+nZFztU^2hC@79Pk-z z$Rr)y@88#~IsnbLtg=E~-|%+Ya6WnEwlbPPxu|NTeXWhD^eW_+Ul-GOXLIF}o*S{$ z#I*?r74A~yS7PYTnosx zkqGTLn@Ht0u6CE^(9K{DvYh&@NT2A?4;>tRKaNz`sX%`u2}x 
zD8S}iS0?M7a-T8xpiRNIfJLI$7}U})<_la{F!hL4Cdgwj-J(I9nLc}=6J`9p?_ClKa`8+kV&>i652WfFl4 z2;RKPHlc`e1a0FKN=>hhi6fNbb0qeUDHPy#=Hd689WL6HfroR8!=yad zME)H&d(L}0pzY5{`Dn5+qw3P{b9uXg_U?)gl76bKxha3Xu0v`s=V9YNRjuPqqfZt7 z4Jkyv_k$D|u_{$@A9!BktO{Imj{fSUiU}xXuOG(#(rhjX)G??(5K$jDS5e}%j=5m~C}!L_;Jkb&}5oi&B$kMSQG2Co3GTF&otY2w5Y()ecW zJy%ey7%G@^YA>pM>4;&%DfdB{3w#iIoQwC+&C0RM1fL`*>#Hn~ML>sTsNMCnMLcQ5 zu17}_RS!eI>-Iq#pFhw=rCJ>+@XQ;Bn{RkL&vgSRy1Oz342Fs3seMM}ha@v(MpnVonJdY2q zG}_LqtZP{<^`aO<60r5uft;mgtonA^aV{dZM@O0B6(wffK|!d*r%ZxxVim8@F2gpG z&TBlZSN6pL8C^6+3rw|iY8jfgNXAb;aIRfr6IbI}DD&4(+3!$@I6`bmZ(2C0|<8Z)_ zlg|&)kXqnO4LS8(rY0meEuCNx<0S+C<4jQgFpt;hBLVQrzcHDNPE|zFA5E*WHkhEM zV#vnvm2NZ@r=`GKU-i!}GqQGF@n6EPJXbiEq%y%swV+mF2Ge|PL_W4hPVSAq)gO7& zWRu#MZ%CZduM(@E>ivLoWGOzMO3=Wse>cBYv+Nk6r%UDqNYF@<^8+hrqpJE43VrwR zRRE?8zv~HfZ#C3)>ma(29$zNDh6)1=A*&lDDNb=q?Il?#&?^i^E)K+(3at3(U7ABu z?9VC;used}WIRUo59N0=#%)Or89yN-@AnytQZWwlZ);}o6F*S2I)DV0guG&jmawyS zDOdVfI%9qXUh3CPyni!XNX~}JqBtlTS@Jr$p7@O!QM>vzw4NRR@tS#R4+{7o*62ws zwygVJG3@1-GT3h_b%D~iUZDl8adtg8AnHqWLM_&?ufzUfe2cbQ{^J~B077CT>48wd zflWR{c=}=Xula6nU_TEZ=T0DCi;%zOwhfN~7EgmnD1oHAb|FF$s5c49^vogiOa_!jgt>f}uXFRSsiqMJW9DQ7 zwOOfiiQO1o`ntrz7aiKTl?YIC&MJ8BFr|NVQ5IlVDO#2{XK+Dv=29j3vIv~Gp9n=& z%|daxnLSF_nWk>$UX{ZNq8iWvZzgkG&4Kg?h?tF9NxD-oW^~JDJF%>61A%E{)hgdg zx*%3b%0T1_nc2G^+qYZ7t!_-80GAQW7XacwbY8~z66gE7S)vva0r#@ZU4?~KLCY~z zu$YZm@ZyCgl6>fFJM8@bK{Nz3kI_2$ut@&RF3RMao4MTGf!`O$*0f1o-VeOVYIdmq zh#mQZl~-wxFvb3v}} z$&En~@iVm4U~*hi@sxn`?UzcS--;aA17pZf;*C$1W?eogQ~ZiUtavx6ZqkXHiP~#! 
z5s|Or3Ot1u$LSf3oL~nphVt<1(wDC{pxc_Tq}tjWH&uZE*Uh=(*fy>rAWH9d6m?QH z{M6(N09(`7S0_gcNmZ^l{B+4MHn~P*u2CwZ!vj=%w`=C$Y1Nd?mh-}E!XgJ6Rgm)_ zO7B()M9E`<#P*cKXiQ^LCk5y$JQpr)z2j>biv1YWV%-syk1WHnb&k<(h;{b2G2P4_!zE_Ce(`pAxsGKMjTCSOQ+3gRmZ%a^%LqR_bsM&DY8F9J_Kqud(uQl@Z_Q zAU{5{8=#-Xxhnzjp#uf*h@~wchstR#>naI7`1~W0OwPnKk)EiT$*gs^lVDE{Lap3v zo5Tii9=I3YI5nJO+adwlcM=8%F4%=%op*Rk(*v&vxW|h}3YcV#_5BcXSP7XWKOU`O z&*k(YJI_dSwXU34^+-is<|o5Nxe*xop~t(#Bw(pay`%topkiCCvsP`j(qNYoL7-YG z%cdnvxCb>QkeNdB+|pZa$SBt`0v@KL)FUgI3*17aRCuLaOnw6R5M5o%V<~Z9kf(H} z{gJM(Gj_+A6!(E=0x+BkzXp;RD8be-_5PWY9=k(&!5gw@@2~@v+t89sA!#BX1D=R7 zEL>z4<~~52d!@17#)Jj7GiN`4@iaq5L%xPw17>yZ2zG$CPn*KRPouRiQCI4EFQ|t3 zdpb0c=5lL26D~I+uf=cXqD7YsTCm^9%AkR)dA`r|-Ozad>3xz}my7YBIYDT8N?x!` zE0ZoBz<*P^L_y0T%YCu*@XSM$<)Gj+%bG@w($F$qEfdm!pP+QJ=PQO`AWD8)c? zvQB{s5pCUyI?xC52nv+dL(cHNr+IF**|8JfEBWxZjHLW$@*&ptcOewGR;;=Ujx5U_ zcaKArTrx>9E@6zpEnnhgOBY2A%v~BpY%6JVx+6%D_m`CG;BK))W{7W|aY|c1aGQe( zNHSp;`?gQQj&}U@r#mvS5r4oM6eZQBQhYI)Q((%%SC&-U52{nLm7#sPv$X$2`*|xd zE0*h)OAg~1N~=Y6#d(=N8wkqaGyvTO!YU0pA7%K9Yy!p#pb%yhDxj7ZQfWaY7HG7N z^8|z;@p9B_e*7_S6N1f}%4x5XA35CBBYhkuHe6GFIG&iD?k+50imZITn71Uf-cgqG2M0AK%B#IReXp849&IW= zgxW<-YX@IuT*ngp``T$UUnLTq#6E@arH#4jzR_=jCQU^u+2QX{mCybVc4c7JY>Jw1 zo@H!CvunSAC#1_2ix!&_5mn3qf{x#oT4UARFl+pBb`(($1CVyr=MTKbv2td9eX7{? 
z$*T$gd&_oC@+0MqWpzCm??0+qJ4Qp~X8{-=t^j7hj}l57>&BOYEa6d%ne(2Lf-Nb- z(q~vN?bnLEZCXT+n^F#?TmFd>n%=TU3t9sO)wV}dG($y$mQxzJUW8o+R|@IUilPs~ zmf3T(|8)kX8#A^93Nzonz!iYs(;DgAI`k6Oz>lv>8=FjA6%jg`s~Fm$e0E@hRfuyf zt_OgeQl&#?;mdSZe%KSM$<38B+XLPIi_P1l&GHYMiHz}io)&yD;)CFHcOrC%S~~#C zeE>L(YDhnAYpH0*CBh)(@f-CXcfz;Mm)KQmT93r4{|n44~3 z5^yFS7#Y9|f}V5f!I)DIUhs-nawj{)CCr3gi4P1yC;wyztDg9)b{HDqDYJ(shiSWL zc1*#dW8h)>3?8ayUC%VJky)jrOZ=D@@!5gFo*aCJX8l<{R*QT_!U3Uz$&FhP6^sMX z7oQ9I!RE>kj&5oEEu+D2ixcOtisPHLv!H7ivBRM{mg(07PR0Nm!|iW_rll?_q}8Zu z>%cBYW=H5;LB3Mzy7`)xKA#%oFWe63AjEX=)l(Djbxe(g{2Qswv5miWRo??|Tt5=+ zLwh4h6tmV zh|(p{-ng$$<``IBJoNhUinQXJ8SS#OG^gtjbun&Ah5A%t5dpD@%X5;c{RMoV>IK{f z*)e|K7!x?{Jyu*{A`c~7u0)bb{t3Kys$p(^rBT^|po*bW<$MnAB?cT0osgE#Bh-@z zH95FkI&Ao?x+_UT=zFMr1NFxEm3PB4J*tym+Dy!^ML{}Y-FiRSH$R$iRn6xuI%LFf z;W2Q^EuP6O{d6GjX$#eCr&rAx68SO^`Gx5BJ37tZcoKwbMKTVkpe{h<&y(Di!9ayO zetHXhk6~EKUT2v?@>|~KCJ_XEAD54qaFY8%b_aoT zY>%ESk@DJd&~D1?Va9#jTaj0oo-~8Kw^_OSy5347j+poC-5#0y5^$}>PLDxF=EL|j7kEbuJv7CA_T(EpSW8oPm3se z(AIZc$YFVJgdG59nn7r(>f;wAEGd251UUgeDUyysS6rpSgx^?NZB=91l`@fl6fcrI zUY=St)kCRvJl2=&$Vzw9!)j`jHABvdE{g`j9+J4y3ZsSJkkmO;rAwTCd(Zlyd-EVm z4lDJX24^ePd(29&qa~m~V&sG|$`0!4o{?z*@s3w;HUBi0#vh-G9wdzd>N(|n>hjl{ za%)xe8LLIBghTkUO28Jl;84#($VVxIfx^RMiB~b3CkDXflB&sp+!u@)HEMKUE?GTA z2^;5k9}92hGwIEl$FscZ#$>(-KgOg)r5XKhFQ?a19eqnb!~)Ky*@3(cJ0|KTH`vL5 zXH7vs@F8J;T#S$-6FX+7S+DW-O<@3c98V^6ruq~ltIgCdYI&(=IPxg+*V+PpeAT=)O=us?@awNP_Xp_o869~?-8z(D<Wcm4WpxIDf%hGcrC^nOmyR+Ed3c)t~OOEVgqV<@H^P%#a@L{(Kv9-7E|Xz0n+ zJ~^vG0$Z<;5fc#yM}`7Jqg!4pyhzn@v`{MS73i}?Nip@Z#=H@w(1m}t&MGzFq69nm z+))W@hPuYkhuR4dEXZKc{qgP*en^=WwCnK^`@>g~Oo`vN3ZJlu-OCUN3hC%Z%cKV( z%70?DwJ{baIKCT>$0#5Y8>0;SEns&)Mz6AsfwmL^aSMWO;*Qmt##L9ppur=++r&<5 z2*&q>!rI^fmM}R07y_O+_Sm!$s%Uv3kGaJMlz{UNwnTaMzM~Yrf6ALDQs-G8xjgu@ zu{7=~djLk!=HJT`4m^oA(miv+FKhRt`kNZwIRk5bWvYmJuVo7w-<6&j13^QAw3 z3(-Fh6pW0sq4Ohubt3cz{gVPE3+II!zvUVYx$3mg5G$ySg;DcW^gDwgcFKa-5>F?h zsJh4}#t?@jMVNsFY-1KRCZK*tOa@T8d}XBas4|KTj|5O_WsL1QG1V1|*%BX8Cz|77 
zRd$9Av{|3!ugpR9*c=8Ub+UuXrB~sFEgJ`a|$LMqb!;Zn1r;Fgc(lhBE^1~N(pcwflTxqQroT(?1TNLj>_3KB*2GxXTS z%gUKmP%;ENcW*B(^)D|;G@0;YgpLV?0HyhVh|F}=RYUuex>+czSk&M%@p+_7w2g&a zm^B^qmjl*Ta4!5g07Z9%(w+?%W8ese>n4&d6!E{f9k*-kiO zBvRFfy^u{t5_9^>{yRK5EGVqT z23?f{s2}nrMjC-CBUcLyma$s89<`zoLQL~YLmvg$4q8};!JI=t{c(*8ztt4b{c_xh zTpzGHRhEvr9ZO?n2}7BU{I_iJ?Hiyc4|B#Ak3kWx!j1CXwpR!%5iXPA>j zHMx@-TnBkHpQd(-#ZqGpB*3y3h2H-c{Y4P`M`&yvxm^0rvhjdN?3F^l)GAKHbnM=J3<{9DFETuxvYT>GVx zy`xyMsBDq6r0*RHN@KVV^1}1e_1pJR+;xSD_jyv^9==04{5nKM6X0MUek00RW9A<3 zDzE=CU!Mc14^GCyn%Ad&<6teN3@?trar4;7*vqp3!rx&rY*>T2kNW{v{>*1Yg-lQF zNE=KUx4kj7v`0O@asO3(^fso;YwO3*bL7VnN)5+a$?oHEu-w;M<%$E|UsA<5IzH-g z>6!ukKKv~(3p^I%Y!ePbYo+wvXg<6e7_lf~m#H?{QaMx`kG{s1$6X#xyUR|-@*8`i zLB9`=|Fg-wG83h`8Pai(TtfKop4#dm?5Y@g-Hb}mBX4|!ZCr5Py63-o7V&*?L8+Cy zcyiya0hj)2u& zl#56~uw%hR zhpwp3> zUm8J(6)-@^;IjpIArY6Jui`JB{5^>)2fF=`JG0HQ8MzTUeoloRIVyIx1cHR|x%o-g zSzXr+R)T|{6)><RPEs`je?o{>`vaGUQJDo9t zRdQc@Y#OS)D9(c{jNcCg@JscC=dy+~;z8Q92fL2pUtD*fys?M%pSd$TYU}PDOxJR)y-u0fx@Mqs^Y@JzJmI|9~u$H^x(!h1dn(;tkv^a z{zQTITf_HfZJG-%sU?d2ea0_HGQw9Vg09-j`oa7f#G+6*p?{^1yvF!=lm%^&*GxMG z;b3k?3Eb5SnMkR%?=BwwDu4s;E&=f~2xDSnbpfa^ zgIV$;Z4km(YgfA`NS~sR{k;=SFPrvz(t6;*#B{@3YEax?xsbnX!LlVos9+KS;b`ZZ zK9niy#@ULCoG!#X#8Ms2>58Pr1%r@f72iv+!u79QsGQTf1EkrX_kUEWRQ`J9Lp9?dE@Na+)G>A8ztMt80z&t=dPu zhTtsz;rZb5i0rC$%wz0^+5yhO_(}guu&Ba?00%n>*MFV&2K)(`-AKMVu=!~voyqRo zr-7%nA%$%|o2rG8I{AMg+v^I0{ALU9ao;E%0DM4dYJlCp6p#Uph2YTIT+BsJ|!i5O@fe zA{IBE?P~eEdcOnT*1BMmYCvH0!QJyOg!t4D-Bo+K@A7>Al1f{$;{9eo&F(e1?K7ue zo^85I0B-Wo`V0h6)>)eO0mD|ZG<)AsbTMhCm2rS{n*|0bv<_YzMdladd9CMl5?lO; zR5^gKcxw%>;jdSZDBG&h@@&1Wk_4}8KJMiVhkf9q3=sn8n$PWz5wfn3By?B?gs($z zR^64Q=VpYfQ|IkMH^&2!`9!Uzh!tdAA%NJypVq~~y?K&@3dMHiR-n|sSeQ^4$;ZB3 zXXntig$b@^Vyea*vR(e zGbwcMW?011#TQ!`Atk(`VJm2-4pK$?t*6pZS8oZkK{vSzGT<`eywD~X3+&?BK6+z; zj>L4LDwoQk9p5f?sRtC+Xh63Q{*y@O?-PMU_}o%Kwzn-tMq7YB=!J2un;ooP6dgIo z0T6km_-BMGNdL)RY43&Bjuy493d&}0%MM*zs61&kP%{MDK^x5QYcXbf7S?oJ-ww_UK7>Lpi+?^!<#q-#f@u|#9aG20Xv 
zMyO;aHddi1yG`x6?#N(ag|I6KN4Zurz9QwEloGdRj!7O0FqD!64!Io#$8i_9(67jV zI)mKoXo|lllkj2X)%xdY=h5BNXd-%RDy`b+GNv&9N}w?3@g?0K|1bo(pIo)~W70Ev z3@jXpV7hWA)uF_(rpcF1RH)2FxniwLPZjp4=DcZzX=x+hUv4{fpcMBT%UUN`H;(>F z)&qF~)!ELh5Rti+c9h_QhOO@?{x1|iz;=N&m!p8Xv%z)3^rHd-2_J(QqTk>+3;ae; z?nBH5|B0wAk4yPI#R$_hc{n6NtNhc{&*LYd=eJSUwd~51UqSI{ZLGHxSa{)K(I|g| zDOKX`d{(`LNaQ=~U21<=`d>^%IhHi}nLXY6Q&|Ko*UdCG6YgBn)m?(hQ=;lFl|5v= zA2D^X(HVLjLnOODJ7g^@nCp%3z0LB%u7o!RZ-yWnD-sU=HV#u)@$ii~S4idfqb&BC zZHesLe~xZFeP1pLFN(mLC%(WS`hzTV@T-UWomB2SY#7Fa3kl7gD=R)*MM`!WdX;y* zK_bmgX{jOr{Cg{GDDESvEP0@1g(p>0!M7`s*$0>_6kbPb5;5tJqLu%taFC7pDkufE zhbk3vip2B0KY>5DfTZG~b1OC=v)2%;zqsFI3P$M?bBeAOQLyEX*|W!$mXi=u^45UW zF@h>6JUZh_;xhs|Y2YI8Fz}wg)Mt8aW=8qAGveF`(?u{TH7)} z3N1Dw1`yL49D&j=J^xQyGS>%{;3O|4SjqygHJKULq=WhbIfrar;KJo=-Xc&}AY1mi zb4>>c?!!kqGi9NwMl+mCk>z}Qz?%ipB9x~Fa*6Q=)3JRebny){qS3u4JN+2S+@A@t zz6$4s)UIp4e9chHE0W;~H%j%XoDm=H_j+*GCW+a=T=?vwvV}>;v$H;o;t&_|fN zkjIG~Yx-!qltUtbeA~RH5MqM=#O;zsQ(~_AA+f9%9Mn?_AY^aUN^phAd?{KwS<$r(aS zwjnB*4fEoincnsOM#b~%sis*m0R4-y*=@~WaHydOdO7_4(s%EC7^KKYW|mnXhd*np951G`F=&pgMbun; zaT9QH5q9<#GGXd+g1|U3R?9YU4cA>En|95Hba8$&!G(xfFZy%#h>Cg_=wT-TPQRao zP#~g=-?W!W9{hpvlqV8^TWx50j-|LI=K!uW9-;3ycalDF%6*BIgeI2_CZ{m|aKQ9|0jC|gD6WK2EJxzK(!;jc3qo41X|sFzc{?f$UJ31E zGSM&(lZc7X_8>Xaf$oB)S*j^qA~f^NXjJazlgF!s@vOY4kl1vywuJ%$RA5V&Pl!Iz zOnW6r2NQMcsp}#dsL6!^sm9uVMGJJvFrWm__r! 
zJx!P1d;&?D#kN3*#MQp0Y|RVt=r02-&THh*1NM*pF1&bv6Hjg1A#|=mgP5`NC;(Si zK`dB%Fd-KlQb8tCg7Z*6p9DyQq|9ucg(bjIU9{G3sdhaB_1a{iRgNiv-|c|ua-BEX z19TK{X7UNwwoUW%6(#I|J&-_fxf*P54xx7s3wX@lPdNGmM zIl`r|t6-~+ER~k}M6NyFCUwVgE)p9+HbVpmX8ToUN~hMjSH9tFpzkM#j4gD)3m5`> zxDJ|vrQ*5d@6BCy2tP%;V*9*MPkSR;_#K=F!~l%G@`ul`-T9-{Nu@AJ{GU26pC*Za z9ZTLw|Gv`P2>j!hoh^__w4=H=?q-g$fI4gUT=1PRD0)>~k{&Q_PuKN7{#m_KC_r}Z zwA>|n&Z;yx`o{2*b1*L?DgA?yt*n9v`Q_(%a5{uel))uou-;cc>G9iW;#~}S$TaX6ni!)nAeVi&(y!SLH8j* zc5OEzotTF5%Cfc}Wh}?|ToTG0ci-^QbTH~9Omp7UP}esa+>Usy-M+KlWc1(cRp7LT z{p+*%s-ymQM56PBsMQDibAcgGg0hOFv!8@k{h1AYvKKCL%rI#}*5Fk>3qo%HXl^1- zn+i?jZ+e*4hQFyrggDsm&F)NTum1Rv2RcQX#r22!nr^+n>{02Hy1V#tM4dXeoCH5s z23g*yZ)_yWagKOs|I$Y3V|^2Q7a}f7BUV#DudN0aOWyp=XX5@GcSg8 zeLD=b_v$SjZ8dExbUYx_?3W?C$~OO+9)A8I`B}1vItq*e)-9+%01(GugyDI-XGSB4 zP*06rcTkgC)0ZZ_TtrlCk03}V2?-=f3!#JbCQ=M0kWdmxLPw+tL1}`5A_z7RX(Gs_ zDIi|J5fqRjRgfwoMJ0526VUrvzIWz*W-{mO@9ge5yZhVoPo7ECPPxa9IIHr%b(9{R z%R7pQ8_if}f>l#p)`e8?$^r+j!Lld zldFc4g`!|W%30(S!6*02i#3luSWF$sH((Eg5JSr+CA})tUtC0nKgrQ~eoci#3441oH z`=TdRx(^HSRNEJw?ly-C9)Q18c^B^xt9LE@tSZ&Q#ev_#jw_kNA*sB=L?b7-G3O@p zI;K+4{wY#yp*1r(t7vL}oLxdV=YABblgProksFaq)}qIQ{sgCr{TU11WRi$+MzD zwe!bTB**M^4@)6ik{q*`AkwHiADMP%e&U`My6gW}q@sU`X=f7>cCjdI@xI+MS&&$E zhpoX$ZH(;$pX&Q&rP8-i88paL$C0PmXCSv{bx-KVKsNQ`{N52l*cK5Ou?O@OA?4y|F-Mfqq@l8y3 zj()lKDX?8q?GZ9t(BX}E0ybjq2=yt;giJ|`#<74VUMPG0-_E-Q<8;el$Xgr+)EM^lXCI#P*03~n1Z$6poaifl zG#TMi^2Uda3d-8wp=@oke%w)qLv_mR2$S$bnd9l31v3IEZ>anQA1@ggIY;xVp1IX! 
z>>q>A1*VYaf}Q8{sD| zanQOs*?^pvhBahow^`ggHg~3^wnO;MJ`jQ+Iy0Gj_7ZeFU{> zc@gwU_v6~~m&YQxjr>xwdgB+CO~V5oN#1BVWl%9~_ClhCwC7_0zsCV;utaUH0`V+^Q{KFQ}9)nvo0cg!z(Frk4=fz#>5+*vXZ0)x| zxEd|-%v@py+}+JQPu0^7S7ebI{{HJg0ZL)KO9unS3tT&(ux36JraC)M$`0vsV84%EPtS4K%$Qz)@5j|ea&|i4?A`d&=P)k?5=w3NQe>;ktm*?RZSEK1 z`eVF(@Z+&@%)4cL3mP}`0x&NV)^g6W8~_c0+s%iqk*WC4V=8;ShYbim?l4ME@r20G zr<;6hAn~Ls)IUtDtscjF9u5pK5BIiR*j;jU#oS!_-ucn#?yZmwVo zu``j+1|YcE!XGEQ*AA6;hRlUal>7L$&_OY0uMVI$?aVOR~gf>WEOSol|fDS=C-F zxHQ4l)f$JGHV*ylweS4{TcNlR_9)E~p|PTeZugL;^Z3HzGVhO8wm-@~8Q#X7=b^DzUduE4P{vlQzoZbGdFP;hjbqu+p-s!W zbJy!-cgvJ$3?B_1an4R?b?UW*kj=uX^7J%qE2E|TL=uzjU%zY)<-MO`-hMd+_D*7I zYUGKaSWQ!ELwu9irTE?xg?m=EpcO$PS?u4eD|u_a>9rk|yXx?~8Is_!i&$S+(o^)+ zXk)G_*P!**dJY>Vi_nhAFWg;C=yS&e8SW}~TDRUuxX&YST^M&~WOFGwbo@$KzA1?d zOTdsRBrMvCF(bOTbHM=+z!(7lfGQ!N;2${RJFcnUc2ZWJ5<2GKDKhNglp-jjkQ z(ML}-1*?rcN5o*c)YZAZyAH^3Ol~Daf_TE_RCN*tZ%1fy7e*!J%sxkJi1f2Wy})15 zr829{(=K~;8k`J!1~30AWfFvUwpW%F-QY->J^PoRw?v)lVC3%h24C;kmWv0zJuaLo z)*DZO3j2Smse7k#QruUr<4pxZ0ll}{O>uGlD#x*orLzFfthC?zv;@@P;M&ps^>U&{ z>AL$Ll?>*8A-c0(u~>Kp^SNNxeO`9$)v~=BHKRNE+BeKunjy+;mdhX$8~>otOVE4b zy;JB!^3e26F0gfDr>x8?36hD!;Vaw;9%C#`Z|MIGd{;*vk*isBT`JUN_n4yMqSe4; z*Z?`hPSFU{;F%O&Vb+{#hrNPr^qGfcrL6Xm52^5CPw#e9>Av27(0TkL>AB%UxQ2-cd$HJ0=^|Arr|0n4G5E#d& zGY<)yfJpg0$cM*cv zcKB-!f$c2sG=(i5L&1^h>7hpgC@Ly$j}T?}_Q=4XQ0Oo7A8QO;nXbo>Au!O6Jsk0` z?hjuKA3uM7*#E$`YXB%%5v>V$MoR*WCbnQS=J!1P$p|yF8HuSRO792PAP6inoe_!ZzkcLVM%z=g9IzlADy& z_BLiDBF6U|{qFdmt$sSb;!nHpFdBuXGmhxCyS1u3$hH`=aeE=mG zOc4gS0y`KCf>5T%&3FL9cMJrDA)vowAP|^NwcquW>0ADS!5~U-`d#@827)Ss=*RyJ zLx6tw2LmJi^an;D{?LO!A@pMW;tvLaLjTZH1VR5epCVX^-a&Smq$!t$2eJ?^`=SY4g1?JaL9D#_ncQkRv##(B%! 
z;@K0|=PtJ#U7tL+cJaK9o(9Q)aIRg$zJ`T^jg5_qi*p^HlmH(O51;BL2_Y#RHJF}` znwIv~9d4Few>cSUX<3EXIPc!$=jR8rh)4+YigWYv^Zqgj1}-ixJ{~?L0Rbg111$sZ zzx_bBfrxRiVsY^>F>Zsd5My8xW1u@hU;s`mj9(w1zdkUoU|z+#hK+-J9S=C6nh10S z0~7PgRZJ|bt5<=ueSz;mSBbH1GTf8CMxyZq`?ez~@2luc9LD?Q?PQw6dw2MrI{D*X zC#RsKqGn=dVP#|I7Z4N@77>+sAS)-Yps1v!t)r{=NZ-H=K$fMIwT-ijtDC!rr&qx1 zz@XqaA)zs`@8aSU-X|tyW#{DPob-D;(1M(V6A#xQu+7dt^_YhOd*~;a_Az`~upqko{|b`Trk5 z_P+u9J6uyBd`t{r@i2)&5YWjvYu3x_f5gN8ct;G6ChSwKr}~4Dmf_fHIaLUIT1Arz z8gx6a91SwzgMPdAQp5_y^=)6uUc;%a;Z9^85sa0X^2~u!ihk z11^Ahph1VkYE8|CjP}%QIR|Tx+bk4ctio9$Q@4Kf=+J#%gS;Y*F5uXWPaxXhL8 zg_LhV`9$YW4bIgrF^+bTSJ0sE1!zzpEC&tx0yaT|+IUcX_3RH^U2cR^Ju)Y9!pXi+ z%HPjG_2eQ4wptf$Hz0@D@YX#%G^ipQMcxvN21)sPqd}n73-*;l+%^`fV^XNKS_R>E zFVlg~jX#|M`Qdt)cm;f+rDBXbySDC1xBl%I-i$_GkOGJ#t&S|$;ci_`sH?p6ZS3pI z_`H1nV$=?Ii|Ll$MC!;Hs-#B!7Ps2akMI%bH$oV6fdUP(Fbd<%qHw!j#H%O_(i}mJ z6e0^+f1OS3e>$5o7WHOh(LUw;P-~rg?*fJDKViN)9rx9G6s#2ACz6?ng1?CW)_d*C zfX8RGiKGGc=BzjII&xxzsVD#0T;fPutMb26Tt+FMB~GA0KTK5qw-@U4Ru;wKw~-60 zt7*Z{sSO}iMNsy?VDX>KTdA^I0#+{v;MAq=>she!{;+Q@8xIs_R~&xb*(%j zWSi9EH59j*{~6Ib3MCYq+jYqDr}^~b?8mx|IQM(;&?dp0OJr4S(jBpLmV2S^3?J5D1rQ z1rrV0`lJ*?ugOCL)~v=MPS&hY(4WU22L~J+o%8$bw&0^> zA=FD--Jka5%RqcbSn~%oOwJdLgYGm1nnCy2*ILqM_KO^5SK+KaIz>J^2IcTDr|3Is7ZjSC7^u z&K*C`6=oV$e~xm`<;P`CtiQ?j38%M@H2kH)t?MH`q4wO<{rjT`-n=4F$0P?Ulnojb zJZEWaL$S>gZbyA=%H2IqKkHvqK|*qGg;3+DGz#xN;?zOavMyfzIGRik@Aa8HCI~zv z!T!#Vw#B>I%P_gvx1j`&m^ibKn96A1%gNX4dTq|xIdg=aLy6Jk{kON6+=UVhhRpZYfV%6AGS*)?|IX7`Tx>nq+87xG8u}-)OZz^+__9bA*j1PQCZur*=pXgt`_h5@wmTAvm!HAF5)F z&E{Kg%p@ezHaqDW6hPqWx)lg?pF z>nrhUJHgTBs(J6$g9r2sbrXli*ji()Rl}2>ayS!MS2#cDQi;rn>~5$cVDF!P@yW`P zDVDDK&^t63exWmeb0cADJt%KG(o{fvWJ`*k|M&bGx_BPx&;=4#B>CD>qbw9r&D#^{Wn zrrAB*fFIp+)eJsS{B^V8T?;MZ&aT&*H(v5Pe>E;%ase!9nirnOr8%RP4HLJRgKu5+du8R@dDH>0mV;0m3Mvg6wX+iv92qY72BQTLuY z_k*sm$z@F)Pt3cY&E#^PxSuEZ=foVFdnbF|d7GheaCU2_{i^v6WBw=^%)4+F*zV9E z^QqjGJQ~+{{ZeH5-Iy1U38Ll@VY(irG8Fw-mKve5gFs0FwNjP2V!g)rTecPR^(%Q} z{Kk40PYEoQ>#Ggh38NiJ+h1#zIf3yC2TDovWNEEduOD{3lq2A!`Ba5CrYTtG&2+N(nqp3L*K-3| 
zcwfC$FwAr`LpxBO-q@g`b{@WJR$N3q+j*QscQjsEq;i#zJ>Iy3J?9X~W@<396rG)* zRiQ`}XJI`?B(E;#QPdDP*ixNqvT<&={qBThy5X_vjV~Sc%e5@wk>jtQj>X!_6gjJM z$!DE z`$2DJMUHdiOsx8*=(SwW1*@t%7wj{f8V^2O*rhXQMenbtR@g zr~Tz_2m8zErCQZrT!s0K{RUflEynWG9`(k3XnEXC_-X3(Lb_Td8g$i8+h*#r4CO|l zF|kXbxVL(E zPg)JHW@~D=w|+%Z*UNQijBi&rUf$fa+O;Q#Z|lbL;+)`bPqlko%=R?PJC~SRZ)a09 zUp#(O3r@i3h_4oiTK0d8HU24CsNe)-B#Y)+>2_)7}lErkiYeU3)dz|1Q+KUd%mL zk7N^~l9=c}tk=*A{W`US1_^%GLJ%G&9GeDXoz+qYA6H?7d$Og}5bh6JKQh@QpW{iE zZ*00S;xXsWcBamhX|*;HjVh)798qX0o6D)$&KaLj=*}^)DnvHam%-My9`(?DOB1ll z=D}1EkHZ`)eZ#&t`gG5qoLJI0Zcn818*y%r8O7>w!y1s)^^L|XALmbz2(?S73>uV! zfD8Tf!9=|S!Zd#~oDaIdV>+A`CQGVU<+Gq6OA7k)_~YO}S1+V>MSB9W)$MQznkcpK zNJh!3n4y{g1@sF6$O8p9C2rY*&rhPzAZaqE!j0d0Ui3ItR}p;MnR^*|%G84-a;yFy z+NZkI)+gKt-&wl2^hR$=-h zpP29RRlzQ%Sp{mP*2mlS6-E|i>P-y9DZLryaZ@)u3NjJoaUwOu#`cC;3AcZIw^`oEmF%|3^e11Y7{%l_ z(dI7gi>fc2N6T~jw!I0{gsS^Vb-cqSxeWN@dOa0!LUR+#;h-xXB@%ch2V6%~u6PxF zNy$@&@lp746gJzKclNz4O9fO_j7iwb!{6zTXgsP-W3H$Tnp0@{gxqSk4veUXN`HU1 zTN+c`jJb4X%fL0IqDP`vH`p@Ct(YnjYW<+8x3h>r?(I7Xn(Me%vPDtxSmhKp^EUX8 zCv$7JXGs`zZOr+mwB-v(|rPkK<5uHnw$Y_XMp&ZB~61?lcchRUY+c-^?ec_IB)BiTrnYW^_mI=>oe#1-h!=-y znM$BqCM>e_248q;*2hrRLe}3J&KxztpNWPOs~L z#M(ON8@9>{J&sEGLwZgBh@RrV!Q6)sLWU;wTzU)wCN}kW35*2VTUY{xhtHp5WDtT! zpeqDsfZrH_27S9-rX#_ElA3@V?SfJM?5jRaEzG`ElB9rqQIwV?Rmb@2fz8F8?B{^g zx&5_XdCO|KR%~?LBH|qyL_r%3C(}P1w5Xa^2A4l&$ni}?uyb%;I35r=X??*DPqVGa zs?}Nj3RUXi>ySP2g;F@jRE@eaHS86CdUBgP-S&RwCQP*PX z^}!NaHS0pI&i*y|kGNEosWW>Wp*yVETQ|2aZBb8-9=Suuo0_8g1S9Tv4IZdv=x=Hm zD=~$xneyCQm13U`dV#Y{wPlRY8Zi*uJ9sT-2@F&eCI8pWuYLy@9P6?~H|N zO0mse!W%K@MykU;$nhxg$1nGO$zfeI7hsh#m7@MikRCjU1(_ zxQ1ETVRrCq_$J24N-5l`K{*+YikpozQnR;|b6arSmMpxXN7D>lIP*+Q-+Q~kwm)N| ztNyS_T+^tho~9wqE3_|aJKDp4U3c_$nD&R(2%;(>9Q@~rv^QVKv*deivX+dEIat(} z6!z&hPS+j+Zi)2j@|Mw~i{#x(&Mzj?)D`#p@VOH~1f3Qyyv~nHrd6(>h73l^{5~59 zG&I(yo0Tyf=v~jgLlVjNYRfo|OsOhi_4HA2P*`nj>oRm;F?-lG+54*i<@3rrq9rGa z_WKzJSQ?}>0ga%k^#RHk`h^PA5uAIjk9D< zR+fsSlI2f^pMQVxDbuEx<`M^)w7CiL>~rswyMZeq-vXiDvC7KJm6zzxmE+%ioE6H! 
zr=Atc3i{ho7Q5(x^z+C^wPvD0BOY7HKY#G09~ycI(e~7D-J^hg8-No>4*_+ys1cVuCqzfvv(Av9Ccq-cj#LuLR`Ou(ifRj7| z2#Dd-Mznsz%Y!a_L4y!QN9U~PafGYLM^Njht;u_)E1uXtj|&9GUK@7LA7YI{_N<_% z<6f7fzDmLSC{9RKx#{33&%Ss7ir9e$`i0^l8q^N3#xmgk=UR3+zp{~j$`3)>H2Z~0 z=E|Hw$-z^PU~5Qrmr}9o&EV;eS5axk3qjAjRJT^po|1=5QU#G_@mfo4$~lX|HU9XAjFpLK{51wA1;? z662m5+YGN3?6d$1jPCl~Kh5$#j#>V1tTjg8PXfUyjTsG!9%?r|x^xCSKe2jxQdp!K ze;$A0;QyEgX!8Ltk@KG3CDaHF+PE`54rg0(IHN_9p+Tl_$ho3`tIMBW!#_Qz{{W;GFI0%9FbeyydHw2!x|$V!kag;kJQG`C$ETp>#@luWZ_t< z4`dJGLI4}+n;2mG7(tiik#|sFz<$~R?59_bfNi7&*ci5eWsvWn;0ALU5AlDSfL162s-sk*rVMqC~nEu&LU&Hsq`K37-KetzCoI z3UCzDHws;Y+%V}PGXG)CcbgGDi08yP;z^uQr4EPHm zi!6I+&?PqD8o-M}+NA$Lct0%Wwoky+Fzz!dl(iJ+&p6H>tO8)gM-Om4x&!{Pt#d%qC^D5b)Pn)r zp?7wn^&ACo`-q~o@-yKb7k&*{C%XSkDZ!@8KAnRSZM@iQkV_!z3Yh0AUi%)BEptnT zQI64myh(LVQn+DTNtm>ZzZj9^NZ{}u0#mEuDUdpLFjZwUzT-F zDU(%&D;$WoZPi+>tc#zU!?hM&(;%Ue0!{IK=NyLZmdBnsRdF{>xo^Y#^=QFE)z%iJ zZ}^)SN;ghw<`|Ob1nIaqT@ByZ_to9jS7(dSd?C8tnkz39;czX0;tiJz?oL$*otvj7 ziPP}Cwzv0*R_JdCVFtutJvo(EY#8H*fUTXUP4oqei%gqt8ciz_S*@xUEx(`wmntp{ z5!s%*1zRpA@8WKT6+P;#DRf+YWwn`YQ=cH*4f@RMu`JTXabs#><1E=?2*W}ehKRnEu2r?KCm^vFM7&@YPE+R z#@N*PC?r02bJ37>dH<)E;t%GfagyW!JXMYD(4$L0cg~4{8yiK1qCqS6d!~S=0lIOg zDz}o>U6h(NtNzBzzXj0#)7K9^Doa9pTh^476u1R8cPB~KzTZo{foX<)>oKCoW-bpy zR;OaD1@V@&@1M|vXxYSaW6dFxrs;M1y@Zp8HurzZ0!pv(~oT278=U{9ad_ zupME&59`oP&9ZDYmg8939oQ5iQxrK1`u;?XO7f`G%$o}qAg*yOS-jB9V#!@z`BvE% zeTVSY_}6!*cxN7m+f#Piel5s;FQO(qQYy|!9t<-kczU8a%w5`HD%q((gb`3BP5q>I ztBRy;==t$U6u%!`oiCHSYE8Hu>4{zXt@U|2+xwyMJ!YL2O~boQA`8z#vVEQC>_1iC zTE1kub2af{_AW*pN&Ipy#BjVM8&1A7S)Ehk#}jD3k{YJ6nnu?Tq_$qAWk0;-x@`iN z)w^4-Mq3`MHi9HZ5ad@TzBZWcP{hjiJ8)gW!Oy_Y{Vb+XYHU;YU5F7-eIowOq4& z=8NfmZ;PWOsr1iv#)h3!lC{fGxQ5oXh0b?`5(t>Pgh5jSqR|1T+NzF9gPRW5$t>tS zy6Q@2t1dpzRb(T`6P&#!)Q$OL@q((Dg^i=1X>+T$%x1hK)Y^uJj>AOM2(|?0I8{vL zD(gq7eKn>v1J@(A?K5WPTeN5NEi9%dom3s?uLgAvBV}B{K|UIeg{S_8YUNu8Q?gLb zD`uot-^kovMat>r>mHSLqd}cha_>*@T;24(7nQ#FMAG{KbJSD)zSlvd$cBl#@1g^- zkq36yxq;RGWu+$EHRFlcovW4*>DF}EG>V+0r{)+N5y02 
zddcb6XADmhbZzIctql%^4#M8>tPus^vKE=WSool>$7sJp;40rWM_JB1vfk;%DMaMA z`Dk#(x;(*OFurdg+^m|pkw9`KDL9N@g-63qa&s?iJa|YD@o`_|;j6brIcd$<5^83u zwFuo1PfJTzYrk6Jxod(;SFP!#TD@^HYUxgwo1JQ=BJ#2kWpi6#0^~>E(oXwGRogZ0RWx1|C7IvIFi8Znc8<6f#R=^dE#&I$n3ImT{7(RPEKX)GGE! zR*CHpG6*bd;A9N42vyF0Zf->@+$U~sPIB@>it)v9A#D}hA-UHfoVDPAN&NjiVLdLwJTf8bTyPuC7ufC>Sn5Eh{ zS!N<<+>gO1g82!3`4fCW{dmsaExb(Z^)t^WaJp|5c$L|~T@!jHJi$`6icHonXi#lf z;C(aJM_BEcSBV?bQZPOV7wh`53?JdsDCflsM(XS81R`r6O>)HfHmc&%5lR%t?!kgx zQ~5d@$c|b(isHZ&OUih-O;ylbgt~PJT)j4l^(DxZ_jfE2TdFdm~!1p?7z7zR?F%}30lWY^Vs3!Tpq33 zGNg1~HGJ?k&ud#9gwmdUWwdrkXQ|PEfQ40kT07W29JM{xo+^@0BzB_$S*D2`mR#!Y zb{Sl&%^2b?B|D@}J=dSH{y47l6h9{+qG#Yf)}0*|#*s1)Z`HAFardQd)pgY*4i(u_ zm8u|x&u9=gS47)Wc?mw9#a6_f^WLWVJso%oZs?r8Lupf^)#O4AW^4?IQ;H>=(0xqK zBzGyDm_Xj(Lrrwn#6nAi%i!IQX~whV9Eo_ub>W_YZyr-%A`WdUUvM!AhDJD6z3EJM zcYo^JY&+;Wk8Rr!y9H{q7-t&4-7r$^$ot9?XL{dt=E_SXNQQjKL_XJ8`3`wZKSAS6 z-y3}fIrS0xHu78e7txB7#N9paOA-`44Z3me#RMiDT0!ovw%Q@v4>n?+fd6ETFf6Rf$q`3|nAI&K&wS?q>Ds^~Y@+beUw zZ`CL#!b*?j!eSWJO9rlaY_`t zSjD}|>J+J2Wow)ATX)#rWr0K__@X9aYo+u=9to)EPA@SZ(SHD~3 zH41_l3h_tWKs}bbHb^@2|ddJBxa1Yc1f7Kp3dt&)AQmLFKaF-ej#P zJo^f#>c&=ce&|cTYlMgohRsa2q$Fwcy^Iy*uV^ZwaOfQnEco=+EN|0vuV!9f z)F)d-^lk+Oy}1W3ZrJ7Qba?=r^pHFkOgZNPlO9d(ByY%h$XFL~E$Y~uja~c|nat%S z8gy3#sB0vfo}2B+pENR=5uD^&vNm^+)rf&I7)546mvE~qIZnBc_oqhgKSy{!FEXS& zP~f!`PqEC9%#Tda7@dB!cQlvk@<7GYYEMahh0TTCaVvbpOrCL(ki4U8#$aZ%_Q23& zlOgxR=zx>#%}!C!3RdFrLYgXH8)dOqdWZwOXO438`K+FDbNr6T`vPV&Ql>hVte_so z80l>|HxzgCd?H}c?&dxf|H<%*j0BQ|Ss{`+obrWY|IPe0jd1sGDr#*Ftw^Rc^0ZOI z@w|%i3#oWF!J>)Kr&F1v5BTbk`K!+fRjUn*QWdiif>W_h9jR z_q!CFBc|Uo()!u)O9C2P5B&eKGl4{+4-3-GFRc;t)z2r0`>$L zqWw6&F}u!lvAXGiqF;g8sk83$b{}03NTh8&O)Lyg_;6cvqMP({#&ps-yAB?w9)_mmEoTr@7fxd35X} zXpnYt#dO=R^Dr4j(Vm*Bbr1;H(?aR`srXL835R9zb5nSaUhD43kxDy#*jSwhH=@VQE$?)vRme zL4JuY?gw99K~x6ZTb8~Z5>k8ViG7@siM;L@+_$?M_nCKS(LHE;exN>d>D(lD=NSvj zx4NNab=xG>)Lai(d&m00 zXSI^>cuihe*4cD=jtLFQBmv4E+qTqDV)8$Y|B1VOAcA@CIBAZ78WCi=%|Q6I^W&0~ zc+4_u5|72;a&X^`6&0VVW1b$rdDyV};0_wF2umYH+ 
zkkf72nD4Z`{}c~EiJ3?=a8KRR9`*}%Vp%fX*O0{={- zcN(#IMGZKTeq39~Ph=RW=yt>0rMllEhO$iDbm5z22vf+ey$oZ41>{@B(Y2f(ug;Gm zww1iQw>sDpxKp(yhPxhbz1CR!0#o`nh>SM2|HimRKudwDm(`#x&|e~re2mvrDUt%x zW&h9vi4D%RfPQRj9fJ@vYSpJLKwnpy>8#S*sQF!y~e()R;CJh2=|r-`Q!8oNcJf& z#-44O!oO*w1aMk~=XmxSkT<+l5%3^aaU20}S7#&%n00-0ts8oMUS6L_^sa@t{|)I9 zWBK+ux^Z6n!!XUB;_fsqkMm-w)|GkPS3qXM)tf#lPG=@JhX$SWtweRd%gHL6jbPI#G%%`KT0zDQ_Hn#|5c3G!*Xl5# zX=xd{;u)MkiSz^~l0waqQWIyb6I?DHG$p#W^*um%`>U?xEaa8YrMm>-_oBCOV&Tyv zFCZ0*eRogHaPa<+YS>_NqyH3SSLK%UIh?~60qL07_Y|~qEcom}Jk_bHHnt22vaW&R z^9!#Y98qx}DF^gRjP+c-yav&RLRhzmBqMt|V}$;?I(!H73!w&pnNCjAb{Im*?R+mI8D zo22i)V*{SXCqKP*eb^Wb_) zY}i42FEWHW5Oa@=i!8^w8+^U|lPQ@=W3zR=S7RWlhORU4^bZn__2^)yJ7R`I-&3#6 zY&LYzWeJ}tL=_WnY@9`Fh;(1y7ri@aP?xx`sfUc&nfpXX4lA`jCyO=S6k@w7ljl`_ zs4yFmK4 zCK_J?Z7^SoV(2@4hIM6a!`Iv|o=ajYk*BJyL9YXedETu;`*P2M7q|I3YVN~TYafkW zo`^REk4HgQ4c=)O=46#bapx+I$EM6qZ+;AA&6E0Tjy}dmpy*280Vv2BYyB*`v-KKL zi;SEDMuZhYQ(R{!_)#L~uut@9e3x(n6cu#C36L2z;%$EoV`9Y0kPYzQBXY`xr+T;E zc6!39aCc#dkMrI&MygV4zF{FjE0}gY<*m2Urg4R;Bh%p z8~1BW=?QAGdjhMkjHB++jtb#iU#t+_9x`3uHkNRnW7=~|T++VzFmLV*GH%?_QAo$P z3F+-}u0b(RAZcF2({qNDmEca_D8%79oRff>o-~YkD{778&7wgATOLZn)q?>e#*a?` z$DygI%B7l2R%TUWZA*3J+Jx1bzx2Bp!QR_>r6=NfDaL7wK)&s*-wV)YgojhJX5dRG zx3|!ZSJ)pWKV2^d9CUuR0Y(Qt0vDJRK<3#S0Xl((@AJ(NhA6ne+_1hTuicX++lL%IHy@ylbbE86fD_5AwSZf>RUHH=46IMYZ$8n=E1v+F16; zW=EMqMY8YpQ};%+>r!Fw6&`Cvb?~&Bo-((ZWNMurQk3|@&wKXxT!wCbOX|i7%D3Z7 ziw*{~{&^Pc!%~CQYDlmj<=FmUK37#R%D^C4x;)Dpmj$W=Q{;;rzdAyZ^Jqq5to`V{GUSob@lDx|4!oR91|j z=twf83|wqV7bZ5~d9QdlG<#`dA>3}F+I@I;y!_^BHU-N{WLfmA23{kUI?#9X3P3>$ z;tnuG_BV|Ux9yQNBywIoA4wo0NoTYf$%KHs;c>n8+2>7i{~K`rDTcncC6B)ll6d+u z{krL{fVNLU6Bx2+kKgc!>D-5RB$vD83Oa$|6>6`Awd9)fW-`^Xq4s0;m zB&dy2Z}LknE*NRLmu2-5YPnIf2jpkPC%Z!~51pU}6WK92Eo_FP$c`;xc1U~fzy)9% zZx{9NK3{+?UV~SsCwHc?)Ok$r8tM~-oGoCYK|T|62@7fj8#D&<%0Y_Q$g1>{h;-BB zSEo-KMA@_N(-vI@r2e&+S?0OR(V!W7X^VkZYdd`^=hY`zhU8~Yk4d6b?_XVX0j7H6UmY|s4=qZ%o`0zzsAotXM==-^A-F@hU(#$U#$Ki%SUr{KT)b(p1 
z%$ID|A7j*#3@XJ{C3P6STXQS2*q?WK+-MFp(!8s3`~OyILHyc{^W^c_-~+k;2&?-D@H~Emyj7rq*RAZDQjPXH|n`aH4JE9{WEc= z-;f@B81Y15jDO49-(&8Z5MlNW`E>(rO>2Q?M(nC7^%+xs^0-1BV%~}dP(oXZ^N0DV zg$ynbjki!<>vZeWU?5o^R$U3VODKNerW}#_lA7phNwcx3%4biHwuMo;G7OF-f3In=CHMEP$j=P z4n^xNlsE3GPELJ+fCkhR*vc0~mG{I#*{kETEefTW%Nv8%cy=EwzC;!cK2JT8PiC!a z(dW1eFc#6jQF{0@)&BYa|D!cv8e>PPLUIA0#~hy2sLq)^uKnT)31lA8v$%gV<=x}5 z`KL!@?aGLo%i7VUNVd#b8}1vD!|dNf)LPLXqf$N;F__dNYz@KPG;zE=(bus`dB~E# z-cWwLfdrJ(ggGx|&NvB2Ep2341wdrFt-cYLmOb?JUrx9tGHC`NtKr}#pK~H%a0pod zK#OW&J~BPQRfOf!Ldwlj%TemBiwzBQH0?>(0;@}^zBR1^o5TomH{B>LJ&40AQYq#% zwHY!XS?-^Sr1sfuItFf`xB0a#zhin^Fq5zl4oE$1v87TO0&GQ0Lzk z#i7J^z)OJWil=|BW-X4Ol%3^z$gxJ5gjjjkJl8LK!U3q495QeImYnm?U+W>nr)#5% zb=3B$cs2$dsfD(LVrmx;T+Ut3T?!O_r@JbR&vVFpi)zF7t#29iyVE4)yo05 zJr-T(v2%=>i|dPE9Ry)iYR0gx3X9C=uRJ-FF;8Y#(ICRk<6=<M{OL@aM)jj5%P5Vm!+ zVG=BBDQe?&wBz=~vHI>w=md{Pvlh@zVy#+am%yXvA%dN9$;`ngee3z!;#g$e5qM>( zfqG3`c_Ep=w`=9$glLXWk0&-k^r#!)e}{N=S0MZf7nAD_TdAb(-pFB-Hyxfy>Wp3Qlob_=@!=r(1XDP#rT-f%VPQZ#^IeF z`I@)eLtj!*4|IUW7(_l|CnM9H=d&Fzc+;syw;I111 zMRzTCfQSeoz+%5fL=2Mza^k7U(=7$(1@WwweDfKd3b4U(CK*@iO-~hl6oJSGJrH5} zHS*C=#0o0YL)|pJ3H=gW2ox zC5~76U*Z#+1^;u0ZK_-XcdAvMHE@RrK3nDHR=*t5(c0Sd_Vu#VEwPqQlDEgiFr?Gh z%xN}#&{#QK;_6MHw6udK_AW_T@v1Wx#I-yH3h5-Cs#9(ayZ!M{f7Zi(n?>Btu+j5{ zFJb(4z_y9;Vm!$f(Z=Z9?9^A2-f|-v_x4*U_{WSat)_3#kdkaju32gLjRVKZc<5=)93m+u$vdrvvh-LD?u%+Zrg87AO23r^HSmW1t zPUY==`X6^&2u*a@-ymdav-h&&()D9H9l9WI+YkgIot+<7{oJ$$?xmpyy#4&*R=psB zJ~B{oXZ>%L4_^fkiU@Y2I%_b%5srFYN_^^hu$l94*zH_H0!}$@qY$KM?g4zq6lylQ z3Ky4Nt~Y>p6r9^frB-b6hKmQ(Nuy}bqeqe#dmWH`_L}{R$9bc5m)ipg6VK$4K6BU~ zU{V#D@q(KZ8_z!q46sT+`YidahmuUf;kvHR$T%8gNH57kyGn6CbS3|*>p|?+nVYl` z>}M%stV3459_Z8xY$dU%*Ai-fjpn&(t7H7$$)q4X4nS$-34wf*83Q(;UxUGL9a#`r zAD}@fFR7gq9YOvY6CMY=#Q)B~tUk(1523xmg;5uNmA?d)$A_iB5Ra&?sXYWOqxyxI z@dm{Wh9~61Dd8!WZ{0!5#1K|jl|rxA69cLF_hf@U3VTfn zCSs!oUtb73Ew98F9(&n?I-7!GnzTAxb*mXR{FwPPHHhc^tVJj7#biV0xWK`kj za!Tuk7pOj55sq&)HCyW(mz{CH2b^(F|Gx#^e;eLtyi05iB68wu(+PRj{iTBiM!%=cYSkX8;p51m3azfP_$@JvkrU-X?OL2kkHuf 
zW#HTD84iDsi`;81PqO;e|2x*?ziqMq!N6+`bLrg8^<^k(8$3W!jo+Ks&t4yk=7B`ojuaHW zGh?#mNE(LezOLU;>^igZM1zowbNBX!1~WFdiVS#^#de#pGoGVCI_EluQK2W)Mq#!^ zrzXcVA77{|h33ytOx-(032f*XT&Pu!ZAUa8mFN+rFT`OIqRLB}-(@3(_Pw%Ow+@Se ze7$6p)dgU7vK*M+qtUD|7F2m))Ol=86`d!C-zfD-gw8gh9FbOCs4wejxsWPn>#64? zI&bhe>f)kjq>krJ5?Ao+DKTH_wSBA>P0Q+=vE*%X5<`;p8~XIPh-!GCgLu7=N?zZ5 zbFg?JY9Htr4CxK0=Znd|Gah9?D{>T0)47-?N%3 zQkiSt*~`nc1Es4MI-xtN=h>^d6y(cH&-{N^y(0SWlY9SQNq+ag%eMnoEKE=t5Ewlyk7>`Gkel+Ni;Zkg=Ll54#)FlAP=HIb#|KIfCW2Yk3%d5v=NDgc7D{2l*httb%Eey%`iKCCMe1I-u@3mfnN}#)6 z%958TkU>q1fZaU`ln3!wWBI7v)GJn_Aj9Ra*Bu&6Qg#xNzGsvPY z&w;KWgp1H~j7LDYDbw&!Z|Ks27`eM^Nl}&V_V~Bo$^YgevQ{Hlr;6sT(w;GL!j`wU zmt-j79Zs!inW};a!y8KMbft+vBNdzSTfR(oBc4L+}g6z zUR(tH&HRJLsxwWh7)E-9-s{s3lGX0uA0auO9+>#|n~qk6l_1hTaoG8_q&Q4Sg%4Ht&ZbcPBF$kCGk!$1Iw_OpPpbU%{Z0Rk7F`cEH zaeb?-6R0)Id~To5ChB10G#R`XaB7NrsS7#z_DfF@NFdOQ1}ScYl2B3&=nj}zMXb{zZ%Q78D+ViRVyZwxwxH-23YrYW-YP}zIDvjg;nxJriFAmv{K9}PQ0j9h^ZrM(! zpKaH*nJki*icdkGzVXHCWFu5lnWH=l3B{SBE$ZKN`}FGj?O7ZjZS1OOcMG_(`RWO5+{>gEV#L@;qV+F~OnG?Um2P$o>cSh@)316w!cV=?N$3&C^IP8i?C}vLabNBFXHM?S z`I2dVt%sqQnRk}`6^8m=v;jeeOx?KrkX)QkV_x&c8wa#0OR;NS?|-wLKM}jbuo0lS zcS6N3zS58Qetq>L8E8!SP%IWyN!(XBROSbmAue8==za>3ZD|qpIx_4$;nJ%g5Jc1s zj~Y-Tk{7eCW)X_BHlI&AAp7hD4@aiU4Y5{7klp+)l|QJ zi()}6Ac7#hNtKTDt|(0eq<2ty2m$GxD4-N60s=~v-U&6K2aqaFY9s+7MKD02yPhLP$Sh$p}VY{s)(An z7d+}ivP|5g$T;x$vHGt&9c%djo_QjE+GGhlNQC~#n|g&B6bEeMFuNZ<`v(#Q6CVJH zaMrPQ9;J-WliH-O~o#`J}=&PUVo-NfcH{&<- zHC=s4>z%b_3^7rw{V+2&uaZ9<0SS5L`H^MsLYK7b6{jX}R&WPAiB_(-kUecNDNmhO zoa}8%YvlQM|F5h>;|<&;y6nBQaS-FfJq+xCwntav^Wz8KVy<1UrvxIqaNy7@$nXlK zUG2&MK=4|}-mSy5q~INUR+}S@Qrm8e*Oy!1*RRXUb63q$TWN-y-$>9UipM|eKIMHV zbYOkzp%u-N>szT_D2KEWha7$dX=?iH9Fti5f+T^7gsZHaTg z@jTsWFe|jz9@toT&=5y&EBxUP$yU4=hD^rfXzFU$_ z;X1RHu_Gh48=`IpDEr4MI}0Ha@7&yb!^P~_GGM~yK4_dEbN~F3=b^{lFZAA9`_k)2dylz)=aa#zpUzCfJoSJ7@xLPMol5VvKJz89Cq@q9;$n1FHf1`LMec)I>1JT@eMtiH19BYk?W- zCoiR7wmrWP7oa--6WGmen)}6E!#E_T-Zi-Zrl@>$B-U+opIxCs*7y9mc^B=Nv*G+f zqL6BbdV@tV4r!j|cfrGH3_JbccQ|5RSAG@N{bfbd={BY9nn6vRUjt!v`d;__-OQ_n 
zhOION^O31;uX>IMTNoRCEjY2a~c5{C8CR1x@vV>C^xCyJvXLsqppYPLw)wLofH;6~acZFDubPuS_wW z8bqVXYtH|{u`T17Spz7a89@2IKye~^D6m+Z#Si0GsnY%v_?&QYdXl~z;!}@9^K*62 z?ua9Kp=^HJX1K1Q|7hr%KPcSyQyG`LyrUP~;#EH1WT8|Y_eo)7 zWdXC$3Zd7mo_EqCnA?O&%Wul>BTt)4k=u#fGlThDPi?rQrAwP0yF?YKXUqS3V}GgG zrH4G^rZE=Ev!dt}YgiavztLU_1fme`Hr#LmRe08T;FpqMB(}i2WrtFR3 z`o*31t@8F5hofykr#(={lkaD^{PL}VUAiHhZ5S`;v#(ZV{+YU>X6A$Kfmb(p*sC{c zf=pYMC7$RT&T+DWSP9Z=LTBx*pfBNZmBDQKVz21x=Q~M%sDmC+fA}nvX2da{-7tLO z#m$`!k`(o_0(LR}ikiOy2e%ox{fRw^B|F|i>Da?o`46oEe_-0j$0*SJ8|5IF3I@0u|#v@)rDSML$v z39rXy8Wc~3o`pU-4ssmjBcjqgSF{&YVS34=9JiM+1}=+8r}V5Sfp&Ou0a6#y5H#Bn zG-;yFv_{3sJfqspnnyyvKdli6G8x-LEPjLm%m_%Ro_W2KkM9jXfgy!S51xccC~jHFhSd%nv- zEFC(dK4OXxj{Sf5q=B?qS!u2Eh6Zxc`iX`v_ap+Id%dIL636#%e{`laR%8!F^EvZ< zL%vmnxNvhR+a6_~L-6}Kolg=ITa^Jw>oWkUzhck6As^QV!&Y~S`}H_R7NU8;2Yd;i zl=KB7>PWV% z4i%WPHcyJq@AcXX{~b&@k69RR4ak?TxJq`t<5Ac$Kvc(#QySFP*7pQaKR0Tj;fuRR zK%1iGg)l`2;#vqbI)N){nhva&(@3qjn=cielf>P;W~RNixPP5L z`%S`h#-f$o+3=C_Hz)}Ih8`^2VY-WL^>oByDc4AME5Vi#nv`a1;(Jvpv*1=yn(B-c z$rI^&!|t%K>6__oZP$FvXTKX|o07}*-f4wd!T6W}EPHU;Tn61vG~VGpS_yJZzr?Sx zy0#^2N}pQD$@v^6y`O0XF0|O!AKtx~KCOmBbTNO1^UbreE)n{&KZo8x&Wp}49iCoR{UJr^uU2efsfHuHIkwese@-d*nsR_$oQK=fBVT0|Deyc{wAQIf8?J!vmAw9 zx!(MzV9^iO)>jrgKVLLSGvH*Sf7?!cVKDdZpS5;b#<-ojx~^9|h-Y=I@%Xr(6;H3T z;QIquBmV2LiT|mo?U=sm1MXd!Lg_U4ixDs=ELOCsE3LpM{b3PB*CzkMV1I0Z9V>e26y9-yNCh5M$s&5_8Bw>W4L4h{Fc zl$$KblhY8l@|>-kV*%bUr;=(H#GV9Q=4fajJAhHpxrS++V-LH)`EfwqaWT^5vxOfH zX~s8NJhu8-{!H9fV~TLciTL%a_{{LW;p1YG<{1+qeKpDM$XkVN06vl zVkUbolO;WS8?q>gA7R`c>5^UsSSxH^9_tf#)BJMmz}fUfG1%3Sp0d3%z>S5 zdU9p|t&!kLU2$U!k;1$> znO3^T{{Fnm;swqvil)4~e-ThLtmVR|cd?pgguYGK>7TyO88!!x7?kc>+JjGTaxiKh zpdy;oO1%Cv+SHH4N|znURT(4(z?m6(hUQ%QA#E;yaqGdd?DS>;(rX&a3pPE?p4N0PyxkRp< zWt5#;9a&QGEr{Y>7rEWUwHOSImOnq0@Z~UiyS+D}x3q2yvg!>L=q?&Q+{FWI$z+ZhkbiV>j?QXLaXKK&ksTtG4{DLj3zNMOa(L zN*9Sleti2T5>eiBL0-r7(tlUsk;|iz`P`*k2g|U2Dk4da$+Xy^Uk{RsKiSMPltKm5 zZnt*pEJiJgNpCc6V5@h8BatqwTm4(-IZHW5vfyPyIe}Wl=t?6~@qIc0v83T?m%zYuuk_$>U_P^Ud>SgrB;L3Py=vnlwZ9T&NKuX=8u0jbrSxwWQRAizS#fx 
zgT&4=Nlr7pQt&<n#jTVSM{$r&b}S_mT6MQQ62Xe%tG^lOT zyNuC;r^$6ON6iN`&_uj$n+gAMfBt%nZ{7Fs_Ku1Ela)~ItffCudf1f1ItI%ZqNVQP zZMF4SknnS!OWm6r1cQ%L)2Fy||ER`?{%JX`=`DK-dh>j`M`?DeU-#r2Yl}0il=GJo zOKEeqbk|drd1fo%LC3{gLzj|5l4qY897laKHctBb{(zFOx*F~7yN1=>CYayemokUp z%7&4_!Q-x9@9?kYHANDm&~WK7A5&PBOd>hK{b-r;MW@R@u6r+!=LUD&a$SxE2|#=< zqAm|QvkgI1+crJ7(urhVYV0qI`+x#rz;J9Yt-zxO*5426Z!!S9Mt0wZaujFh<>73v zQkyvSq~swnV*05Dk=e(&WgS%*2$zS(=5!jCeSY(dy7zH^_f^@zCPBAI-J{1g@e7H~ zj;3RNl^MTWo!{I8E%W_mx`(4PV7hmsyd~(ZmEo03DqS0w^jQgZ7UeqghC~-@*PGyl zg`e2V#VXX;U{#K<-jVC_+(dn3W>ltx%;`!_rj;cgVd3>rN4kL8_!p-53tZ`bYWKty zV?c<+Bz>K!*JAw=>XixeFZ80XOVjI=?f4mSHk6yFuK9;BIov2ph$I8nKKFDtdja(ZL^x6ngr7|Z2uw{f|{-e{JRNa{S#1sC1aFLXA5-WjNi5RHodrc>0{G(^XTt> zNDX$@w7@^n$x3a8be8#h4WzPfv!LO5sU8jjlxkq!QRR2QhJX%1d%p05|& z5u!{nAIeDp-p2|Gy`rHxD+bJa|9{(2zq>lpPbx-gY}QL_N{3Y;cDwI;%WdrUAT5^d zi=53;`kuwb6ZIb#>UFBd zYb%(+K8P-kv%$jUOq14F*a3XK7@U8S>=idF{#FW)j;JN_G?6t@CfF$+>)w!fpEEh# z?ASkAP+xW7ECF+--qx$xASvxE6-yEkMw@qBt`!=fdcPIbnh@3^<53)CYMR}b%i~*7 zsONyIM4WY)AExtFzHSxeL^RNv2%bN(Jn-f`7dC|i$?ndr7uGP{Gz2B5C4>MI;7~>V zMDb)EkmA}cEJ9Pupg*eVT`mf6&!$Gbil_e24G~M4HBr~KC^DI9b+K%9;>(NKdlUQi zdCol9$fYWS*-P&Z&6*4NYDiZOayHRFTFyW8q9ly=C6~4<7$BuUp;ADSASYN^@fvPY zJ^-Hk2UDT?PilJ%BHixmnf=VmlMU=qSodV{rx$}rp;CPY4je{NB*o^qV^}H-Q-h@L92qV7EhX* zS+^2P@O~ps>|rLjB+2H%SaaRG!`asuXvF-qGy%}70z9>U$SzMw`nk`)!Jc>s4O3UM zAkYx4Ul`fbAmH{C^Cg#>Uw`hFyvki6>Ox`HgQpvk#p-52g$8tj5yH5GKZD@k49fc7 z6luTkC$v(`234CI2<_2S664}pGQ=^gJYgdtI;3|F$npV&On!L@i2JJAz|=U~#}lPe zZN$con)|TVv^7d}> z`-`A>xHSG`tizq@H)UPGGb@o;qhgtr&5^{tyf7$0W1K5h9H$N%!qmOu4EC24UQ1H0 ziLHpNcakp^|MlKuvznpHdMFIkPa@a61S^t**%ADO!T$zX*fN7Z%V>x~8ApxbaFnu- z2qHj%yik&>(0Re+4@EqxzcXQmhYm*89PS0$%n@c&l8Jpw2@e-bSD{(n*Y8+pB1}Z) zgKnBk`k+HU%s98hJ?cKrH$>PX{*jZP{gp_h(Nscwja}2vj956p9X- z?tjv(DW{EZb$72~) zEvH#hJD;{*ylFNSc8rQ3se8yt5Z8V#{)-()vGQ!1lse;W~FgFmeI&3RH0dnZ5eRo`h$4R&!F?H%k@||bl$tVnB*vsJ7 z_&7?u%7hCvJ}I!edG{P|3%Rlcjr{wPJI9Xv9;Da#;~{)wc?Gtvy){N54z4eRs9DNY z+1%$Oi}H6S@SJ-G;@C=v7ws;r_~_I&>8u}QusEhq6+u~3o4l#gh^_3Z@`WiFCSH}C 
zTXP!_jtJG*;7BXL?Rf<$y>y%!#&OaaU$SVVD*#NXVbDKLRV{ej6VV-)wi0$9p zq1266$G#!yl&OYh50FN1`@9bnL<*{17h^Nt&AgD0g(;NsXx@B87GJP~G#FWsl)_@rg;8xs2?(H=MgJiOk_uOygJCc2`72UI%(hdW7t zY!-E|@Hoi{*sEXaqK&T~OIv#lo!1=#5?q~3Cj@!{bozD88&qK{wiENXJ;9DkW3A{f z$}EGhzRnVFyHiDVrqjk}KO5DWHaQr>C8>mM>0`WtZel=xY@L;1r>vYF<>80zhm0J+ z=yDw(zyqT}ZeK80&Gr`fr2WI!Kh8eUX$?XrEaJnKK*NSA-A@7jrPXO8K#RYSJU3aK+F9MG6$fj^56ce%;10N$^@pcQiM-w z<=9*dF|}!WtwM8{EccHKTK-d)3!H5W)lHoj*-%`lM#Ga_gg+H`$7(iFU@o`e?>$&0 ziuRl*pYla161{=&iv$>Gt4R+EJWe)rVr3BzrX~zKC|4pa_0%&gP)ihLL)YxAAuKjU z;|kK=y$@|=yv5+o0~8iVU>8w{x1p63WsE$4l~iw5`F5lQh?c%6_3ow__rFCFlf87L zr}quivuhXZ%ph-M;VvNMM1P7sHVm`c>^WY_rho8g$2vtjAn>yjW~Y2OJ$-9=pY|6O z@-Ld=@tH!r*wZ&|+QJ-NT8-P4yy(n||JtW3Mn$ZtK+5bMxcnxmq zK~OB%y)98nzfpwyZ0ysVa`Fkl%hEI9SjT$f)%&JV$B&hxPEL+uAF^IwS9z+Tq_QGC zCrRDU*A?kr485j2%R9&(^5TY746h_6Na{km)K1Y~1WqGHVC$Ly4-4r1)1$s$&;7r+ zuQ`9x-OU>mx3tVO61}|icSGKUL&mPQ-|G!nxcJQjeO<0b<}0RPOU*+36e?hLe#aU7 zC-buGY~ii%vG!)BT3zE-MV-3B=YE)6KYxpLB=m-jr68%f+lqZiMU|&^()5Y^%+Yxd z^ZvLqJINIF9F2R7RPZrT32XSJ@jKQDyO6h)L62Tp`fgvm&^?>`hZ+#dYZ)Jr;t3#K zV3XADrKPE2WMk;Bh3r0%4k3%p{F*-bkh^~x24>syyatk8dmD^bpwzAix_hpNMq=?= z{6R$LzNzZISKiH@KJ20B(m@4xaM6;gQ7*W-uwm`{e)Nu{WNLH0vAgU|{I$Ny9MSjhDEfZiuSL+awsicLXKN4Dx zW&a|0i+Y1xJ|sQih-mnHB0w-%{w@WZC?6+#$iF@JBd8Iqv%VW!;;*=2jgC%p_PA|o z!d+ch`*PPXn$VBW2L!+-u;U7d8SOxToevcT*8?OUT)1<8VhXZk6$#OX(EK?JMEJkL zK;nOF#sIsY|C#;y)lB%!fiX-@n@9XbKz1qKD_7IYiZOBXnzkj&*IqvN%(FEid- zuk*NRjPknUjr@5aVD~r^qm#V5D>TNknCH|Ll09qD2`KrJ&MQYs7z1oR_K;TIKB@ia zUu+n_8~#N$XBI8(2C}Xjgd6eYN0pE-5E-=vRd&%x(y3Bk+$oX2`GD@2q&ndp&FzY6 zt|=6UAXEYT=t=v4R^(ETI6K31(sG@a9-V4WaRbAd>_vOo{C4m?FlAkJeFFc2A-ReI z#K7t08^JDAKBX%}{n;4A9hhgRv1zKbNo3ZbWk9gnYgf$mjj^T|@8YSw=;^hL3C!~! zpshxmei;P$QQZF4C1GwgApX+;+ZZoaoyBlD-qiNuGuS;7rcvx@9@eEfqk3_kArx?;aUpP z;#1Tlb#9pF6m=B{mxTmW%C)hkpX1)oip~(d_E+{QHFqc!%MfufvFLFuqrF=ZqSx)5 z!WN(YB3fBS?+x~rks&&uUJVQ%d9VVW*As&EBP6D3BRAgW=&3DNWK=CeaJvoLSSWMg zm>lpnndUD{;*_

Y2D&K z_K9r5IdlMeL%w~9rY4pm{84VOENRC?cA}wSAboHmedMG+fl6U)lN`k**0r<>wFBwj zz2PZfWdo4EC58Xot^6R~e@Hn55q|iykq5IW0sB&gumxJfQ^E#{B!S(xA0CopX*+u< zE>JiNzc_!U5=Lwa@lbw7)C>(Md0lz8-5=*|$QSJ=`O2XQTxBx#H{11to$(DkjUNiKvUtOpig zwGa}h-x=*lKMS1FK>bW7X?l4P@2c6N6#CZIUvRYSR7?;8iOrYpf2TK$<7;Hk~O zwjRIOw;Uv3VuTU@4O8@si~Y?){+&Gk_QLITD9L-N^jh+)zW!+O$$T3p=^XS-pK8Ffh~J_Gfybv?*oR0>1S8M$olee`szxbOdhN^2oHYbxJs|(FvXtELS zY|PI*wZ1;ySLjNt7FJ${0ONIZlsZK{WRi@#m#xCs4a!3z3@w1Ht}v5OnMu*K$Q64h z2;hH1ACUKF-jEzo7|R4=>}f~W8DOfjN$XGb6i>p`ru~z`#2@KoHGve(8SKvMJ_3KL zs4C8T+h)Vn7>f?srCKGmydv+)rnwZ>Xh_Mke%BWdRO@KCrh1X2kZEK!?xTCEXK@6G zg*7dCGFD{x#j?t&I)Dde;UETfF@05gtPOcg6mg<|PuA>1?+o8ysym!{9;nY)&>**P zH%aA>v=MDtNC^pvvy|=!F$1~U^a&Xi`EwttlA#@(qTvb47)*$W%))$1nLY+vje7sv z#7loi+VyJ^um8-yZ$<{V>!-!to7+xZVenn}mTlRs*^3|50m+s<<>SO-94slJjP=arJ_rA9B_ObYGg`I<(VA9BW$q7 z`m%0o<8aV2Bxh-Ag`PY#4Now8Uli4XRdpM!qM4J7un$}*Im&_NCqoad3%+~>N#gB< zgxF~7>h1Ug7lbKbuWNiPKcH7N_6DSjgnAZJ*8Nx&WB5%n0d3HTh=?rqy_}Lso9DR> z){k(YTU}G(!I>K0k%-A+_L+Yf{1tRMaTPc>&R$7RGZ`F>$$PWFPc)-9AfLwD1SMkN zG=DIv4V7F)FT$0w$!LtAYP;)IkmeF`n|nHwhjuE({l4b2?OBX+e~I zsEF+KH34C@XLCCVt-ZoPbR%Hz`1Da8<6KCQ#90KZyLv@%omfBT?XkrgbNcks7Zxh? 
zM%#%J6~LW{^=&YYEq0kxA`72c7@VHl`9vfTf(MwA$PhyVD12}Olr=hT8**5Iug(UF zj>1^WbS5bd6~N(Vj?Z}jLhY4k;O*pz4swB-_NHvEp2E5U> zxKVV@cFR&EeEY$^yIB-Jt|=hL2ViM}o8m`o$JB8jTBzlw2mhyZWidSSKK zU|>3V+lPikP5ns+%*e*L*Ejy^4x{<-)l+@}f^)GS8*IuA77D>vwf> zy+9i)>ZmP4DaBC}9a%E4O1XszO()HR9;aXelBf(FjX(m23;b6vg%@9(5}0Py^TtwF zhvsl{pk`ju?7l86h0uKwP}?^Ea!CZh%AeG$BD9xFQ|E@7cP=y9)I~160ob7XdcK!C z)NTcoK)fnB9Md%)F3Q{>ObU9-F6Jq?a)J&>F0?I%*tQsNG94l^k{)-C>6=8;Q} zv8v{wdyy`RKJZw3&pFBM*~n*8RS1423yFKLWA6$LT4W~$yZR+-^h$xJyz?i8`l?%; z;Ur;+JgQ1O{0z5%{?U?~`oRJ>v6~JJMh>&iQlB<+Qg2?OFx}6|&j@~E#L?vTc{YEy zqD}Ykp^GyxM>iS3OGUfHD_xZD-rLPuP-qoG ztt?*4SU8)6)!h;{=dZUL4al@gp@3CrHtS!*K<{~Q-V_)wW%9#aGJ}%sSJ-TM-;br0 z^Ie!0IGVIuLuuIrO0TgcL`PL&xpO{5hoB??hiPp3&1;f81nQ^WpG^c3I{;r;VnhyH zvc%sf55zgO0Xbb{gjq{?ZP&xiE+Ef?;2i3J=LiR~kQagJQ0c&DhWsE{P__nUZoVCP z9nqYgg&6Xi!k$2i^g!lMq#}m0bg}1<4}ZuKcRH>E*SE(xH2~N0*}Y5Ayl0K_5eG(a zS8nBqx?s(+Ig*w&Ej{N|8=gISeg;rpJW4hF@RcYuj>AcN*+k!yD)c=>LgCay2{II*m=1c$6XP5u?8nh8B+?LzR3QFf8J@}NU{UA?CeIyIa=+k zD>DYntu2e0MiOfn$DZh|9c%i?xmNIFHT4n_QhX~Oby6B|405>% zKMl1#iLOIpE;^LpQNWm@UgOoOoC`BU$DSg9TchM$_Z+@q4CMmS-m&Ip^Ki&W`X|JnH1C&(%q#ybkah`{vN* zZiT-sx{$J+O&h#SAK}%SJa^&&crkgP@*y#pH8(#ZaA%LFkkUhAyIi;X@7<*+QP@5L>HFx}khQz^7wU_}y`3<&!vvD_jED*-Y^{QLwu0 zXLDqNyW%LvI{n*oZMn^2Hqrabfll+bZEeObbaNF@J0IO5o#t!LYKqH25fkG_+orOL z+x-s0pVdp>nN{T)Vwz6;q`PVRy+s>+d)(OiZ0B8^nTgi8WXiF6uyS!EG(@~B)FLWU za>uLNRwwCo1Du?(1cp>A8pvpkqBnlwV=%f^zNbpL(_2>K2r#Ezcm>#1z~eW1aW5TBrm0O{5i8N8ZU?B6pQnG3GgMtgijg~P4)x4R`)t&#u(_m4 zY2T%yiR}4%-Y%IRYs3AbEwrvTx_OqyNMO4iBtHw5dCgbn0m9TuaU-BgZXQ!=63-qF}Lu_@R&r}$W56i6xh?SMGzJMS+sFA4)Ph3u_z z;@`l!kADR5DtCUg{Z&Li5zrn&#Fg`))$gnN6WmFxENb^jB{m+yXx)FMvr95|CZp{8}=Iug<>$qhPy!X5y z-YYn~IBNq^kOy>?=S$Lo3V_Rlf|>7!5Fzwnno&Pp+X zR7twL^-W}p!%t0)JAJ|s^aF~rp}^?jnqC^NQNh3+WPZLj5SWS#(U=Y6mEE2h55W3sp#@CYE5cNuCMMKd2X@BejusD0+r5~i=H>KXVv2U z^~IMm?w$eruocmV2DOfTpPT*sXH)y<Jte(leC}v_11uB|{WW`A~dooRQdNp~=zk*)R6T@0#2DhA%t2?YF*wErs3&!FQ zy2B5+1@K#y^4fkLCbF}@JjReOL+-(e>r#6pj1fqV_9^|Uz%M0jHU6cSb^()&R#&QT 
zy{4Q;*p&#BVz&UK7$NaDB}Wwo;M!0OlH2fGtpDL>AMj?sSNMp+SFprUslEr?=U(J_ z86w1s%kURUg;=Li3LRo1S=1$jB}R* zylq}>w%^lwByo1}sT@QIwB!9l$YGt{@;!m^MXjz(0epPc2X#rz7!> z(zFrvAx|;`Rm+Eddm+|Tn&UwHRbU=;W+09sEJsBEh}jUEfjCz!#4>wP0NbJBU4W?J zq+~{qILRyx&#zo=2Ha+r6NtwcfOxDA2(WlU<*LjGHm8sn>UH&FvQ;cFHjS-tSL_#g z{z)I!*lZWu4qBk;taV4ckdvEnt&H%zfPl!BR0W%NVd#jpPj;#f!q~(vyg~$3C>H#s z({BB#RV>c5L1hDJUbre~vA&@8>779gs!P|DBA4G>{rT@-`=K55x(QqU z!qY+E22Z2`=|L6>dp^BzwQKFto!$eZ@cnm++7X=-t(Uo{vL6E2K=UaDU{%x+yVs>E z8fQZ)H6`N@jqu{KH26xrub}QTrh`yV#JqA3(D2bng9E8%yKHS6LcLM*%7c*AMoS!M zDAqV`8`Otd9puEFhQ=R80Uy@KR~iBzzT~(F3}L!;Nf=-C9Qae*>Bgmbr!w6JS{$*0 zPQ;fV<>9H5)!-{aHa4+ARB`Ze6oP(KN6NU)ZLQ&6NVn5qHhmvd%V3;OjDm$Y^V9LviIE!_m*3hghPf;Nv{l%4CJUK z(SjlZ2=Hb%uvnlAqgO4{jAl!rTK5>f za|h6aFL7c&L8ZTFTlxpOpivkiw|_0zUgi_v#v6%MY@E$_Pv|WoNBtx$+2QBT-2_L+ zp_f%zv^ZZTm6EP}T})5Q zJk|J$-3)F0|@#Umh5?BZAMK` z{BgIQisZ>?&{GiT#OIS?NFq;VB#UqhV4||agLpH`!+cbIMiAOAwLYvDGTUIxm2DBh z414-R%d6Vgf_*Auf_!{K@|jX(*k3lj2pzZrHxxLTOH)s#;r_8ulQBFmR^crT`q)RU zlAmGxjk)30dbBmIQV{`Wzro7Ys=tqj+EO&}U=O3Ug3b3VN^V=II4XB(lAl%JNtt?F z?A(H?@}tE8*j4806WOynidJ`u96GDSE13wvDD+nL630yOJMq&}J1g_Cw0Dr*drVWU0Pu3Xj zf_%Sq-+j`DUqKvn7$5;c9u7hB>J{+BW#J}>ZQaF)lA=VV!=MG%Aq*napEW*xnY27h z)98s1u7%7~qYd9R#FJxY(J)VkSL-@JodWC@2|G3O>lar{RkufE@uR`1Tj{Z^AHoaj zo5FDWNBhS<0n|ylqUUWii@$>G=C7{sV^8~@%^;s`k7nrg3&I~@j#uM79dc;Z^@3jb zdp=}o<{5xFkJ%I@#**C-xcy;}cvN&Th)vr#;K_@HHOd9~zT?H<{Za#mqg_0Pb4NEY z4PvA{sso%@5a8uEcY!ktPMIO|BHHJIZ8IV-fO&;u(pjucu~FrMrb;n_)*b4A z`_s7cz}bWf=id-LR+&C)9A{E9T1P+=L#i4ytTr}MSJC>0!JD5lCgUzQMwz^0JdB() z-tqDj-vtu}**ZGwbUH{VIq!HlmNd?hZ;H>^c)>OClnpZzFi*p={s(+0d)7{TGbgLF z=6>OaH*$CFqHR{ARU&>2->R~Ns)z#P21)c>@UU0dT zqc7ZgqNkKfc(#hvhvWhrpLOAq1!^D?C&yNoXB?tkg$qb`{6FlycT|(xwl*FcCa%7k+-gm9J=6cqg&wM7NVJxfJOW7`+U2o?4qF$mSO-uWB{Cc?_un1eS;H|m~ zpAKpXh-c5NdrA+iU-Kztc3)11YjkjB0w9Ihx+4)Hz0Gc;IrEI|#p`AeOD}cG+atmn z=$I7`!KH4B7K-UHsaP9Cobr>$l&fJB*=~-o7wKh4YUuEe@v()LUK8-pdjzzDa+%b3 z_CnH2X=jp*Bo5<5p^v&BEHi>3r1XxfBAc+$;`@63x`e9nk2~NA@AO*@wY^-5YXa;t 
zj4hDp%luyI2ea^bjJ`H|&}xNXR!e61U55vuT+a`d!x9r8nE@vqSI^ED{Q4R*M_hqy zS4}G^t`dX0id8D@N0+`_8M9L>vN9}#tJ9ikknnT8IDm^5Hb$6E`(Se6-x*E-)^Od9BNd;&|LE&jnd*u`okcRw8b1Uw*zx*4Oyu1hi{VdVA=w z_Ko0SNX3*AL-3mq3kLH#G7%vvpFOXC#h!9_mB(XIN+(vJGt;#BcAuMEwAz5J4!p#R zpz63zU#U}CuuB17#5Se)$lA85BZ;PS1|QBU13g@dM!$n7i)zj15*M6)dv2=d+Y9U$=+vJIhisKW1}pgg{c8d8fzj=IUcS9+t> zj^@>Q^C^xf#k3>+JE&J@DrN(;J2|*kq(cMVI1!|#@Pr&3REY?zGwWOtBl)zUB;ve1+4eGe(@|lIrye+B z!Qb>$M^4Zp;Ld&For_1i#}|sdTEjM$JHp2n+`dfhE|tZDcS*+&!|Vx<&*L|@!DUdm zwQ&8S3W{6T>tg)iz9~0V!+_eW2$oOFdtFw79cOBh_N%r%H@@tyCl4D+)JcotOqIj$`7|8PGNYVcKbj{~N_NKaX}#NIH81ki z!apa5L9F^7P~>R4BS@fUITDOaz)D*_8_;>swYYU+-;}sm+)B1Sn?hR}(g7TCqIKg||`X(0M1(T`>!<2%Od+w@5yzzYIa{N(f4qEAX z9r}!dOD5Ml5)VHsPt2m<^3a?g$9@Ni@p$E0Ko3Ae(9IFOi>v(>5?_YXZ0LR)Gpw*+ z`6)=K+Td~ajHP+@fZ2lPc##kD)}ohN{tRR`d%Q`4@n-2m%l+UDmH_D`F6bIo-67D) zEetCO_*ep6qpiP*bs(Y*TL|wtO|r_`Z~O2%F;5z=FC6#v2hJxH~tOU_d!kr(amY&&F@JDt@ zDzt7_lVP5kfOKlPkX;Sp7SCr+}3T9_G$GZ3%Tb1%#~p%^4){y%}sB$k{t$T z8HYyQ#RU{sAV+A@Rmo)SnG_$yf-6aaX-uGLWV#qZ;0RN=@fRt3d=l&rJr;#~?G@$w zIRTbdnoLE8DDICqw}{l!W}a;K0PSCiI5Ij0t8XkV~4sqVSTEv`T_~4-?@7$`d;&n8hLTD3GK_=qSM7YuGFT6SSO>{$K zk0nbAg0vMSJ_n@{5#_DEGM7b8l{E_tp4>dA2F-nf*1_)ypwfx{Mzs zGWB_tf3w8b4YIB$65^%F#zWm_f*sPa^i&00$%X2d36B<7Z@nzBOQK~x&1?uMrG-m)&N zd%V>_yG+;;bB?*!%YGZ)!D}hgpQC z2Dl&XLN`vBOc4M>Wh@3d?ux>W5)RryB{iIYlWci;C#-rtE>cvJ3ow?~*IT&Wtmg2! 
z1%e5?d0pvK%kF=vd{(q1Ft8%~?N9`~#T)PUb|rWrcxQ`V7CzgRe~%R3;Tztdh@h%- zyw$zE*!NCE;lxLqw0w!}hi7lh_#O(Ns=m_Y2<+a6qLmW#7t+#7d_H!xE5%wPscxgrpIt=uaJGsEOwWyBt7b-d7r zm_~hjsspO`%%V_Th|`bJ%8={@TgJzbVdVOFMmzU7s{vp*JeM9BoSLs+d8x;Fy{=6$ zZWC@jBj^a^aLW**$Fd?eD)nVmhr@*fq!|ng6SXg17@G8meN!6Uu;^z>dsSM|{CPwb zVbd|)RUp4LyFD4o?lSDvO_G9e_NYs$#!#+8WWMgpOmr?naVMv0&g>J?9$B8=6fOiw zxZRODX(t-**OVbq#lU%^MNJRRX7{UC?v9zsdM*YkhMf^RKlV)K_3#g9MCUSk_kuK=q4h_o-A;$&fyI9*sPn^nOXth{tC+(U+7 zM<|t$8O**_!(dc!P_}UR*yC1$MfDl{%9=7y)iHVQ^B0gSTX$aAw@7u)CCm8oV>jU0 zjH@^4-<8Cj;w-a%;QsM}zab3yO;}y?c7V<+BJ}@&gh;t?7eu%?O*5tZzxb#FGWBjFVUH(9q>gO-^^N*;Yu4451p2)wv`ZNW& zrk_=}FB>9>;6S4HBks2if59@!OX{(rOcff!38Y!|DW%Q)Km~YXxnk4Ye8)6`_dAI9 zTZQF|;%I>Ja!%)5wtEc7P=8CF_xK7Z3k|N=bix-kQVd5ctgUy58-eHTWOB6^=>-xY zdquc-S6&W&GfgF60(3tkJi5PwM$pWjm9((Ew2s=&BdrT0%eX=gT3QFGw~A2%65r)K zMOwk4LmFS8%RuPTB^1aoY@QDx8%QfPJWyvw4OKe>DX|CNLF7~EiMFq7;>%XO+&y9% zUGf+@rR`y?n2YA;>V33KDwH5BP*!@tw!qIBm;cB6)&7({`ZvEzM)C~Br!I%)i2E24 z?SO~?xd(-<2`|^aBsA`e-%of^6W^gHZlH`IqCxuBX6<{vfro{jS=o&4(_H8SiQ3k^ z$AR_4bLdJ`rJyRC;y}KluKNWlv3bx`_x{&IOt*{ZCj1+4siHd@j8q=t=2Lev;+izM z0U_;r z@*!7O4BqWMA+2Qlt>bTpJ(a8D z0QPq$JL;Tee66^~rTr=MLrcg&7voEu8>|GR04a15C_bbF$bpFT-+a5^9F3xr-rX?%h){J@O({I3m5?KM`?;xl+P%$|ud6!vXS!%0kk>ROu9yPGwzxscgy#yCS zj8BZ-$&p%FzvhLPXE*|xCBG~Q6vtWt#j*Gr(Aj=T^n)2-jZ>FnFD{6-juej1kYX2k zN@of{G3NjIxx!E50`;JtUY=o4b7hP&Q0w}a-xL07T-{!A8od-Fnu%y|^xt_d*ZCAc zINSno{3EmH(s)je|FV(G`XxtK$Fcvh7gk|4Z1n;v9uXlT=0L?`riw}=eMAlj=R}6d zWxc}UzHm>5>M{_I`YDG^sCHu|PWJ)8B~nrcvJYg4qbs^%8K=EXcBV16+lw?ZiptG9 z23~s;8r{}i7q^k9&772V$ie#q&ip=(J7357|_m>3@ z`IY7UlNRkd+BmtXBXQEg=j@P)JkWbqD%B&_Qui8@I-1@uZr7tdV87!!7%QBZ@(M;e zTql-kMswRk4L>7SYf|cjc&mVAk#4L1Ca}3xWHNHtjLe>vWr#Zd?bU4)-kbPxI3*MQ zj6BAb{&jw;6MeFGHQj=UR?AKOy+jtgkMqa1RlPtQ&ytRxGX)t!afS6Ph9)by4HV_jrOx0{3|`npEpl( zd4KL;y#@9U_XjI*-Q(^oEXNsm%z4Hu!~>^Tv1SF8v;$W_y@{sB7E4dVLp{y)(FWq$ zx}bRf&^(*;LpHy^<7(AR0RKcC`mfp6hNrQ*@&xN% z_KnsyCU)17@)Nla`KA?qFjX#_{+|>0kl=5h!0e6ZxN9+-?XHijhhciXh?*-GM(-V| 
z{WF#o#t0m=qW*h0=6}{+ND5tgo9x$O_uGmzYAebE@=J{(l;~B#n$^ zqBc4saJKsD>fxW^W;;gLeMa*{-$DxU;21{BN|YSCFu`Ih8l$J#>U|(5bGXelDVsYL zT8$GVPBS`s<{m#y?p&Qma5ULc_8z!BQFv7y0k;YHrqWH!L(ABR+8s9w z0!rrLEVkX8^l6}j7`rqy)0B5%wkzabJ*|9P=nzy%8CvP2BOA;v&(Z8AZ};W=9dLar zKmz?4Ew7u--H`$>XCgShgD#z_WB-n&bpXV*d`RfDJ&s#9E{v1et;p-u9R z3p%<;E%@eg-h>YtWl*_Y#JaV3ustso+b_YTq@dN`IFP=34dyvJbIg(FpQj`(BIJnj z1~`R2xBiOQ^IM|N|9<|N`1E(?gN_cX6y_n2t!GqU72Lbua*(oj0VMZ&ipZVRSf#S- zI&)MVr{}ipYC=8~YOIg)#z5p9jJf6RHcNKpfAM+i8EVrJDYdUm?wQn(;$TK^VMX4U z^->DwF(w^QMgQ5zp zBvFO7a2L+h3pavhRfVpCL61)X&R&%$7(m_q63p-}i>F%xq6b}u&6zRhfI_@k|R4A!MD5#CS#1dp#<_E8SrURNJ8sMGIW8RJn>6Prd0jT;b^IwgpBI~KxxJxZQ#U{VE3&QA_Pdhc*#n3M3G3MMhkR(r+%caRmW1Hcw7F0B^P!Z6Iy(B$MQ%>^$z%Y$?rlXJUjRww z%&<|lCzLQWoi0#Nl6DA$!@p3-V|aix&MRydfO$Cj5xnLx1W+ol2ET)X)l|d@FLeMx zs^1t6z|gWs>;=0-V*I)Ro*2jqd^+r;tnmNhz-fEE{@?DM?ke5;4jK#w>UzlYpTL`@ zUp(wZ(?TNl+|436*0*u*9;u4FTdc}0wwkZ2jG0)WkGb(GGrX#!G~E084%3ymUrfOS zWAe*vSbhfq)8W*z4&id)Qub*&4}v`iwV&ip7{`BMH8CL}@*Pwquh6E@YxbumOaai> z6?$4?G5vB!$ij(#YBfeAT2htqGueZ;fDFygd#I$)rDjsT9@9Cvy*%uc&iTkns^dz3 zmcv17!**{fPYR;hi~T!Dl}%VE9lrnh^7))QewfdOJ*HEKI5cg&=34AHcjt4se^Qqu zD;?yGVJF8ieLQHMpeg0{<_L1&G^bUK?`|W;VDg#8CHS*R`%lOY$pZF1oTJpsxhepo z@k#~j|8%?gQ(LHQ+BAMSICE4@@0QbbtNCa25hBr_a8ivTQTcj!n!T_Mzn1eTdGb=y zqXZ2sy!z_R`&5~c-ku~^M$#I-g92uo*|QkC=P%e^+f1HX?~sIJ*D&71!8Bs6Iu+jP9;n2X1GRD95nAN+d z(k3;Fas$PT*cnw50FyKm1^x(F;i&dkL$Zw$ylxDXeEA&BfM%*bXB9m$a`aSGz|bii zyIpi_W$}I}o_uhIxwmnfv0!{0z+34BPd8g?JcMGVq#fr>PAuMvW{y~GhlFNJiH@_j zW_e80vPm<}Qh_Ol1Ld$56<8jmf3z^iSM0f64*gQ{BigM-rLD`BS)W80)qrxVk6l2A z#JhNK)AcG;Hf)}=&dxa`Q&f!7R}bkC_F_{hr^F~TE-J7}bM~^MA5Mh}vu@rE2YMt&e3276kkB>X zgMR4oUzi)N&aAg$I2{YzYIp$DFQ|E1*=Ll0^f@Qkltd;YY=ABg-sc9}V@B-OXJoJn{h`*2{+{cP!oNn-k);WI%39fp0LQMcP@nvXxXD-5uc>_;O zh5l+dMeyQ&P!-x7QWcCCb&+n%lX{1dRfeVY+gPqvR_|?ZnQ>cD5Iuc-y)E}RyA0CyYeX+2hTKRmgGFC5b{_iEhCxlsu9s^2Ssm!XYmIq*L6(!sP7Cm%$hjE!&;E; zzS?u`WK3IYs0kI!{y~y zU-9XA?5Zf}2X-_qFU{f1E(R02S5#lgg7IWW<6 zL9=&Xy{S>~36S1Xq(PaQH8r~Ne+R`ngOUA+)=;w`j1=%20Eva;zu-GWR72wmkOk1Q 
zU6KnV6G{bvMvH$3c?NU=C`IW9Ssb8yL-o)AJbthCtB)(K&_~ccMu7b3$3}6Mb;5qg zjTyB{^h{?*Z7BwRB9P7i@7)K86aj0qhilN?O#mO>-%AdGE|2?Nl-ulQq>!P*0XPXj zC|b%Yo0ZrpMAsO8(TO@uT8`@NJ>Lj7Nv3ltGsK?%7{Rf_bQ@+X1I>i2s;K7y7#&b# z+ScCgHUf=}xq%P|ApKqtCp2$(cM<3X7Kc&KK;2ah{p7SRUmm`C9xqzruhkEIfau09 zNk`^30EqspAP!3kitpnB3O*7^fE>g=BJ`|n4ucjA-ur~VFCDrmIjVS?*X{v3(-tj& z`r1K-yquX`On|;dFBuj`qhaszydvV7)Ih8lZ}_FU;8+BX*NH0b2Hg5 zE~Oz_zmrHdLAOqO8(o!uj-*C9NT00(tT-8DP4EQ|Iyj%Xj$|ZO{eVqBa$NhaDoZwJ zQ1rsi>xG3apjL|L>7OY>eP}+R5(CegGMjjR*+OH~BJJY3h`+G6;B9qknqhYCA~_Fs z*bCzt94+|<=$V(lGx%s&a)zp{OK7pMupaufVjusL<&JFYMx#0#Eq}y}3aa$}mKY{= z&-997h|hy63q5Ko2BUto_|@J87W><*Tk~vA*90aXJ#pq2Xul+NFT6$m!l&>w_bizy zc1zQ2+0jBkz6ZQsmjQ%S?Q9#eaxZJ8kjUq2x96NnjEKn0Qy7g7aA*lR%(8(;9%D94 zrUJ?`PoLsyEe}^%9j6cDjB*d3;4a|PH5pM`{ddZR8zD>XImUS1?~1LNr&e)tYRcC9H^OxVK!TnBB6v#s*MdXdWlDQOpm{IPZhy6*%x&?a6?BU2c9cG>JOa-#98&c1vftU_PG!|!v zX6H;xICaw<3^T$}ZPOMxL_CB+eh0|{XLibhduX+pnI!?ls_lsJ zOyMH3cnT`AaJV<3<3Hmbgu+{vk~@mi4v zH>He}7nKJK4pPe{GE43gwWE7^%ry+Xc_Y-6#W%Wauf6VGxUp+&VRdJ$XVEvQ^)|Hk ztowPIPh-VSu}NjD%`qEi^MdFgyeQ%Vub_6);`$3XXQ8Qx`vtb!S3%#r;)r;oHv@xd z)nOiO7qb}n3bVUBKFbTX3XS0H-VD!{b0CV*IRSt2ADY)nOECY_i*HCRgxX-g=fX;c zeNrGK3lJ*NhyL^Oz%65^4c&RDSRO zuS@>lfBzHIy|ZgG?aCuK^O+dpLO@rld5E&Mv(TbWNnLy#CKU5wf~$kHZyD2br#CfK z-p|Ob3Sb@fPvqTWL0b3%LF&{k1a ztFxS7hT-VdA!KZ2?B3Z3uD-%f)e+egM#9oZJj z6fbU@lPK~7R_Fr!J+~h9r1{8s?Bs%{sFrL+sm>CSlZ!tij@viE6PVbZO~AH?+5FmZ z0uzJp4SGlAsNhAjHhy}7tOt=(cS~~5byFB;ODb&&WidpG@_1%+?mr=8bE~25rSV0nM6j zgyT2Oi)S;3>~lhdgi z{k%5-9=wdbOYsEX4NnV)Hvn|YmORBo|L#1;%&fF2S8{6g+CVM|$x3X}t`5@C#ts)e zJ9W~#r~}|T<7w#RKx~_w2N0I!W+=`uaRc`k8-$%>tsZQu7Q_u(*0J8i=u9sseQ4#o zgpB_9cFQ^P;$ckKhAcr!`ljl6^k@Ew9ZDnTsT&xBP#gaovWx1Tl}@q^ycGD_%JuFv095odVZyOYV^TXv-i9z`Hgb4)oW2115SQXJa@G%?umR{0@pl%OH=k&@vXwIcKmhR>e7n?xAHm;`hOg6e*oW zTE!MB<$HxnpJ3Koh1MLkG^Xka)V0!>XsWEFb-&)e<5L*x*Dp60#1|{)!_m3Ov3O{6 zPFHOUCS*2&!1yWj>h=ag1tX~>ZXddEGRS@{ro@I2a@FKkM`rjY96VB4SkWyATz7CY zXQ1yes_0O?#`2utG`>!E(O$_DPR0)f*Sal=H&sr_x&N?>+YtEU=J_|xNaVlVBPS0a 
z2QV~(Ju4KS%7XX4Q8}!KKe^klZ2iaFA=70y=HQu%EOoi18FOdVfkmMQNeU&K-sbMI zld`8Cn?j$g4SM*R?m6{li-Y9FO?qLb$l^S^0`|ER0GZ_XMn{DfyA^fxt2AhZoUfDFkXJ;Zr}Eq`%?e(^rX6`tdpx9QGK0kjkK|Ce#TJbKs9)~-lhdw zyEkMaKQOs#L1O1IKya>qdlQ42bHmC9dKt@Sz68@CsvEU8rjMkf0TE$vrf zJ!hhC)l7bP2J9c6p;=?{KfF8BmB^I&nG~Y16xn!nr_;>Ch2d4hN4snss8+D+^|_oY z*l+oZpm;8RDDxXYm;pr%Ginst(r}1oo?o;Yw6Rt!sP}P(q^u7=FL$;mF+7FMf{ft&g}sjtS&&1R z`%#-f8_&?V-uMRZ!b0l??kDLl5x|M$mHzgT!oO*?{xkjQpV$al@S#V=+rixx%XNL1 z#bN|hqG)pYGhRRHSLeg94HT-BuqQSZ_tt>tBm?ry@-3+E`}GX!4$Sl=x)$6ELxphl z&as53Z>TNjxFU8oPq$B#6dIK)R(D_`;5iKT%L95TT}fqyAt2ry2qQFeZZq#Pf$lWc zmYp75STMWe5sxgf)x%mCdiY!+B(gNvV~Gp!^2`Cv*`9&m%y1K%1nZ7P(F9*}*AgdQ z8QfmA6BhQCkTFy1sg#(&{n0%xM5R+;&m(T%V0mdaeDF(f5zTn~@haMLqw{jgJX~+H z#q1BlF@KZNsm*Tc>AO6( zk6BZh7#bhTnye{&GR9sO%R<3b@6)x%;DEP}OgY*Yy3K@DoP%l{DIz7_wQ}b8R_kv} zlk~e{n70Z<7u5&2+q5i_BwG82C1_(DgFbA36=ClFXC1QAvkakoPLHalE-jgiB>f@+ zca5;!IE1t-q9z?55XM#|#L)BIN${i|-euT1ZDc<~v*3)yD|_)3tPh{FcALG*Q=8X> zte+(Gtdo;Ct0=Vp5ZWjfmR^;yDi3t7OQ~JFj!^mz+BhG)`=n-wQm#ms+ed(SpAtwW zf|o|N>&uD5uNN&y^*Jj9tTsGom|5~VgGQj`ik`+`P-t*B28&XcrV0^mGO`nIAaF~< z7Es_u2u}L~zxknR7VO3xgcA`k6GEOuWl-S`FMx6}SFEc2FAL<4MT-6oUh$-`sx%S6Z(Ab!Z(Gg?iTlB+Haq8RoDozgz;Ue%(u6v zoQe5JBCP@5dX{Gfz*|rBQr6IEgBsen0-b|gVw9&IJAeMxyZYpo&RUHMdd6ub&e(yC zqrsGDk~)qyr@XZMvtN5zrSHU}RU?k~+UIj>Mz#ZR_q* ze)J(ByW#_|l|vYJCrg}N@Zpoe(k*w&f_o8x1<-GkSLidAcS4S^b>wR{a#Q4^8 z0fp@VeIm3UNk9SZ8_kB!MFCCW&qXqyMM%C!+wNhb=|t~gCI{yyvo>DJGKbtxC}96A z{24824uo-hkg|q*uxH{=vZdnR+yhDj-n1Xjg?8kf`ZVf3*ysJ|kZGRGFvw6*rTv(k zJfO@frK*zu9i_jZFEt#F!N__UB4h+ABKT5u7jcj*a@dk@+9oJpU;HZ5g@5X--0 zQr3{+_|jBVGl5xp-K)B0F>0KP6f5s^O4cKo-DO#1uX`j2nOddyI6os5?=8M&k9fTg zzrb@d!Q21>_cR&5S+Api`3y@!(%01vT}R`Mqr0kF&r;Pa-HZZ~2Ra5CX&&@UP?(k_ zg|*&x*nFoA`^{o+)aah($n2}WGtS-Qu2J(Ph0Mq@)zsej?p}p&!91S)6Yr6k35qHm6@wt! 
z!uQ5nSNgqE8p}lg-gf@Qa%luT?(fBodNE8+%xYozyqvXjA`A?V;>VGcZ&0J_BR?Pc zYg^H^NOQq%*2JnVG9S^U8#JsMeskwVoAe_BMJa0A$(T3a+Iz9b9Dja?jEUz0^o|fW zwrD6{l54hxc4v@H{59vcY^l8MM@5hBA6RauRR#)--_$1&ts?5U@M)br;#p5QWdNhS zLVx2(e(c_VkxTohc5i-7gv+G-0E`_v&ooGn5+-2l-7?^_QnX7E3W)-CGM(V$@41+} zr#4c__eo0vZi%kHd+vg+$~*d9iN|`GEG8B0GHDn`W9EA8*YQNB+4s~BG5t4r-9oU0 zYu9d1UP-+vd8*RQt}%Et_sC>0QrdFW;5jj;rwh&MNX7B*s!1-g7JBFvn^wm1QPU2{ zXn%=xwMvhftQKqa*!8MYqiB)|DQC1{1nzD;2T-~!wO}0<35c@zr8bI^rv6Vq-=tYV4y5`A3p$lJKg?o&mvj-l{y)7Rs#d5^8D zzM9Zt7oycRI*%Mm7EHG2#Zb5iN%dD)nUBbEy@)xKt_k6{F|1%dby0i~yt71;7~~6+ zFVKuo_=78!x9RIXOSsX%Ji1lB{id~X@MSV-hP30N5bxTZr?Rer)1l^aSq0>3V;jHc z$`zlPV5Pj-H>-^41L4w3rE|0HmfKAxlCS){Sie16Ju@ZS=~hw#h^blSiM2PVnLpo> zvmFbOx_fdH^aZD>7)0e|9S3teC4z|+dI zBJ&tUjoDK_iJHnN{e^_5Q$?p_n~8gEjoS`iEgXq5$m_jPTx8R zakb98bWC@SAu?$D+E4#lSEiQF8YNPCJsleXBIv{E^4)Dd2L}j)^`Ql0y5r_|<#5~Z z6JI|p#SXjW(YoUkqz2KD#L7nvFZV8&d+i=pXEfKiDTJ~o@9qMsfMiAU)5OnJBBe{C z*yVYzIh=)=PyaQT&nebwnTUZ|4hPxQ%RGxyuY_u{w(gv`tNek|2Kx|zhm<)VACR1To~E7+2>66t!*#j)|$|Fy2Sg%VR+k1D{W$lBq+SbJBJ*Uf-q`5Yx!4#)Ype9PL&E61B!*^T^H~K zcU6<+xjb7VjHLYMEU9fNK_L1&9HaG4-$6e?)W3a*{F~w3!ZH1RWlEMVP@~?ckS&dL zq1H(mYqp-qToi>+u@vP1&3?B3o2pU(!{548@wuciYURSyG1iL-Hn(&(@D2J4>nHuA|%tbXnZer zymrXF1dTfP97_MFZ83~ziPmQ^yz-|^v{HSJDx^dVL=}`g%z|Id&v#~f9hfUNR0iZ< zPK(X~3Vx~0|C{gsPssNJB__W&&l!ai($F+>V%zOQlgbGCDO*0yFs`%y3{7eMG&7`w z)#2;P5XK36KIXS}XUZpVyD{}CT{%F1^a0Q@Iw_yi?o7cL0tl7XF9`ucr5Z}MfKVwN zI0-rb1K`H#RsN_FcO32pe@tk5ogTo9gX6n#Fw%OT zb-IgpKL&?d?fMN7EP1#1suXI|nwXs2wb*$4G#mWt?6_j9B5N@Y?FU3O{A&`+pzolH z$y^HsuAETq0prs60};jc?bi;JWkrpI!G5p06EAb}iOlE6kAL|zqxw*VoLdiaU}8l>c85swKFsT#u^?`F z#X~UuJnu4Yy5KH+zJf7ugVP{80=?cj^w$Xi-PZ1`^PY^T-71s5YTu3{Q@poJhI6{+ z7UGF#F$$i0O3<{6{~Dl!asndz;zf=`>~^W?0S|Ao#El}Boc8oCya0yLI{omV8W#-u zZF3HBegRO<2Bh=n_i2qw#sPWzf!Fq;EC6S-_AB)G0)PaW5u?wvPXYk#C~M&7P(To$ zzM^l02*R8586Xgb>&ro;7R~65pTCmjq6CRZ=ctJ+20x&Jd_6gTanjmuBV48GVS+!v z^hDBD%RI$U?oyUj(K;U-Yi3ZnG}(?O-Y{UE%-e+GGrnDf!1@c(`~9j?bb_tP^i>S6 zmF(l9yp=zYtzNy|W~coixWHU`!5xfuIxAjjC1D|uR)&B#eRaBp(^Hs}b}^B%#?{N0 
zy5=S_x;4r=Je-7>ARgIps$$~%ct?X*zvdX^C3>u#Ikj7Hp`|>CVQ`Us>3te3q=eE- zu~~+RDr?2Aw7ONgA&Xj)-|QPo@Fb{K5Qy?ev8c z-mDcb1Zp}v+J$1a?rm_;e18e`gdO1vMy>>2>z>AKjc4>{-!0tQE$XTqm9z9W))^!c ze`BPZCH9C5)6Ua3dS$N;bJ*&GK+~V3pjaV8@nz&=z{U*!V0;>h5x3IA)Mfur$TwHj z1KKF40&aY>0}#_6e4_n->GQQI5cKOdU-@1Xz*1@fJX{zP^yreBu-vb;9`jCZfWJkD zsZ0AICs{7PAHa6J)9&~wA@@yF(t61Xhh={r5{8}mG z5I&}nH2h7M=!z&cS%6dH(EAUTtIBIZ$3v~d-oVdvMz{UlC>T? zsvs_!+FRa!Pdu22EU-ivB!wd)jpTt7mh|Gt-TX+e$YkXBS|#e>I$drtZi@*Gkth{F zl2IVIxK*)6#A+GiFRg44uHoYi+jsXbs15yRW z#Z_A&uGU1Yrr-m?(?JZv3UL%czO@Oo+Sby@xpC4c7R zYGHUQW@m-MrSlpA%loUzK8*FMf5+)nbh#Xg4^@arKCkO@!gX=#=I(Z|var7qF#kCB ztnDcu6ZS>%wB(gVJaa0@d}!bTqOUa$rmiz7awO+>S<LO-b&HcBI5$`gc=Q)z+wL^dQf}?753y3p{x*uO0r8$>o zRjxE@b4UZgOxW5FUN!kncvhkt6!8-9>mq;gS3{R8COptk-re!_1Qg&+YOGGakVWx?V-wi^)_U#a#*yrT7*J%D6$#pMX}!yXgLuy z;_%D_YUAggU4az~X@etKZ4$w;XB4?mm6lm3_#i*)nacL{+tXA@;``glXj|xlgGbF4CGw(-EUar|@5g*WxV3in|fO1PHKB#fk zNK+bCF%}52(5y6Eona#OKc( z^mv$I#$kS2W1=eX6(nMtK3Q5-)r&N2OG4glJUx0$y`vA!mw9SExpl6R=PmpiD|+dk z+vTM&5kEjI;|Y8u)MdoN>52~XuIbgcRk~O*$8B^eZdmf-L`iuT=e$At*?9$H>UD3U zvDrtH&M|fj!Ww!MVZOEyBQP&*;N&v>jPBvL*REI91b&8^Y5yqH zRCe;DZPOWK7_1= z{jYRpfjrx)u8x@1twoQ4t9o7iRqbo(@jKyk!Xdf8wBRFr>+~}s)T^w4*Tb~}H5QVI zd&X&PJ8W%jC8@-SesXwbMPI6a)YXjwA|--5ZHGr<~v$ml=CWDo6fe&1gDl^3$!ZmoKG-Px4Ph@%muGd^CQ=tZyP^ zf|!e{Yhp>Yt;nV$u&}V~T5J?#wjLRpdlB|me6ma*V%4=N5k;BdHG50-=GuNma7|Ra z_GU4?bDd6oLF|JHeQdj8baIgrKiQ$#-++8yVyvB?u2f}xapO<_>{KDJn<)2(5 zB802%^%FTmFSgrYzf%;OtW+i2;Kl+Oi5e;?!M+32EVo!a_91{js3r;|G58^^8r< zvnB&YFK#b_xMBCN^pViN1GJYlnQC#b5nZ0u6j{_L8p3lds879fdESMLWt1|+bHfdb zP=%t~M0T*)pt63ba62dALRqONw->X<$hcQ9bm?b@0s!NmFR$aH;^6nK?jQc@qN?*- zh^9CmN!8;ENYgy~rR8Zx!8)T6|N9*OAA8>&)pWOQ8x=)S1XQXtr4x$u4hqr*q=t@yv_yKZQJP8>kRrVrdO&)O z^bS%Zz4u;1C%&KGx#j40-uccw7HJyF2n2s#|YU^EPTX%v-2BqIY z0gm}9+QmZ!Gat9|%Ds}LtD;2n8}a?ME``_6#i7&Ut*YO#--w*aXFdwU-XdT$+E4+v z?bQu%h}JAwPmI|m*#(*+{dpDHp>vS(BBIH0)JO0;8kodknYo3HEl+!X9E*X!PoU4{ z3BZ#O-d(+J@#gO0`}$ou`D@AdcNrvD^tggDx3BK0QOHcQRdk-kx^ 
z>vxY4C%e-hnCxW3-s%2x)C4(Gnpv|UwjdSen?EER)3@5A7|*i5bfrZ{ZWR~06w{nt z;(eRgQ$AnssO9ugrB8zkw%tl$ut}Hps#lU;@$DqJ<1aJ{VUjye*vwhieVw}Id-1Xq zKKgnsGsK5}rZ`2nIOOhqcx9&xnY{6N&?y_EnA1vV-K#Ay9>z(X!xeLpQNF z5?-BWoJegS_qItO#$A|8o?Qw7OHfUQTvCnR>=D9%l`NLq%F>OM7#Hxqi0m|)Up@(b zVb16BJ zJ2v*5KIn~ET3;d`tENff(Jpc|P=8WY5~(w^7&l8tF!rM2qHD~#PtL_;TW;4LTd>7r zt#6W_+pa35Nj|bpbR%Qh&Nh*aEe|^b>LPf3XBO-wukl)GPq}oilaf+D6`BY(5u&me z-g}MAaxK^EciJ#|QW>NYmA@~~=VA7a>fyJsCpOU{sL8wDQMR5U1MnFffHW_qDhw-P z=#*W2^hq=->EQ5qrADwg?c^v97)V3 z+zvxZLL8LipYCWSJ3aTfLwJp0z$S0#Lo-E0J^ksI^SRKEe0}H0X4F1TpTxEuCy)|o zm^X65FZV;^LQdhM#?wzaKuOEaWQekus_jw^WgJgb>JRHWKs zNkdFSIN;KDw+ZaeVe$%H$h~)TA;feKi8W^ z&D^jz?p8hi`OCXd;jod?phgdSDd`G1g@`-r)1E=!_A`g-s4U&Ps;Vc9pbw#aQdG0D ztgMx9L17^^4_Z~KH`HCj)DZ7H_3|z2=mz&|#YOO=-S4h;GBjB?2UZ%y-VojpJ?Qm( zR**c0uVrP@@XiT8`CE;wCH;=HS zwwDNsCN!`Vr*Ydy)L0nGT0jmJ>FDGrd#mZJonz07$1%}Hqehmsr?w@h5^>_~^VSm! z8{~UDEgK^%EnM2eN!N;s2056@>IO|pDn1s_5BSZkH@CNo*i~F~16Nz_w-OhdB7(s2 zBUrSH#AxQN^(2E%(vB@gIrZY# zyPPKdaTZG4q3N>|UVIW{y5Cwqv09LP9Zy%G3^v8T+otiV5qF`*!z8trxbEJn#FzvQ z{g5b%bK|{^{>vWct!Ty;uVRC1+nVY6Ylrw*tN4u7*3pONT(&v>7gQpB1H>GLg8(M& z2vq!f`F378y7T!B)yGsfjAgVtKHz+4y$ea9Vy^ni))N=lQ5$1e9!@uq_mcDeRRKj8 z&#&U^N&FdXT)__E`*Sfm;>8Y>E7^Lw+7X~YMQ}@E@s!BSQxW&UZ|QMI65m1exsrfe zkkL5>rXP^{`uAG7>`d6rF;~c=^1k&=4zsfIaXNv^iXiQx1q~O*j2$K*eLi*XPkkqcxY4=0Rh4xPSe-OhxBE|+rN)LZqY?dSVw zU0{-|z#V&wJN|B$L4Gx>{9NeOynrL8Po=d@@KNFSZl%xj*iz^4iF6%^;ZA?Cp=@(d{64yo9ZBIR^=;!ldTNREQT`f1>op~G{!EjkSis9OPT4cih z9I>;?D0R|Hr`4k@0!SUUogJpwHK3*pHY%^kdQN3)-wTe zak{yjui2d1Yc4)0s(daU)9tcaUOkaprNN{=?T^tkRf<2AYnU6?z!V!MwIL)5Oi z##-fd^TvHLjD(TBBM9-)qHKRU4MBn3A zw70$t%gVr*=u$gnyCNm4kAkMVwUiuGtdMR?FRNq}H7p(kHc?)bpFcEt{e^g)I0~Mk zyDEGTo~K0LX_rYJj2C=SAU6K4Ege&6EZt7N?%fJ{hsirXT9GZum~$JT6&VfzGCY5Y zd266a0tbX3R_Ha+kSESEx5mLg_fAI8-_!`i2IR~pmp~zq4DmoH3sl(*Ca6# zn<6>0zLu02lAQ`^uNm@3Y>$H?uA0(PYVrHeZ4tsQHROG@3`f3ITc#yyQ>vDVtT>wz zWF_cqm1ypzqW(Ay76Yq(MwJioNjX@WoF|3RhnFqe<7MCF+I`Oz?6@R4=x*NFEg(-# zLvncXj)Ev*^o>Q&mVij(S;Dp)S+htgRIPYtV)-i{x%K*7vRcV-XmAjepT6JvY2GCi 
zCBLmvR*Q^-uI?WL&+F6p%q#A7iG0oL+p978aIi>Djl(U36|&FLM5sH5N~I$ zJYS-rWx@Ri=VgkonBKv^v8J#bUSbMfx)}8pYDFNj-Y9OAT%2HTz3%PW?H1(v9n=Jr z|MO{De8%Jm^Xv(ViQgC5*)3d3{MNz~j;2EjQYYL8zr3gO0*A5h9&NdDP4Mj_2s3c? zp|*V_e0x%}68XMGETr!}{O2sZe+WGjtyJo@;tKNZvQ@HHuZy+Bp6I{6@yIh;$i(N(Vz1CS^A9Z2 zJ5w3`?^uN85&6`v!|p4=AIolrJd4qhSl2Z=XMt{li9Gp0J07kv8|7vj`InPFD7Zg|JUDDSd0Mod+gS+x0bSEh5W z`H`le(E5c(|V9+O2FT*}+csITuVYEwGzw4KD6chX)Ch_l9{-v+rxT zvYEe23!>7zE>#h{7wH416~-6uvOFLheYcd{sZWlu>X&nmHfg2b18hW}9$c>U|EYqT zTU@{cfSGVKKoK}~0M2D3#9VLS=n10Y*N&LeP}|W@NL7>lBiHyctPAPsg&bX`zCY|= z7i+aIrn8;_n2#@o=<>CR6o%|A2|gdYL@pk$@73B^%fBQ!yzjL*0xUENlg1#s397co+gu; zVaK}k(@~gu|NhZBzs|BvE?W#yH3{p2^RowQC3|CHJH9qrM^2e*+^btoUfCCNrEj>r zqs+=#q`fH4zuibBzE!I72S`-q5!Bog`(VgySd=;D>BxCdlXHa19XqgF^m@^?ha7r0 zuL_>#`mQPd4S;mzM_$9UJ)isUc}-MgrwCTr`zbD2X34fvNNB2^Sm{AJRc2GS9b*C$ zjTwis{8-4B+wBHpDxUHeKm9?aAVhqtqYy~#vrv|{y2>qb=+0BK`i21Gw zO+$@2b;i)`zk)&|3VS&AS|@2sx4(nlmLiK+0`Lj6U@XQXj4N|mA@aD+M0K9?0aS%t zk*QfnEhJ@JD}iyBO9Yq)TItB8WOj*ZAJiY)C4F0VXkPn+Oe9yfUMs`_InJM+ z#T&O#r8kk#+>mqUvbge1GEA?oi)69T)?4?mgWmdUd@7usr!Pn#cX<-47v2>mzeJ~_ zdEnFW?6URFSmNlDAnhpd;&A;&} zHU}QQrGnc;v4+_eEBM>TGz!4&F@@H0}ar z401x#$2-{Me?q0a0Fqt`2Dr7&2ee;`4+0VZbcJ$~rHoyj8r%uDagG_W! 
z8O=9{zN+ef2c6Nr1BN{X-odwlB(NwQO9)(!sqsyyBO0LV*S8+)@!0VW1o+X_`|z*b zvqonGfYbL6H9;bYWwRYlEbe#E0j~!mbsyDb;$DD^do_8V$2I0V=+bx4DTTxgj>-vn zjUpiW_Z@T|Lf!tRMD~wAob2c4z-RlFNoVu(H&D}YCFvi~EgCW%!o-iP>x|-5SML!i zrb;@`6_5?d#4FD_C~2=+qy-Lk&3gjW209m?A40HmK;N06ARyb-w^6I^KEIOK&_Z6B zeMw=rC1><%vjVlP2Nz9<8T{w3(PL}x!omIdhwk7nU;LL}413XW@OGKG01m7o%^uIu zzpg{aK#dS1!pn}50$Y1d7zyD71eh`-U;TIa`G`mGs?xMcFKYGfCz1w5{@$1NvV{o z5Uy>OTX%FFZbY?+Z-S^!eRU;%KApw>2%$$dRM<7_r-&L8a&gp>NlF?(ApQA~m;L#u z=8Hx0{M*y^=V$hpvwHAEEbWgUU9m_43%_l|S;*uMUhx|IoIbE>wHd$~0r{T7HoDhy zJF{&Vbj;#5jkrd9{3xfLpGx43=0|)kiXZxYCw2k(R{|i5yn*)p^-b$P3_in#?Q#EA z(dYLcE+;O=*P3npMMW_hvoilsK2&c}h^p-iWnf_TfN#>lx`9A1166^OBLw{ENO?-F zD0GqY_EF3YaZ~KqQ$)pSDeULSI^d@bzXAd>+?t#6A@GL3H}SSc1aJK$|4E`VRw<_9>&~ zKmOqSL$8tFZ{+{wiWA~|y%5TMP)CP0d@86Hw8-pT{g5xrBsF*9F1tz-HGOeS@1;|z zd3Y+_30h1c9HA=dZe-qnjvOB)bns1ZDp#&UsLZMy6PLnIcH@C^CEw4b&|7~wE;{jR zg@4l?8)*bs{|o@pDlz8)&oSf$U5A4LLg@7j_sUZz zQl5UWGe8P93Rj~cU9EZ-H$2t2H)hCr;YAXQ1Z6t$S=(ZnWA|nDPp78q$G0U5kdWS(Bl#Y(8i)BH+V(&MeiGbRRr^4}O2iKt{YLG$?l!K!k39 zM+ki#aIdElr~`;{O$93gc!WSKI{@F02!J^6C#C`bx_pVFjeqg|`28>cW+-xv6EGB! 
z8U)a(@W6LaDEeOwMHB%i8bXZ3+07C_o1}E;Plh7@X>Y8_uC5xSSQxRWEH^`X+Wk#K zBf+w{N4cH&s)D_0==uU@jch)YNa+*j8;*4(6*rwnELDa+W(Bvjr-L-YLw1M^X zGC$8O;X|fMzWAAQVQn=f$-Ve78Wb$Jj$}Av3GTBEy#36cb&sw{Y06DPN zd%_*cwMk$Mrh8ZuJ!#1c8^DI!V6($^5HI0L$qIWbKrMav0B)@3CZ4-gb)5bgzH#Rm zJLs}!fYswh{H5cZV)qvp`2VxVO>Z>xBP{39f_`g)*Dxh_vI&GaJAqEHqT7HwUFgxC z^hg319U~{@?yG1MQxkT>~gn(V2snJcUTytf@&1ThUGKmiE4EOyZ;Y#Q# zGp2z>12Z|a!~j>J@76`Mu0S|v+FG3)E$CTbi3gA!I)Ew z^vVPl*EpR*F%H!s=Is?Tt6J7qP%1xssQ2=9mvinRWDN^%sAABR#!F#+{R;HuP6=in9Kmy&yJ;tCa`e0G)Ry)d#lcxfU1Blcu#f_De26haKSU_Id1rzKb#o-&8mEuhT zdtS99TV3m6r()vpQudk{c(NpvIj#26Z-ZAqhOhqUwS<{EzPSv?026fwy362XG<;qJ zn0`Y8g_jXnN&%a|&YC;&oPi36_r8jKK!@=O@Q#0@k$jQUIltoD^t$3>8!PV@bh$U@ z0I;Mxn)Vqf!bQL}?2LsNsdbm$lQLfhoujWbd@X#d{Ym-)54P3VL#noz+S9NfQq{{Ged} z;0^z^7rF2!f8x`iSjsU&1en_{a!d0iW+dPGa%hPU`3Zoh>X}gG$14VxA+<&2gX~?T zT)0wegEUV)AjJCM;6&Y<{i{_HMiPA4L&{Zl2ToCQM@W@jHgmfBfr~s@uBL@!Tf8+G ztl*m??_`B#c24%KjbRVV$8SACCtLdq0$4OWBlL^)5GO8%FEmr8E7P_}(PyXJ?#yXb z!T-z<{wuO1Fr5b4xX8}RI$#k~p{<@kUtrTZuKLdg~>1Pru`6{NViVn7%rHI?TL-(*(&x@wcFa^Gu_8!6& z`OwqCjiHl+{nMT#cULc#)BdeKcX;ynBTI7|6YD~~$3-n7H?d2@h_t}1U=0YHW_Zi| z{N`^?Q$Bx7%9sVCh_lqf7iyIA)*d+RhEQntQGFa_y-~@uxswFPTXNjDfCU(IHdgdwbNT}{vOVp`I;)Sp&6TM?=sB@^a!vJqK8|t3=UU>wG)Kuy^Tb z%{s^j2E4NRLdaO{M#wd@S&JQFo&sZi>JQTXHuc6!zs40015@G6H$H+8xfiV9KXzEU}E4CJ3#hBFsn-60sRfH3;07Nkm%2PUDfIDp9MC3-83z% zj(lA6vIfpqojj+avbVKjfd2wEYBHtAcF=0IP&sw`CStUrNMk+@spQeOGT~&DvnO1* z7o;4b;*TXr2f{9$VPH=m=J604i8YDFv8l>t`H|MEd#`AH2xtRW5NdqS5V($AOxM&S zgUf$}p1%nZfF9ENaN|3m-6;b8H?!O{J!l~1A|OK-$#g@arFf$>D4*4-co@AP zD(jrSV1qmcJQA0*17ls<-;QNed7esqndtL3?UsTsdfDfi1?d#a=5)WXUsYQ=GZIJc-{qu8M%=*4eDqXC@XN-D-*tRURTE zKg*3Bth={TWnJ!(@f~EgdI%wrCNV25oqY8f_ma^5b9JGX>v76$N*M(`Rh7Td%}d``!bn1-E$Jf(n?SK>U%1XClihpfH5ac_dE^mneIKyktUqX|$O zqag&U4nQNxRQ8Omlja0;1n~Pn@KaXt@;6wg!<2*fo1vrs@&BAguMw~#JN@b*=OSyR z|2xmdAFT>AZGirP+az&x2>>RlfbA}KEB2K*AYJi=2HO#qKLhx2c`b`&h@_Vl{LTk9 z2fkg*24B83OLKMs=z#$_+pk|{7$}$b)a_IEQ}vBOfSmsqgH=t5Bb{i8{R=w;SU_Im zKm-2EQm(nemyFyAEN?DvaU 
zpTEh;B+W5;(sxvpZ;JyzdNXJjU#1{HEYQz^$Raw%);r8SzB zgz{_W*HOLIQ(b8C;FPfdYI*6c{`J4~1c?dEQpab*oOM+WtdTG~cZ27!T!Xg?srEd{ z(tK!0`Lz~U9LMsE5}zaS@1LL{EtZUV(#RATUE(9(tfn*!Tdt`-bt-+yR2CSqF z*7-Mb1;GE&=T_@q4sG8*56!<=EB~(wk}N-h+hG<99RYtbErKP3Wp_-|^jY;wZ|OsF zQ7m8IFM%0(mnH0TWWH`rO_hQBS*`fqK$qPS^qpG9Pz*qpHFEnqVm;F4Pr+Uk`QIzr zr*bw^@n1Exbd(@uv&%L!G5K;S!W-kP1+km@HZ~~*G~F4Z1a45}aq}Dxzt6=>#);fF zWnp>QzV^FwpPQ4#X+2k1R8bS}5{07#-XIszcrmw|e?{dl+|2in(mw%mFeGnguY^k9 z$YXP*-6fa=Zl0en$UO1VM_#3eX@BqitoXnDf>k+^@stI)K8wK0zT$_UL34h2(0>7Q ze}<=INQz9)lXLa;Ui;yH_zrU11*d-AE-Q=98m?oj8<-21inqvTPkj2Gr?W*H%UD=y zbVnOH>PPA;GaOEPsYjmettF|;1sONd1BVU3VS<1u8}8_P6w%pU4=Zq7ty5vcO#LTz+mAaP)iD} z>TJutUYj{~$$qX6BrBHCPBKc+bs70NDkZfcQS)@@*Nx;x?7dqTb`{DbxPi{vAHOw}kDSgr zXP6}U3|ev~I~&h=gW+R!d3A+H<&&qaFw)p&pJ>q+&R2?lxnH#SpnYN0YVtY1G@$o7Y z%N{Kc;Y~EAm)|$K&C#H+Ui_uEVxrr@Ca0ww&&u)qCCHvy?}{4gu$^zOB5=rd=tJSI za@gMxEH3t)+jdEWms8?s9Dj*eL^c`-%D@%!3;u@eYTk zA?zm{thpsc^(H~%!*HF&E)#QuGKZoH&)-v|(EMkG=ARXsZh+4NvXVF&`frwOsxttI zK>hV@jyJQHL;2XcUb=$pRVF_bY0!-QsW7$)`>-9-lYNey72{DKG95$}Z>EKkX9ft4 z!Rjgfh$ymFmr5O@|3bDhh^c)3zq!$_;q(`6L292Gj>^ITeJ@5ERa8t0)5>fK(4LXFvir9%0HwwL8K<@zILQt z`)K+li6}Rq?+d71H3Ku?7%PC@Qv`X}B(Zr5WAw2-1bdwmXmA|Q%>E|GgdXkUhgh1u z?{;$C;cb`%Wgf7A%DWz}K3wb8F03(-Y`9wz;HsrLPM5?TTGvD!P%&b~1P^K^RjUe4 zyi?(%{W-n&)R$RiDvN2dlvEW(b1SnSe=MG)-+Qp=j!J|IFUO-6cV~J`!SReq+Xus% znqGLdiMf)VGMXP_AR|WiBnfNrDT0}>o%Zd;`iJeJ;or2(QfJ4+L>j)j2I#>M#nJRJ zL^;t9kH(VhhF;}eI%$|3E#f?^}?!KP8}U!4^KdF?g|Kz zEvstn+dftIV}*_n8dQsrQYfq_)`rR9(o%qGgh2W3XO%1D<29z+tUGJXw_bkM0{J?5eHN3~ zB|jhAHm)u>^}hDdi3;pG!vNM*=+5+V@#1G!x5*w+)i(tMUBHPh4MmpaT=kZD^=x^_ ztHvphDIJmzYAdOuOd3x@b2*A<*>CM)tW5_->F@_6bL7IWKaaA#$)(A5XYQu+FuJU~ zTmYySIL1M+(C#Cb?@yS9HUKb61_)liP#NEnHtV-3pR5hPUW< zE?FFq93mI=*!JYj=iOcv-ILvl<#=#SlsX;Zkz}NH3%VvVu%2M$g^oLIS(X zm1Y%lLljo}@r@U#$nM=F*0`uVJ`WNm6r|Kp%s)av`cj7+J??G^tI-FNoN|sN3OLfE zJ|@;4EfwaQzKY12&B)8ob%t0Vq$klLs~rQS%Y5wS>I1i~%_b}Fo((XAsHVe%Z+|o2 z$${vWPV~NAcVs3T={g8FSos2R{YschN?V|KJJR>dT!D;{zezObTWT)(C7y~?nk_tk 
zHqqkX*DZFTv1hE${S`hehz+V69o;fa)Q*yHDjx-kaG|4sxECEQ(3idV>bK+EK!d?J z0H^sgIvWxlc1&{y^h}H(5+{o6?or-v8mBgiFz}BX(KyyA<1D=WyVX$>>MB z>C-fE9^<>pVo^W>+HVO%J@^iC$ig~U- zYe+4DVL3Xz#>3S-Xgx*+cE2lyYW0fy?N-4vuvP`~vMl}AqtAi}3wkT(b+)Vv%-)6% zX?>3Z!(`A-@-+^PVyH&# zQs=@^VmC1*QH|D4D_b{jUjk39|aJ1%+lf0mh$KDwQ zHL-dbbO*w6BMHza5r6@fA`LO+crn+^?{kJPl-+C?Q(UrKORDwPR#INl+ELET7#pw6 z5PqpjZpfgvMgOX>W%}V$+)wtf%$XO+Ep|>gn77)+5=`MCP*6=pMcfodv|6;{%~BGr zjVdYOC9!pFN^*IV3^k(5co znqV4IC`ld3(5G6k3;cO@UHo0YYfJnKn#SCq(+O^2r#sOX(*x}+Cr8~k`lsI32FDF_ z?ub-@Bg!jxBw@p@pHY!YkzkjD0q7@A85Ne#ZmTz;&7r9tv*2!hUBl9-(vU`-7L&(j zZW{PqW3S0@UV&PrdNUNSF(A32=qi`8vXw#QcmvhG;vC`|Uq}R>s;CD~dxNkVt9uiV zoDe$_J}E<-oI_5@SF3^(~`E(W)uXh%(A_m9<=YCs-fpyR+eGG+)Z<*{o9HWbB zo)JSFTGo@ZxB3eQnkXjU%oymY?p%(!%OCd z&TarOuXG=q2`Ie)RF>r{)l)x)7qZgy!{tLK`T_^cRkuyz}sbYnNUdEdd;ho@qW zP7s&;WVk8NkV)D(B$klMF6xjR$9xt1b{oHB%3|kk{AomX{si4U?VWoKA*~BN-hMZQ zA%@f+7?hou4y+KlyngCIk;^W8k$JF#6Z&DI@kh3^2Mg@!u7kukmoCwVhc0awSU2hp zA+`}g)-ML>1%yo(=A8%JROi{`@O=2E_t3#ZR_>b~Ti1_BHWvkj zwuiZEU&+ZDw9Fk-JgBrw-_kW2exv7t92;@Pe0PMuhql)eKiDqGRSxuY1HF-<2CQb_|f`BN7x}Du6ssf4j2D! 
zP~={5#K64&`eszM(T=%}^o!-)2?Kq=VR1s9A-mEdKi%{xvHF6^`wK$0jS;RFOLHS^ z=VQR!(4Kl}heq|vAy0L9hVD3Dp0J~8$!=P9_~b){tnsUOprzpXgDr(at>JxkYUa{w z?4MB8gV8NLPKJ0w#|DGXtU&^oLND7;(vPWDRhK2c#God!q4Dlc!mHruAwDXp#;UsT zB}1`|fK+?7`P=t#LuF2bGw;~i+TVocmoB1mj*^?=p6Hf17kxXXe)8^RUWdVTIEgG; zXr+av6_<(H+q4rlWwH#pNue6#=yG^tPmAZ)&$xrl9y183hA1nHp8LBD6rKl1 zVc`KwdeXMLu(-|A%V#HIWhr$}cBP`^zUIB8*NYm8CY5sM;xw?|94v_&F_k)hl2|nOR zbQ(IZWWotm5*+8g5}|mbiM5xq z3u=O%amY$Ec{ov7XbUj2$jF!V8$Hk{DEK(0&hWnZb{m}=Uhw*p7pFJSU>k>7KXl%X zDV#B$QEO}C)6f&c;3V}IdxMliLHLq~O{6E4CABa`*!4(rGP$9ec8!Y*hnHI9KsOvk z_nK_E zQnq~u&3*@+octJ_(#|>eh9$yLgP3!o=~+|=0BpdWLJh@8K-9LNzx!U?>dvgCf6WEO zb+rpNv9AYTqvQGtZt2NS&F)AK7Nj#tb5LoqW$j_7JBZ3xuN@X7urm+ZFM2R`voyWlY}FFZ?mktDYcCUUyF zv%_jYJ5gC=#k8SF^!S*5Pja5p9AT`Y+vclYH|R+|=U7(>^EKzg+VK$*Bs_41tc7M zaH8*^cHB4E8sL7|XU_F#LB|zDW{#~^dCeE;jXeoCV4joVyhh7)_rrVH-ERFtyJWWK z>GmI-b{EPQ1>{8-YDdpPa{^mPc_v*0M%X!x)>U=g_HuHFKU6k(M)f{i@)0J`9Vxh0 z-+*2<9`XrnK$pJD=VND*D`Ab%O(;myA}31YSfX_i{6ap*wEanuBR_2%N*WWR=$8J- zY`~XR^WzIcg|nOsYPe{=4BK5hdw8_iU zb&XIX5HFFJYH3q|xXSC1K7GU+43(CFd6O=~!s>V@qD1EICx4Cb`y`F5)0D=Q%{!bx zLwZJU?5MPqxwDz^MH$Ijndx4~v!wJgu25WuZVmgBh#1z0NfsL)HB~Jx^3xVkj`h$HCb2crdTQ7BrWgoC)IbJBbBFK)NlMl%oW#mDkRz?F zVCr*q1WPuPHG0eXOZI90_@+g{Whc~~)e~Off-n=WC56Y~l2fLqJfAlGV)l5-QIrS7 zl}@<^<42*blCMJ&dM#|M6E3@iT)-o;$D1z_5){2DJTEkjAT>klt(BHnOfXiZZ6+cg zWy6G%V&ALWIdO?u{P+;Gg%kWNsZk;XioQLgy%02u8?5HmwJF|u+K{S9z&1x~mtgWq z0&gRS%Q0O6!Ny-=G<63M_0$Xu&a3A9ksK`Y;H5~+!?)j@djS|?g{{;(i{PgOE8cIQI&iBawM=ZGtzz-l&oG7sa(FD-9dyn>mubsfp8KYB>ezWjH zAV~Oozueg)m_@4W0a&Kab_OpW4!6qursCzs95{)?vFU+VtZ&{dI8k#UORl&rFv)Ov zkYj|0)v2wQy2)d)yrEw+x8XzLhd3h-js%TQX(x%q|F@hapJLwA+$Gx;{^c;{KsU3# z%0C^<7T%;xKBO@BjBNg(w^k)@6>{J$xIOVf;3eA@wx7u_jnG*JZ+gyl{$zMc)9F?M zTjb)_ftn3Mv&gVFUUTUP9QRif7M4-!S{tOS>%n#}=?F*Cu=K}=B7-;fnL_Q3w3yzY z<}+M7S4_Wy(n6%Cv*zPyNM!G9;Di)R_(y>l2s+zv2;oG!LyhXH@wz&X0LGrYz;P6- zo%{)B(}qRC-M^lbz)F1t0w)C*K7I<1!ZC!wQ#7kAWB33V16avUn4U?e zPs^Q}ltoyg1&Yl(%HL&rIyLyH>=xLQi^}J?TB9L_wf>ye#2mMJ9vjhhzNGRmi|H9a 
zi+SK_EDIIDGLW#k*&l^$}A-_p2i6f!5}FFX(V@+yVk$^7uM z9qL@_nAv!F)1XsDDXIJ8Zu$!T*Hgh?`Edp3MoU9)W|UObg)M+}u;48gwa7Dx+hu7L zB*Ud^A_Uh~2Ub~lbu)&C_r|UgLk_bOXR0>m#FK_now8s&o)06X%2A<{eI>o6R|v1X zuzA2qzUp0L@4BL~&7(4_RqW+1BJO*@iP{gX3Y=l{G|19D($k9D&!0FC_;m8pnvf}B zEK2ij+odhXwv|E6HIL*wjJ4)L;A7qq?sB*le~$6~i`{spq?E;X&!94A{R-%b(F4f9 z7Or&eM0)}qy8AUl&6#UDdfMZB31+-9=1~vFyDNxZ{dRGmJcnM-mw0zZ zd?njs-cbGkC9-a+V6ELlc}0y|v(Ww~w_PC1bV9})3!qyz;TRONvU|0^x)?rG;7&~3 z2^DTWJXktl_~5cvLXyk7H+r7j&G7Ehxh0x=|ut?PLk zWR`>PZ@Y%P#ihR#){yxuhc|C%A`h8cH+p@9Kcw5-(D!k4JqK-K^G3gJ%g!rJgWkKJ zy%8vdA*95PS6M2atCnK-qXP2MTqAxSik#fDDF+ataPTb;hqdK9ko2o+ercN%Tf5H% zGw*x_hN;@jh^2nYGa#$Kq$MSpXL2X0t+TQoKMDCq{_F_|`$>@-ie8iL|69gE{@SR> z|JOfL9k!k?{91aw&H1=cPoCA9{BD#y`B@I;k%uKQ@^)3`L`apo&2N3BrouKIDUVPVg zkPgb3-(-yZ%#u%xK!cjo*Qh_UnEimqToj`BY6RtKIw~ZzZr5K01!o#B#<^Q{?(!al zZ5OrZ9znE9zIrN}90Un13@ueeT^oNb+E-3LeNg{}x~4DZ$Xp9`WJ3@zu&(G>D0N+K zK)g#vW8U+l(AWU+NFM}qQq$pXz{eT)&3hDrO6-VirfP{|6ApY_B!8Q24aHItOf5%g z@=c4zXCKq&$wC<;z~IaBp_^;0jF5}k;VaJd{lyZD%}q;+`QiRmXL;;6 zTpU|Lp?u}2DTDmFWK{tW5~XP7^`xa5jtuSiF{_&BPT1*f{zVmpO0Fe)Sl<<90kJo zb)H)&)xNd$d(e1GFT(B%8OQ-$=7oCWWKqL`rSOP1D`vJD-g9tEyFF$$ZawO}hwWfJ3BughJd{sompwe@BJ8a=XI zSL_Kq-Mz(3iEU^!%O*|}97$p83gG+Ex)|2ozTa{uKZ&)~B_MS&MXDjc z9(kNMh$HvJYDl-ALU4DkW2Fp%Gy)0a(fe__Tj1Y8E0;x@O(%FGXL~#Zd#9Ehh;&BL z$)dj<(@DNQWR28JP$FZ~SYFI!?!C5-JK$Vr^`t1Kcf9gv@-yIkR!HFFtfM}6k&~9z z4WTcBZTtD7;54$2_0qEI)kp)0c9I<4s4A#Ew}mh^wP0De^t>mLGR`HjPEd11&)vU` ziOOxil?vJunGSU}WJ5byMt^P0dh1y#AN$$O0+NtIiA}=v_i3`d7}tN953}k&n~I&# z8mco2>2YX?<4;W&M;`P+r4LfsSIJDx#!xgY9ig#nnk_*$P(&Gnv>*HwUwBl!nXs(X zZa>7+3;m`(S*4e&ZIjPfS{W>l)28-VqABB9ATq;Z)Y;#dZNF2a{1ul~j(odVyAb_3 zw60`IOy+zAY?WCD%*#x=(3DJIjP=*zloB>B4`-X=bcXbG+6Fe#(a|RC^WnsWzRQ>5 zh^A?2TBDgh#u$E%K(uEz?>5^l zphh(%s&FTIQ-xYnS9A$d4Rl=73~ZiQ7;QYw6VTeLW;RASHG*1c?RP% zu{L10Y1F{Wi=H%k-Ls;V#0+O-x876PeNG!)Ot7ddtJE~jy!nuFe_G3Rdl8lH>VrLO zJaoR&`!uE}X>?zr{oZN^qyUb$o|RD)sZyUDrTAy47lLXW@X0m!_>=fU4Q$l=)7~c1<;Y4&-zQwB$%)Qr$Wg5yaapm0P 
zEyecK2F`brhsW8lQQo==x}7BBfg!8k$Vwv{eLp%j0T{{;%;V*~{|(Fhc3jaC^G1WH zwP7Qm_W3hrsfQ)+a;_R-);O`RV6{IPSe}X#W2sa$Pm(?vY_|>N{ke5zzHY})65A)m z{etWSSm|>CY_0JRze=MV3YxQ^6Pt5)poN=)hcb#7!qV630Bw~YJb*bVS5ZE7Dn;EtFA0S~`btH%h%JQkz!tOvo*S!s56rn~4IlW~g|&{x zw31xQI`Zhy5v?Aoc$b&pq;m+WIP3Acq5@q3vx^F4$Rfqd%2>`b<}4E;RG!%Kys~R~ z*sa8-lzB~2SvoT&aUC-Tnfj`lZ5F>$#9ABb`$@0a&IP}QD5vSO3hl#IVCr9xX)g;k z!|)ZpEC4u)POAZoJymU%M{B%e7=T=>gjNr$`|6~*L%zclz?mms>rXO!H@}7D!-dkJ zNTyF*KvtR?hYZ*(?Y_Q#i0l9~&BT5U^RhN{5+9jySlU%fZbXK9IJ7=kaTm{iJ_{#= z7DQW7r`>yylKA-(FL=*0ac@lUupl}xIbS8JQIQYk1{6c$NK2f{G2?q%N0J!iktU$q z61Y`(stk1QCmq(u7LSw1dpwNG`3z_@P&+DbFxvgy#shwnP~MHo;6T!aV-XY4Iq{+X zKHW|lEj4@Rp>|*)6neK=h9g}tXD)(kMg&Jel0%%RB98r~0Zwz6FY#>M^8aD)J)@dj z+pSR$6p^MNAYG+NSEN@F=>pPAC?cIe=)FWmsubzcrPqWSLPvTNkQN}JNGCw(gwFcn zclLhwyVicoDdU`R#`(^VP{x2fBhPs5`<~al=A0K(D|W6$sfzVH6?dcV%x#J^I%17u zQ_AhfQY{)%-JpQoP=eenF($ljPhH(BuL_Bhf~oi^HQ<7rzmeIMhKIf1T1mb+0O+mn z+V2WvZ|t?IScM)xY@%7omix|lj8O>SDc)ozVrHJSg+=6qihWC)a=+Uj4Rw!|`yzb? z?TA#P)0QPmE`f*uE4ixyj5|s8OJN7^5W7M+phQUwe0puQ;xVgnBW4AEMB}Dj?N3h# zmGQ*d7)Monr`FfmW^aYboW!f8COJ}y99Fn6LQ4s;?rl|mqCehtd@YAzZndEZ_K?gb z_jxy^+L*>=^{HX*uGraEcu-4VnO`IiJt|8|<( zi=lBJEE{p32aB(f;V8CPHnKjH`-J6X zT8n+r<_Wf@cZsWf#yTWN1pBDtZYQY?Q212lE=$pC*|g+^1|7tdJB7z&XpJXyT6xn zZ|0iqj;(Z+*#`v^*W-8N`lGn*{TKJ(9zBF@7k%pJ=2!Ru!sLZAABFNR{!YoZZG_kJ z{v1f{J9|GYw;QY3Gq)bL<2iKWyw|uU`wc1_8>&e~P4k8T>iY{%H|(9p<{El)lM zJ8%UClJP`z(mvAUKTv-2Y_%HK5Z zcs|o#=s;U-y0UX92X4-{e6S4F9BYi^8l)9>w@VSVJ`FCOyBvP5fNSeV9NpGPuq{UK z=9j_ag!(X2+!`);XBy9YG|}D0{|+}F3+ebEp#eKn-mU2ixNd{l+s*!_3`B6wrk4yW z!qfi@LOk(GwlUasW)(VL#N3*3yjt7WKH#$COZV%558eM-e8E?J4|9|*quGYx58 zO7OdOy$zn4#Lbx*G%HlQ{sT{Q>d01+_Hqp=|V2AASmF#tb+8fWm4afPMX~Iw1KCCMbg6*O)3WhPTxX;rL+oY4F8#*T{G+|@;e>Zi z2(^!?zAM&us3)7aMpQTKAWtTfxZ_Hg#v9LY^i6#HhJ{bUtL&~U%Eqw*AIJ`;LMIQc zp7q<#41%aw)!_uWsbey4xygzhkByb_X3TyEbO1HMFDJG&QYf}%>PZceGF5e5jfPZc zuIYpQ_=VZ2t=CU85dQ*SAMM_;Lxx73x1khDXmLQG*di$%bQw^@Enx}q@Fbfo7eyo( z&-So=wBmZqvZVb~fwEiGz6T(ba1`^o;;kpc=vfevkdUGEX*5R;Vg6$P>s*)2mij=C 
zX1(^TS4Y-ygFDn-#J!f*xFC^=UsQ>&gNHOK>ZAM5F*LnMW{%-!X~9XpU!(wscsETq z4Ha4A76sgN7k2;Jq!gv1g)7hd`RNCWum35yE|;cwxvwAVn5Kly%H~L+9i;9}#*L`g zz20E)mie^K99H0^tHr4H$>d>e;P&xO?M>~0LCeg*JDY>ulv{q5I9Ayq#G(aEkwPUPVE zp_p9&wZD7LX|VbPJ3M^T_*2SHHD^}wh_4^VkUdxOWKeDd$?gx5R2qwVB z>QH3>^W+a0Yuw5+jnUNGyM|r0I8|(%c&QYVfNSRDUCJM&K93t`y9+K_w)q3EbINZw z%z0b*bN%H-7Y?2%X|sme4`2va4*NOja63*+q`67i4st4H7>O>u2>!j{L^S{2B94Jh z%p@1leBhdjcMZ=Kt!&Zj?&fO16|9DL|9|4} z-(=nVpCT~-Uq>ANuBo&qJb_H?7Ql;;54K4bQI%r+e&j^d#c~wcFqe(4nlDMS(`eGm)v)?u$tfWZQ_COw z?qq`R8tbwdy)Jevz2+ZVad*K@DrK^cofXGC)nqB{QxfbCc<)pt`ApJ zyDkGDJCelsGLH{L*^|ZI`C5A_kE%Vd`u;{D9+=#`PZ+!O2Ohbxk;wq>?yEJs`5QSM zT}Gjdjl#LXOEq5OR!%-7vbCTXIB1)5|Q;7G{P(iTSE;vqJ^_@9OMb zQb=v0>rx+2-*I}UJRI;!{ZhiK)sW` z8+!&7vE7XRn_C(^tU%Wh!u=r;*}L}cnuIDI#Pv3YCV7O3F*)W8fSe6~1D<>z8|$$t zjru)ArnN648G%Dp`78Ks1@H(LJPO+F)?GsYcC{!JIk_=7yfrlz@LV36RYqZ(G0bq?#*_ ze8cvd{HT2{p7y~*H+5SRJNnZ-{+vaNBDz(3qnsA19@aQx=jVLMvkVC+xG^G>g^WHf zA>J##(Fu@zfjx7cD1Mn1?{s`Anc1=fFG8OqYgB;rViO-<0j1O(Z^0wpABM{)whtN=+?~a>i3tZ&f#-!yqEk*c^jF|I zX8);&{hwO5{|7a(zpsZ+_mRso13%$IS`U056y|}*W%#v(`Qaj)xX9Gb3R-%~5Zo>< zU1lAj$Hw*UmWxE%VVNey)|*q=PJN0(6>K__DX2(}*PQpALBP5f1*sVIOh}$$KA?n0 zvY^qFxe;q0xK_JNC*X^d7iOmJlPB_rv>|&AU9l$HPFY^hS9f~m=4XmQ0eZPJvzj-% zcl!C}CZmbzmB^1eQ7PNF{mrfxia0VR0~{+`k)tO679v*(Pu z#wTI0veQeOhvO!hn%mjdKdPdy4c{zu_i7wZY7&Jwxoi3d7R>V;I9*1ws%B0uEH&l9 zY7j>k7idSwnx#7>F}-NS&qPlA6c>gR=^ll9TYdrXFUa!sp$sbWjX$lp4r5IbRVYB5U$@ZYyS@B9AX%S21X@I(X)BOU-#40!RQgdwDt-J7mssWz2s zPKr)R`!symZ$Z|DVdPx_y-<7l!t0~^hL}u}m#MpYgjVbLnseBQ)*Ydu!Rzb$i~=pL zh*rAh>a_md0CD?rMUO3iTcg(sko*YM4Ch}Z!#?XVNkI$h2oIKfBZ0R0?Vi?w&i9>f z)|S!E7)!msFM@BvJa<8!)bd({slR@3$RYmdpss1xNt9oA(cAWn>F$)rlyVwRux$F> zOu~D$`8|Fj_bKsDay;h*)%@+3nvCMtWi;apX7;Dt-he32opp!dkDxL?*M z>-p4JBMFn@ly<~QZHiCy54RfyMvR{N+W17%k-u;)#0Qggv!+A&fH~ZFtWDr**wF2fUY=84Ouv8OBhorsz2_Hu$F{pd1Ojz4F3QO`d zd`v#T?A${UwBnyor!10?V5B|c{Oxe!2>VIS#?S6~){GxzwixNxqVC(Ps~e$#y9duz zN4;m!IMQhKZ}Xs^{Bs)k7i{E=_E_Nj7{?mD5XO}le>Y}2F#8VfOf4$C($wKUwfeKj z7V#GeZlDpGktXc7yf+)iCFN@wvk1!03wHH^ 
zKgSq)c6%(^kEN&7dchhCCiz=&z^Op2dm;UNVgnQ)Uts*+s)%}|D+|i|1ZWWnm+^hPpRK(dGjT@zH!x?@ zjEUcW;f6IT1|(B8-u*-iOLAfs0DF!CDn#3dQ$a;&6Pn= zQR7+5`$v8;`JU8WNFT6qSo<}7WuTy(yJZ<j!X!B)q(xo0YfwkQB!3A)Q>cMY(N;TeeO|#t`sB5A*_fumPb|j8rQg zD$4&pCr>}HkCEcBC6bn;2S8pThn8aOZ^yrT<(fk`0e#|WUd7P#eE>#y=nh1yiG9S%Lf-@sonV3K%~cm}9T znRCD_Sc9eOX8RRHZ4?%rg4CJ<%>Wlgsc$E(VeqSA#@x~`O}M#-&6;#uXaI2qIv&6f z(#Y^?!IoPq_^fSqS`jlpJuD6txV{$+aO$2#Dzq$IY819!5biSYjw*kUT$LxavOO$= zr_toxPcwt$ZRmSBTe^&iq%*MD=vhs7RbQsRM^9}bo3u7g>zAknfd`jBCFRhC*XHmAA8*yY(+H6f_eN^M3{oBe1x^;9 zm`=tOQwJeap{1)Wb}IxfME)}DiCh`vTFJnq<+{m7%HrEJN)QqN*jOf}jKBzL|GnU425^t?G3s9|*7>;)8C@YYvStdY1oAGIucFi}MKrI2mZgBgTg* zU#mB!yI6K$Zam94l47LLfF;u>vT|Z-$MhS)1DL}W$f<+6SpI@cn)DU?SnFFf~eI*bCmBD}PFejSXX!*VO~7GqEP!6DanJA;4VMPAAF?cz3gvh*c~rI8X zsh-yl^mWCyGkU$^8%bwS98DPK{?6?0I^ysJh_sKy%Dd2&DN~i4snu#p@M;+!*O+T#4h211Q_D&9T_R@bzGkegZb&qm zkN7d#d{aIoygG^Z+wqZ1cFe5bd^X>#NSQe0F!85a$Fe5`oR;@s_3Iyf)*8wN$!q}# zA%@i2244Lke#*tD(YkRq0KR&;3f+1JvxF|aQmBn7pT|}#nLWZF?)y)XGWssHfCBHr zg>2W8UY)4|FO5*9@#vKVHGtTzufqk}309T&i5GU+R8tz;MQ<|RPHB1rM3+qaq@A6$ z*q{fo;m17jd5MWH4UfMvg<`dnEBce0=v%1bn5HAMzdlW@B?KO3xp$pYsNnbzF4A#k zW1yXL=avG?Q%Aq$25hl8uBCQKYrWW%E=OAV;GI7#PL(j?!~9pt>X*xe-hZvy@k$hwQkfhWJLDp=lQBAW~m_7e%J1G@^4 zR;!UEFPucUh7Gr!;Bo(@1e`qovqpK-s%`h^3asd10gk4cD9(+%dU6s&u|#_r+KjU* znByD^Xd&25&A*6iKDJUt{DHT?dQE*+)B8g?qX-1pMGDG>u*({u_C+fmx6G* z@0)f6^b6gMOykDk&cUDI&%R&leDWrYZVLYnq99zsmW+gZjaF5MB|*sN5fXbF6i0tHuD1)ZYLd;eX`YGakUH2oILv~ z9vu`$Q)4EuCQ)tLYVI4ZW;N}LzDS*My?y?e{>$NPYmIZ+ZYJpw93Ya6_*LIB!`_su z`8E3<9c5y1HFZb3CXHTs7#o83I;%QqRfOuvInPUxt+fjEg$#q$2w zV0AZ7+3LU)!5+~w3wbatlO$rUCk5=ioZNcBTV>Rr2TM)&vu3jw^&rb40vktIR1VPU zAm$L*&!|b*0sD9n&Tn2OaLO_9d%dsV*88C?VP~U=ULt-%RXB3iydvnZ8Qz$)agP>1_Pya$-{RN1QptB`HAXvz6;}vr2#M@CWrBmz zhPwq@A}ULt?~fx7`1DrO=2to7YMcGN#B2!hKYV(Y-3K!#2lr{mS6ph)CSLL0<{`G& zV8igBJxeE#&Ge@NF@-B)@@-2%Xj7qzvy+qoP|#$(|3J4%Mq*Kysy9e%*KnR@YNZK2 zx!Ts~n@pw;0Dxx~2e?Z}V2UaiUHe(BNeKm~&W1RWKb*=qW(pHGn;t7O%%v;bmio}O z#`TWtdkR*RFvTS+eI4p^L+xk(w$n`YKrHdRh*56b4ex=_2Yn;!raWXJs#-eg(wUPd 
ze!d=4peFh!I+G^5yq*lEs+{p-rc0vF`tBrIxlS4>6SBPH27Y+{Vhhw49~WXvN3t99 z2cBwYRpZzZgX25^9mJv*$|32_rj`7ij_u?9ETz}WF}~knIG99}?Rmc}-*jfv+peh_ z2Aj2>wjx-FY1tadB<3 z2Yy_PdyE(zw6viruRm}J#pE%Q&Qb&DX9(lA`dast!GhJ{!0%XiOz&L=WA5UN8De3z zC@b?KrXXD(w^KE|?)aVhUHo?iqQljjzY`Q865AqJbj2=}cLSc|s=LBiTy^&pXWqO~ z{rh*;>}Z@<0)+!&k08!}GIK(Vuk6m>{3nY0U-5gX#L#RqJxz`;`GN?EJ-d1*t6K@H zY?4{AfD>z5RecRE^H55i3L_?I_;&g5+if9WgTN7=%z;y@BX)G)Lh;tx8x2{B!TFBI}LF*r>^l_xEw+sP^42q;R! zAk>t{vioW#UMRrP#>A2_YB7RtS9eFsRk$^~WRpmFs4z9ai64Mr^}eaN!r;5wNGY~E zeee5J^5~Wm;sOxZu9ZH+6iq!mxRhj0Ig-=u%z}J$piHH{nyNSQ!;(P*lb6u9q157N zJmSfFqT*`vHbz{elk+RXun9pE!Ome^Fxp7t)31bh*yOomzYEZ9lDhv=1{NR)mkd6J9}X6x8~X?roW^-VI*9fQLCl3Eh5~W!YEk$t|jF@{7ACx6j*tVE4@1<^Gx}(G2}7wp#cK zZkYT7kF<@$$^2CN$m+iKvqvg#ehU|$n}lS?ur*YT2@{RI);CUCn4Y};Ns^ahtpj)F zRz2=x{dp{#I@UQ+n*I?rK6D+m$4acG>vi9N)qUCC=~5c9S@c>_LhsO6%G_SIxm~Va zWLV}8yw_e^`kt}WnuQKM_1JunU4^yG z!qm}la^xCDO8105CYCUnS+BK)Ih~>GEYLC3kPF|7t*W- z_$5QA2j_jNH%W%P<`-0{n3&np^nsZlMw8NG6AwR(Z}ly+%?m~3M3?jOgI4^NccOr# zkPi>p4W9M<5+Mp8&p7TGylILYy)@9x9#o9P>TG-BCi~hh83_n)cvKZhaX1N?mt+n| zWG)8OsW&rbHAqX~K2wZM8~i7dkM#suRc|F#$E-$k0A&Scu!udeef3CV+ln;f23VrZep$YB0f0nH zfRSXwSwg)`~+uRFW?L@3m*z=HM2_(Aiyuxg8J0EB5dYmN<3w-{n1!!@@A@;Fz zeTtP!wbk8`;E5=5aV%uZAB8Tcp|nRmP9tC&i|D*<9>wvL#Flf7IqRs@!eRV$6294J zD%+=B^km0QOWnihZeFpq#mC!y!o8>Ds0*dVx)jo}KFSwsFU#e{4h`e7noqDpuerE% zJQ-#RP8Kdd<@p>kB~f>w&snCM)yfMaSPjvky#s;eukF25BonD74d{B^jk2gbvQ0*R z@iApiM(2Ojf4!`$a}YsYDGF8G&T{j!Oxrw9G_ig%fabwKOV76sw&v3O*=@$9IdJM8 zsjlDP{_kHoP;g&c33#f!?HnI^@~4@(AOG(3{)-$K8^CiZkyUmq{kmx}uFS}c11AmC z!NuKI(6t>A7{ij5oN>Ce*zs+M|W@W1Nlp1@+5A^M=n| zVe+-($^OAspwao|L2ZJN_A?d6^(xG6>;My&ZW%js3V3oP!__UJszt%A(L4H90cY4J zubY+#!eu^zr?oG39-t0yDB{kHLiJ@E_)pJF4Rs-w?Llm{-B`eGy8j%w*Ao zE8}c)<2iIk`l2vw+twd&-V#$6xyrupwCx6TfyOat=06@1OmF5<>@%U3l)>UJKUm&1 zP$-&LDn{4AM{hiC2;q1!_u}pC5!^K8&WXZ?{MutvoLuCwHL-$evR!!~*hGTf@M$eD z8#b?GccWgrd_8!?ENrZ%H%gz_Qw?}Q6S~u3`1s(L*H=}Wi*%Qq7PlJ-II&_VP3`gs zwdcXE6MQbG)Xf3Sq(I#}e~MfhRGc^_7E=3wW#J1;V-Mlbe;81{U(&}J2j4G4rbM!+ 
zBcMmvszaKE*MYB;Yu;AsBMs~snTy0r&y&Ebmj-ISXE_&K*z2}J4_$+2z@@>@A!8@# zccPP)O4T}UFBlIqVu-}T4EEtzUcWR`^a9%J8QZnCdQvjy+nl7xm?#%H zmQ0eHrh{=x=6dsU7aPn2PRL$>f_qWd0c!K4(Q_?rzyD^idH2fT8l5H4P1g@$BX5;Y z@>KVQ&WB_9bBd-r-9LNAKgn$(tPK;7h|sDEB{DfK@lhR+8nQ2h>ra7lv2L*8$Vdko zyU9$iXLw1&E`dBrFKCy3L^c!T-g@XQ<4bq`i(;TKNx*&xIsS#7ifPin)yUU%kop0#Tsx4KhNUCv!vI?A3R!e^+1x!@0?HplaePYa?(V2R45te54vfkGuGR`o`uQda_l3#qY>N_VL^yNq|rO7m!J{Zlx!>78tjqB^!-5edPE^emq;IGjxx(IW?>< z&boZo&~a=h3fw|vqddA_mFgPh_B@o$wgq?3#?>*Co(86VIsRN;62?3+_4{3j^Pr+) zCRUL&>;2jW=i2C5YKAXP>&Y9+%WW1x=J!ialLN_le6B|+;$PtG83WKD%LhnrC&nzL zm(u8FYELZq_vsDXWvR|~-a5BcP|$;AZ5r*wFa2m|baM*Td@7nzPvk{3x6Vp5*02!q zkm81U{l&l@2DB{rO*ZFoO`Z}`KbiycV{EtKS(s3wf2K8c`ai*AzzF)lEBO0WE;oII zv?hio`GntHxY)J&hNi5n#ll1V+NVEDQ(A02>UIyWWUO(DjM<||o`in`$F_Lrk6MzH z$QM>#FAr?vgiuz>e~-fS@c--t!-Z)41(WrI7trU|Q5zk&nLwN+I5GLT)-L0p!@V5t zWdDWZGW{FIKh@y;11})<)L?Cd<>@u|(I0rv?4UCL_Hj!wIYk1%6hOa{@p~~z0f<;y zq~`9^Hz2Z_*tT(|#;LetQYBFVet6xMKN58!-V(J7Wb&jQ0y1-Fzn%nbKN?r)MA`Sg z;hkc@f6;aCL{o}od=H>WE!~rs*Ulc6D0LgiDE1bIB7EL`gZMN@G-?gfgTfHwd? 
z{u0kaJFVd4UM&T;L!U8@P?5^YhNd_SYvZ$W!8*eCNhZAv@28peB;2QHcrVr1*ILMv zh1Nd}6@ET`-k_w@ERfs}4^EikEK(i)wN>DK?}BR=Hw(Ugi-mCr?3`0;th5WWbg3#r zDo#8D4QtGY*Mh%eD2H_gb$ z^v|H!-8n;0BCZ`YC7+Yqpr%RBq<=m?r80i~6XbCiNHr*%{FKE)|4ieyIHN6m;+d4c z!?{g_AEJYBlKItwD}UdjRo|m*qLK%U?ikHYvFHY{fatkwy>!$C?w)5m`v(fuy;GO8 z(s^N(rX^-Uvj3?E-*M%#b9}x+f;U`iLa?%((29O7(Zn?S?8{`BmWlIwOj~UbgT2cT z?*}$=oRIB#tZ)R~)(F^f!Y?QcD?qu$m8*=9u&6r722#(KAvjN4>&m z;Vd4CW8VXu{bg_WnlbIm%5PG)AsqMR{oh`HkHE@~|7Vo8m1|H$yw)kQK>*X})~i6p848{HSX^ zzi%zIG1omy!%p+M`$C$vzc3cTvBU2ljzU-3e%4;}*PYd%lz1=?7*gfLF1Ln9a8;qe z@2b%GiaoN0e_%|Kgu!3T+xN|@Dvsp@nBpxk_aRQVh~wCCIQBo{fGViM!L#g}ZO;=J zE@=$*Wy0ql?;Rr&;g3-m2D!xEHoT{xROT$0_F*RlxI(>n#IMx^X^9?t`Q?jORsUO5 z47vZiMsA*8=p~2oSYER<>=y7^d4Ezx!H5x(@(Bc4|5=c2Kn1pB?Y#l1e9_lOMRv&S zB5@_gw1~q$9AF+>Y`%XRu3udD9IF#OGyKCDy=xkxcyhN_%0+6ld~Btc;VJ3q)`tZ; zh`!|Ax$#R{uM6KggSym+TS>0@TgJn6A-l$xFJ<~k9b#Gyxt|U%Y7!=7c$9IKnfGmG z@5OFvfy55k{NI)AyhvxG00E7^)iuMK4S;~~BEtpCuNhDEzl(`&xF|6nCqTF^#v{F| zTU)4;5vIgz&iYWdpFSKuQnGnydEF>n<8$HjC>Kn&`O!?rGMKSyT^Ovnx4M=tREgRm zZ!}2|>X_ z#z=H7nv>XHER4)DPMO5S_f#tmYYCOom`nN8eZ z!8^KO*3mcC%gdiUoxEUY+9A5|om~apvy6cAy={#2FRE`oT#i@&*}EEYV1Vb;cQL!N zaz37QJ|=F=#sAHSI!;yS{ay2r2glMa4&AmAlh!whoyHYb;j?XpoZ`YZmZy2I61t4< zBaKb@s(lmUy*)@@9 z;+%J&R)gO7Fp=<^W3GJoUzKa=!lu5lX;pn|L(KeL*G9!&nZ#vUPP?*7)$_-b)TX-- z)}-9{&6{yX_w$tTZ=Pz!>YxziZ^i*3k$-m16qp(RDeq@f@0^wbT(2~FZ43W+edo@f zO*E#fF5o|zXrPy!SHl&j)@RRg*tH`ikEd>_$R59{o%JF8%EP_adp=;LvCtB_>!CK< zTpb6nk9<8m=(4!g7Qvm@1HUn6;duCWiO}A`l{`F7X5`6b^6d&s*^~>_! z`tD^jMG)>IV2A#|yCN~JxVpr36_zI}-jEFH_&9cVDQ<8a-ga#kV8m?2D9Id?H*W?2 z(-uM@M{s?9Z@$Ul03XoWl2gz=^b;8b#imI2@FoPCqoLYB>e8Fj<3G-O$?jh$o!=N# z{V+{Ztb9k*cvl7S>=VwZu8)`HrUdiU)eH23K>~#~8hQ9L<1bHGU~wNr!#JYA$>bw8psE(l$u6e1W-cKPb9k};7R zIrX8x#6#^?4`zEV9*7{AbeD6Olh_3y`Uaotc0%LN#^_CH#`n;~l|{m@4+&YT3{ z`*Bx-ZM>I3CG*4VjcGcu5fl705A9wlE+IkQIEhdt`7hs(9>(NGel0!J%-op60-Cn! 
z&fC!BdK`${KE|F74!wYY^1*t7BV0$yK)0;_3lX8y{}B;-bnt&T zA~bKqorM+*DnODNra!~~ltr>T*W_J{j)dpn8Y^u$`}S11`8i(M@!6|VRcg*b#Ku`K zNB*`e!t8doX62}5u)dsk{^0ZO=fUXwlN z^fLA0a<6MSvc?W{L?+m&7OUPo5dy1Lz z$x#ilfyyy@fm5#Ira6Jrw|Z7};<$z&*&LAm=3080!;s{x;$4;dH-ul`{l4Ab-$&Pf zZkYygU*tid`gcLXpMWJtQ{!Jhzve)Q^U?;nkGGwYXZeRaSv)sUXMDzw@B4r|DuZ0J z;YNP0SmRH#t?DB)1N6!Vy|n3d*+pvE89z2zCldd>Pc$byH2C;0@Afx+`R@Kv26n6A z*EkgAz~O(SHaKkwKm0^zxTLfIGw{})jaT;=aV2%{B^aU~;(zU0gEtaW&$OQw9}-nT z%vA*4Qkee(&-2{zPWgDtu-|ScJca(UvDr9H?m4>RZH`m_6O~yuBJi8(<>Otg?nM>8 zV+M={;+#kAqD%|lANZy!Y&c*nB;4jaD``x!+-`WATGy&d3{Urs8Nj5g8;)r&P3NO?$ll4m%hOUqFSzLCP{+o=Y{NY-Rp_U3y)-3U>N4vE1P)f*<4$O*l%kE9}keLk?pRe`Hvg!)jKD(qL-`M z+jn&r*`PnS`%A%LZ3p!G-dA8bTqgLaF12ju0xlA~>ALM3uH9o-w&7!Wbzr^-PLp)Q zrFOPy9!L)0viP>K6nuZL>Z@1^yw|L-xCP+?DZr5;!CbLHcyH$frKKI(|CE*V_gM;!J11#Zs)Msn0- zpd8mdN-;M9Z$HF+;b2&k`sI1Pm8Q z3C4T9Et)Ft9JI$sQYuEwPIlUL6FrO>vDu4*Ln8JTa0HV_1BZP`MZ9R?azm2U$Q>2w znh~OwH!XWoDPDe)A02vln6#HMnk+M&63VK{ouCz_mS?>c6&6~Zu4LccNk<1)8=|pK zoysLjdS6O&_0pNDWlDQ&G&*FV&USDMt??P}rN};zeh4!+>;253O6h)O_?@?pskiEn z3D+YdW$#esqBc@Q=YvuSa&(`)Q+)MyAZn%^@589#3uvT~WhiH|f#I?O@6I~@i%pg7 zCrT=oMy%EzZ|RIQnIL#~MMJFWqY_ zNx`IKosEcYy#(#*us5L{YF`mMF0Wm7&c5QuyB?L>PZXDOf0+ZqL3NM$RSf{SG5;L! 
ztNmLPKRT*Ms;1QbvxfKI zvWR2nySogj{UEr`UKT5gemf$U4#8|Os_%_VjBr2 zQ@P5ayY33fCD>l^{ocbmvT)|Zd^x4?kKn$x?iHvp(qP;v1?aWijp4Lpy})^!uR|aP z*Vj1N#s209Gebjtx_L7&hXCl+e=k%S9~fAy6AzVVe7515b^P-m<9%t>3T`l=uJg71 zahq<)@N#w4z|5)Y$!mWVk1H!VAum^>SrBYKq1^H|N-dxqUuq@R0sgPyJA6~NRvw$+ zw63)N=}-#N={39TC9y1JQ?j(BawuAniyF~91qEXh{4%oAe?`>x3sEr_L(}9n2AEN) zQnNe#6g`5j?GR-T;&K5P-4SY$&@cF`x|dB7s}@BoUvDX8#dsilXSR6pAHefOOS7Q?$KLcV_ut`nQ&WY1 zgwSOg!czd3DQaiFmDrlV*O^pu+48XQ?QawL@xut(=p~2Q+ zN|vl~(D-SXaIm{I0^uwCP!eu7{s!^5zS75xm5?S#MKY9_uO-$ksCf-Xugr6oxjB`= zy>l0K##dzRIl<2}#xJ>@K(+QDfmh2)9~NOGQ|d|Oyu$eKR-%D7X4kfy0oA``TSdKSInQg z)||K=w)pAPbsJ8cA5V|?Q&iPt1tPZKZ^Y~0ld%7*{G3-l*Kd*IK>G!A__i)ycUDl8 zJO25Z-sVT_nB)b+XK}c%PS>c+crYxKTvh@vSg{Eg1@v-!mjksn;>1`{`xazPxO`Y4 z`S(casEJ2;o7ZW3ac)(PvMW7CWAXvrW!VtL-;<}!8jt?Kb4e+^9DeP*XE^7#{HFHj z&W=Y{s9L?HAe&o>b%2{9J`tNO<8it2rD>*(7}AhIJzL|#M6g`F$%i45s)-IMZ%KFE zV*RdrQJsT9P0b4am7Qifb^Y2T}B58c;ph7k1Q3gN>dw`DboV= z9VtTE54siuP)h9W(K>^aV((}v1nL4+Du-y>L(V7qiW>l5SsrYwy+YOpF!D+Bb3h4}^?Xhg0*Z{@>)WO=KE04faR% zs~1wYw<1WO51zeM;Uvg7UaQl4BFV#^x9eXIYrz1w(5J{mSzPq<)7VH@Q|H?{9?SbD zv?D<2{JLGXx&ac4X<}MdJgV<3Ps^K3E#K^2r!2MTPx6@|QKnx9??-m85ccGA5#H=^ zk0%do+OT(VuzgKo3x=XLL9auH*QR}mSt*tI_Yb~hsjOw#rZR8A=)12a+WoplO!g>Wkd_5z8Qh(C)p+ zXZD}0IDP3@hq(y6mc`jNjOoy2vmZ6gY);H{dp(D!t;j!X<@X?c!q5jScf^0&hvS)hN;3Dc{ELm9Bg?MG)(^ zSuQh&^}7B^v*i4&?kd}BF7?bk>$E3uOQyF|B$+-Wb6aON72+5Y&N~?KnCRhCk_qNz z>KqGe>}`61+F27&T>gS92bLkmh2q;V-VxM9PgMupbb#Kp)z$(NYp=hkZEQ(Kw;!^_ z&+A+t@fv}%cv#H#g|9*oWloNDm1?#QH7JsNn+pr^*^*GRuOvMhh6Vr=lQMVrC3QtW z1Pmh_%=?B!S!$y&T8W~*XS0EAzqbvx$WohtP0Me@shT2DPA4&074wL0Q|+%0N1`3gd#3o zxC~xJU$lPC-cjSHG_#y?li`y(HNjH+2L>vFZDM!i*l(;8(G7F^1n^mtC5kY6{zwa; zBz_EwOU(V^{bX45F2zGizdK0JO6(`wvX~eYAWgmCq)Zk)o+zYQbXG&wY*8v#1T@b7 zX%v{C(vpl&o{%|v8A1}ALC_HDMnQziG?h1;s7;6P@wX2C5BA5xtlY3XhO$>{{CNrQ@j(v37o3zP2d?g>n~Lpmo7e#5=bd6#RS<=W?*@BOZKfA3j; zZ~>ExagFhe=U4ah``tH-;D;}856&Wvo*ubm?L9;6OeL#D5D7>@bwr1urgC)6b(bcw zfKL>;V+_@iLM|Tj)*NrilNtTlKyvUnMD@Y4rOl&PZ)H%|3k!J?Hy3R)+>hrKM6Kp8 
zh7{E=*tc#-(-ug!`sJG|wBWFF*uX3M383E7o+0;+;cx211GP`}15?ACu3vRKA4N?2 zQqiGV_1@l=hVu5xRqwCG-bwl*jyG2GB+<~|bKlEEU)ACYDcP;A(;f2k%=xoOg{qPA zKo?e)HI*89W*F@UKU551O5{#mrAc|bouv8{o}DcCPD9J7fue1xcT@kEhq3G8j*_yE z>iap%G+4td_GBv~a5prE6A=#G6dWiUx&#*knHdIF@(@;ff921*yPt?&s20(=70+a& zLJ+xo!px>Q)+I)B^1^HKG^VsHTBcHp%fk+dM`4ok6#l6y=5)0+RhK~LqtsCV1#58` z-+jIZ_mC>Q1#PkjVgx~8kK>Wq{L^dce#Ndt+E+cR3D|PPo?NI(h!JB~C@rIEZG(+WbIeatfGX1@jWtQhF^*9*6WJ>m7*z1vHE zH%no`{a)FQT+_S}v?1*co4N8gnMfvC)Lh|c!|Dw!Ks71J&}l?V*HN``g})~v8ty$h zQa!}VR%ll9#-p3IGU_{0JvzS8dcatQvc^*u?RTdkT;5E&hpPf|-L=yf*I1D0nSYqy z(O6CY9i3OEQR9N zi>Tk?Kj!Zg+b+!X0{i94v0iF8U3-P@$?WI$FfU1Y?tQdnU6o0#AYgZf?CY|~p@4($ zL4=YdJbIe!>m2%3#(C-u2b<%4Z*tS8^NSP%!u4-E#-ZnEG8IhAFk~&Cdb0xam6#Lr z8x;_}>`Ht|-6>0o*Wq}CsO0WHGHpl5-rPy~iB+!V0-tx}iQ<*kpBDEjtC28gx2eZYv24VDpoO-0OEn z3|QS@6v5PFwQUNC%Btzyogpv68Rj)x-ii;x=L$Tt+q{qOk2LO^0nIFg1+TSApmf_T z5shXi8RM55rhWx?eH!v)} z8DVs0#ZFVo0fImuKghe|J5I%*Dnq%Rbw@-=x6xtoBZ7GnK~fv|k*%QY$e8<$OfYep z0F^wwY`1VqPZEVM*d$gW+?MSqUCN{aHH%2@wNQhhmr4kyuKNo<>OkG;gCB`fcfP1K zK=E05p4dK6pCkW>+3EyD?V)m=!XA0=mNJ$zpNOoXr<+BAd)3YCbtd0u)|=MV!strp zghmF)?!-or`}pI1gSWoRe9jpanb$c5I()R}n;cc0#dj(qdd)L=_NHt|`h!gmbzihq zF+>}-tw(M%3v1{;kySg|_{J3Bb0%fD5p#NqU;2V;e6{Ct_t^(8sggoll2*tCeH_yB z3J=}#nN7K8c%lYPIK2;$RvEo;knq@`NcyeEsNrTt&14w*DGE)8xN;79^0}XiY+>ISDqj?2R>wmq*Bqp z0Q9wT+zC?$;l17b+DEAhWTrFxxtBVJA*|K1yJ-t1I;K%Z672k<0p>&|trDpa)ix~4 zBu%=KXY=~XXSM5vnfSsFCOX89HDtNat;=faRzLWKwxEWCzfe~#-Dx5 z%bEwsL-mad3Pxz%UCj;`h3*X5;{=4&y}5g`rzm^%bT^MaDXZhdVzP+%WE7BAA~8(B zMVyFB*E3(`*JPGxUA*;HCQHv8e4!iG?*5q>$D8# z&G(e_^o&^hNpp=P;rma@8gZ;^=E^-mW@J&2@m?zq*XUkh`&@f|VO~K<*Ix2t;QZJ$ zA8md``?FuolxTke;fdC4vcM*XJ+{p?tKU1@Y!Jak6v4yu*gWEHoXO&6x4u5Qjrx*8 zHx1x{REUi(-GR>^KD-&5tr0$-96S}>byp46XLwTQShBHNxO88AuOi;NnJjC|EvUw? zF>6K|#54Z6xK4?XllIGVmeiGs8JzbHDwcD!HT4G=ZU#*i;jdtl-(qgROsn)YLyaJF zUG&FuaWNm7+ah2W>GRk8WLzx;utKWmO#7m~QF1FZT|@_qo{aC3Nhm*=*7f|PGmB-? 
zMTt{M`alriT+VT5DQDYZOL|>+HlvkYh=5)7r660yoZj5tzk*?uS0xrUEK}p06K}Jy zEW?hs^d5iWoNfze6b(S4==u6hkcFIlcmc+09gZ5fl+gIqN6% zc1nu@zA8L+{WF6JTW-eW)&oh*755Rw2s5G^I%&=S=zw+vNvEhvUNQmaJj2j6v&u8|`AUaGqJYr51;DE~{9T^I&G~ zr)~FFWvDgTggnQD*QY9=$AQeCt;?<@a`1tXn6Teetu%#PdZvrvw9Ml$LnB!bP9JaS z3u69y4$cU)m$L0(rYcCrGKmqW;IuyfLZoomBf#FTLW-yH(|YAdIdgPzMQHbUa*r%^ z&c_jHn$W`ayI)tX43 zN;~^!GCAMAm&-`&%o{%AwYiGE0v#oCaK#(hLe^t916!}YDK;EyL+=&oyfc5DqMC8; z4dP+5K2}9E255dO@kSU{_WJT`eJ9~hM6m^wUAu)Zk`abm zEhn*48I`Aee5%6ZnAu%%PoWR(-PjzG?J-4}!Adfv<*?&kcTg|LMU#_tby=E$6xsLF zyQWTIPoZSul1$YIUm-@zdBvp3phSJukQGtxQ9arH^0C8dy_s_`xk;5m*JQX>__mCg zuZ7`Rs5XFq<4#Y}1r&s$I^@ZPbUf`EP=DJ>;Y_si&7mVEEo1g9Qnm`_^f|rs{j(1| z!SMM0^2(QcACbGr8-1BwbZ4{2%|X*G7iG+t&3ztBxhj<_3k%8BTN+a8jIh;Q3+6`A zqMfWBiNFu0R<=O01r+wFP8&7|vHMZw6>539@?722HZIsvg}wr$WK9(MUN822Ps(#$ z@BspVisTblo!J5u_Vb0 z*)gWAE@*WXGFOjz_xsm6RFw{YIq6eg~`9-Wo^ZZY&AIxoKCjz9J6jh{Yy=K?I zO(9Lh(|XUiv?#&q&185XDD$8T0ElW*<8W{r@ALiK-~u6^fi^k>~tmN z5u2+fqWYW-<0dW(Cx|QNfT)c5QYC5A$Cdj)FRoBqO-J@u%VNET#LAI;uYKazvrUOsHBejWI!2^na_ULCAcwxxW!-NO=&FnTn}9C-1IP0A)?D^?RD1OZ$3>>;6iZ z1cjkzo`#dBvP-a|0A|D>Ejxr(-9Gm&Q<}^SAMcWD7a^HI_NLc^lCd3j_NrVtAldvv%eN~j9|k;Ci2Pj*{Nkg(BmyL ziw*bhNU_fZa;L%G3*K>En{)JIh8OelxRPLJTj;dzz(E=#w@ub_N()Mlx&m+FW8#+T z{p1t};=PCkee|{lKBm?_D`jKZWkQr7Q^Gvl8PL{~kB+lEOBm0dY~nKGZOc4Jq>WlJbGEJmN zNO<-~5`L#TFoZ0d4Zi#Ba1(XLvOPi*`)Xn2Ie#tm%=F=T#R}MJe_^VF+DLEXyzP-d z9y{S?wB)zT`sQW5-~j7xL44G)lDuVG+Z-bA(0RlU=>UknDweF^vSZ2vEYP zfh>*u=jE{5CMiyS>S5BWJ3#VIP*H2RGq0G~Gk?wsu=Z#;sIE0Cor$fe#n7cJW$ah= z$fn*l-nKB-t1%A*XUZ62tV~PTILw-0Ji1X(5nnt@gTr+|)73V6vmPL+qGm;RJp}?lArM}fufW;w;@=c7TbqQ*- zKfxL>(Js<{G&N|&THDQ9@_qtxOR}p8bsw#d8FKp6e+NRG$Z8MUu!GYrv=yRAf zMJS>HY$gs<`owWCl*xIZ2`nL!sHy0 zSZV~hoJ;Q+gCiQRP(&%YaC5G9>S9-9&fNRr$aaYSC7h*LUtzUl_0CY+ zS6r-_V?t3MDAPm$9t~{+L)QF4IHc^b93I| z4^~4vG`t_o=aaLPHTdHSZmuu;KmhrzVd^O%l;$D{ z1I!L+fsOQROPH_`XcG0|M;U=?VVBt@yfGK<7r~PMCy=rxB-hicWRnNrBkq@$t&1B@ z_qgjNw5yZ$UdNcJ-{L5UF!Orz=)K(S0jwv9TG<400f7| zNMpmA%WW6FDSr7L(kz;TW@1r?O##!(Ar^{B`QMOl!1L<_qoavMJx 
zzqq1ojFm3JJ+(0I;zf+$BEk47^YG%kMR!8BDJ zu_-I7xnwEY#0@mq?`UdeW@K_Tt#=WPnn#*9v^K;nX=q3pNOqu!>A=3|SU9>W?zqFB zw{MAmP#&JVw6?8%WK&ua=1(H=CY|=yQw9-F*Bq9zyc8^}H&#=$I0ky@0`04lK^^zH zG?+^NbAvrooQt)XaF#jb592| zYgfJ>889QsD5>X?k$L&FwFQaPDcAN-@L>%!zid3>o;|(k%382i-Ddb86$5odb5^mzUuNUH%mq0JUQIjj?8#~y*#OAwqu zx=u(Xb8)oIN3(x?pQWwRS&PDi;@g~^#A*ev-=fqKG9~`ty7ClYN4+g0XKq5Zr54n( zL=eWc+xGHEkma*bYdn3f_X=#Fyjn+KpeP$r-LE+3y${UQU_k#F@LV4(_JaxZ1NV-x zNtvJCrw?l91rRBeD3f#n6S@8B?tX*P{rd8dbg(Dp>U;wDCHFCgvJWBUdoQES8im?A(8X}wuzK=XB#`vGADi0m<_bdSy|uzkK=StKH3sk z^Nw@KI-l@V`Ky9N#PJDQq_8jA&(8>72?$W|t!KIeV`8qVk6C?tXJ_m1199I@n?5rL zSy!ItDk3c;J<@%6=B!X%HEIz<+MV;1bzNXH6$fY*7x<23=E+!jLsHxbd%az$WWy3r zUN$5;NzKG@SNLRMg%qB!Qv}WKwYjhYV7aE&_!LuY?Qw*=*d1$vi>y$al4e0B$v1c- zuh`PGzt(~&lM_N`ud?8E3f{&}U#In0^KmDq@=sbeh#Dpootcbdvu+(k#^9xSFM`1G z5()A(_4Q6k;h9xzSz1%gZ@cx8!}yV$5j;T~PGTRO?y=eMT#Jn6b0lb^77r4KP&mi* zWJI_uQ}pVxeHhy=Nbm%ofV1jTx{o(hRy@U^VhhnP{}gzZH(mlO>5h7kak%U(H4uR9 zZsjudKVWhlc;ni%{YDYwEI-61Rz^Kujy@fk8>m%AdAH?UvX||tQDbn%!m>c;qj+y0 z7VgmzO_ID0Fq^F5>}({h?CDrh>H`PF>DOc$?61)Ya#O6l5tjwa#?`Hbh;%?O%F(HN zb87p-`;!wPb$OXZ<*@F#`75DmQ21(ON^P@P=z88s3ZTI^#0)s-=BSh_lRW)16A`%+ zPpRFdCGE{#--ci7QR3XD*YOARt~*Aao6yPvzxrN(Nw@{q`gvdI(m@% z15g3aet0ZTBesB7%y%SzZndnW???rWnZY@~K$~Igj=k=efjU`4Ap2F%Q>tfTG)nTU z@lo(bt14Swp3rWqC%6mA8txC1m5TB-)&ve~+mEk#1okY_2Il7Y{C18NxR5@*J}U=R zk4Al9@~C}F6GMsIRiI}yS`OA)tf;oj4OGr`CgmV;(ZQG08FGDFTCp%&<0#@_K=4mc zXOJZy@ot=F)}~-cI(-7;r!NueAp+f+Gn40NyIspRDE zne%D`L3w!=btpU3)TuH}rjYwd(2%VFvuxk(*E}D2NMsp4G+VNyzmq#RVVy2Mb+(`p z=r<;=QcsSbR;txIh1+gC$W&lR``o}V%2vnhuq4C})t!3C9B83AchJ$sIiJW#C5vLU zEwbUknK!#R_ZY6T%Qq>Fm#){Qb{k*js(HS!&ta?cg-O}p!PXH08vtyDxI%IKe*;?! 
z<-#2`V737FCL0G_%L-SduH^hpl8>dp-F6v6epGkotj)z420~WDIoVLGUA7-(*L$wca!G7cMOMvH zaG?mxUS*OmmUf~0#rzGjzDx4WRtwyarX*lus>RHyB3#eCo#a7V!<+nP5qDq{?!%d9 zfg0n8JOBaRPOqrYllAE<&{>X_4{u&9hE+&=9DMmZ2yfT_1B8!_AJ*~s4TQG~h;Myb z2`dl0mP7w^)jJma>f`oA8nVXyTkkorq3T~3T`hXbFGY2s6iJ1J#|hCwLhMSdH~0Fz ze~CL^65CYf^yYvrz-^FH1FJw_EX4;`j`mp(mOQwbkb!$1Xu+t1rr0z9eUm9C1v|s* z0|VaGrw&ulP>d;(S(DZE>afjW7QV-QY0th!le|d{xwogKnuU|~r4gIw!sw!Cl?iNh zZQZyKBRQI1m;s$ zk0P&zGWm1LvDrtWK1Y9t}KOc6W?FGPLFqJr7k=ptj8qWh}^`dX_>(i$!?^9DF#m?HJf11 z(8zxEI})#}N_Iaw6!C+6_$WzCyIQbRf;^XncjWBNY<}evB01Tr+tKg3G@H9s+CeUg z7l*8x1{B%>t2{DL8vFh?FbP3syd(9m-F?98=_soc+F(c;MHBDF_BlJY$NnoI`_Drji|^UI6? zDUZHa64_dU=|Wxt$yPP-0j1y4*eWa0$JU5uLADtzDUl;JX-wiW{Xz@xktJWA$Mc0( zD0OAEm)f7NlU+P=b=l|XXD^lJj5I`owF`yI$1;gM_iINBft2$@^!CC%hJNbzx7xH5 zzaB(L3zJu`3CZtRC=Vm zk7qkX&@!!D8h9d`Ylci(!9jhz#fHzp8>8-y1s49R`ML6WmCGgqtaSLgFekIoCYK$y zFP|1=(_=J{X*E|~zIR2Sp@pk}1g9)vR^4=E5$IcGWm>98Qm>bV*HcCE5-MUy6xdrd z8^JYFwG}gz1Q@yWRI2i_iC=S8ZtuFey^_e$xAwx?*XW;FbN4CefYOjg;H|1e9;3wX zgxq2Cm^VKG66eNqiY+xsNyBTgMPT>~ai4i`ZOn<4!NAoPvwbJ2|LT*e9a!kK4o4U3 zN+5p|U*=citx7c=zzlvB5g*s$Y_-y^05y^SRCv2ur&l+ zfp;4E$x&CUdS+M>ugT2>BwDi-6xkcE1U8SglB|hmiZ|cLTIA|?hvH-=X30X{X8V6KC4EFh!MQG$ zq!X&z0IO3B&gm5yPjSsHgE&KA6>TO&sJN1eFCVxzub4wuk5=n14Ck{Eg<;n|#dDfA z;ZX7XKp3i;)a%{kah1nNd&>7Z?k%rDnU3y4R}Ba{bb}i*U9B7}@(g6LBRMtL@a|&N zlh`#i50+c_wsw@gN+=zf=QCD&AK@%QvEzNf$?I*A^6^81*WfIoV|IRSng2Ksua!5? 
zBbolQ;cUMZ;r>Kz903betR2Rp5)qPDGD2QyoJ}!*sPn5lW?2I%ABeTDLp8z%+3ck~ zUXi`J9nh|C(Vdq)vg(fS#)H?qK@eFueXQWMbgI}Of7_L9{J0EE9MNDeL=pZ;_+Y|K z%Z@m=QUhxs^C@<14B=Mz)mD51*)YWBJJR^+lSBcX6s+3Y{YTTlfz&|w3{P2UM`|Z& zKkz(rY`=8W6m4PCjk?g4|z{i=GviQ-Dr0n!`+eN6oyP#oHy0?K;6Y*+NCoKnfMfMm1sto6_c4mWo}c%M`Y=EM(Lr&Iq&4|( zqoW9}|LW){qmS;}v!HM0jJ|`D3PbjHS`ywP1{@+crc!_t{@+GN3#>2tZ5tT#vAGWz zhon$M!0XRdzzSg!$n zGkXjJP7Fy#m_6@XY%ny6RsYRWCWJv1@i{Zl)f4iTj&_T)G~ILqcm&Oj446w{C6be%IvmAwvGTFi2TL$)u7~;DiL?ph7m+> zy_Bm(&CrORqp8&!jqOA*|0`59t(oerR+E}()OR*}eSr3%A`FFAiNnqY5*mhlDkad2 zDA4_Ep?DuW%X6das(bBCAGE62voyb1WOXz(%_Ym)KWvQ3x$LlG&_DW?Lqy3=lhsCa z%<9q2i=y)YlHNE-%_YtRoMDMsd+p43QMsSv2=_>c-l>B|-xMvhwa~3^lqc6@birLj zZtaCjFv}{JLfLI$VR^!ZoUZdwfvc@AgmkJ8R%Y{VSiMr<^6J$Qm54kcOd&l5Y3(uY zVKA+#RR_mvmr!M{anQqQiX>}za$lwWRdjcJ`EvL9A{1@e0l9!ZFaSLJpUXx-LEv6-mBg4blmj& zHm`l|C376lOwbxO2)^C-a0@**`g4())GbOWh>eB49Xvv&>je7s=&SfMMaKDyBaVpD zB48r?)*mLxDT{jmsX2gY8Vz}GaBJLF+CROKFk}Nv@tfFjJky0JA;9_+duFs2vfX%A z2)hb71V|5SMSOadex62%E7(#7h#6AUY67e%{Yly=@rn3JafQN_em*~HJp+VnXiqi2 zBSFNj3CqvdL=&ye2NEmn<@bu0Gy`Jj2lJ%0B<%+-;jSJEx?O2xsiNAc z8-&}@-gPJrAuVSr!w`XydEA_v5v^c$f6S>~2hOOBgtt1{q`t37qe-=ho8(m1l;rih zRlo*MhQC&%xx_#9}WGz8-e!#18M1-541O#;E;>saPdd6m~l&zbH=!yyHdNJcbILTjj}rF z$xBBZ-n1-D-B`wtyC;_RaOqMjdbE>Nb*3KYe_b$B;GF|lr{_;y1Jw032EMp1l~=|r zS=@UhV=!^7fkJzup`RujPBOCO@6jvB5P{{G(jC@Hz5#(@Wj!N)1J-EW%c zGuWi=<1{SiWJ4Q&q}3|(Ob)(NDFcU0f4H?6P20vKYxN{rgY<3vppv9CHgQyS&Fszn zrJ#qw7fJM`(5ztI#0WiE%7$8bNheBF^MNM=ZYWdbpx(t%HXboF7pApc2a#sDsb+YH z?ES->?hE98ONFnSx{5W2b0mS&LJ8-%9ZOS?i4x;5y>5KFx8mz$QXtpWezxb=t9|va zcd`+OtV0`cwV}raB&`m!n-r6RWy$pK#krRBWR^U7+FeyK@*m}P_vCb7TknDbaU>=N zV@}U`&f^qG3z~uXE7U(!&-`5;*r<;7JI338gN6r!*blzAML$w8$|OH%zWUubWL`B> zP_z64u1BT}(?EwEI$(DINLaW5)JgHWf1o8WID`oF^p^2gEtkc;7wC)omSoi=oATkT zf~8iJrIKjMBKAEaJwpSU{=7gP8r&4dFRmFFnW7trZlY5ttAt*Rh~hmK1alnc5_f4- zH`~yo{R0-$_dYah!s|x`#MQQYX2iDg<)=bKa{fRg_zy?Tk6_*rYWp@7R61*Q8lNC- z;S40#QdD8$v^pKgXo=)fpt=I$V=@whxvh)KAPX!#q8h34J0yh#j1~Y^a*9y_~Tik6y>o>#1Q6Dkt%jI~-XX)U1i1pf~xd 
z&-8%Ejd%H-Pm1+8So2(1fup`QeuQ(e*qMt>TY_zgs3n;T=~`r~BI}m$VED$-cFLfG zj*1pUD4u!1@!Di%vme7Pqi9-wu@zD>(`Uv=zksiaMx}>FCyfV^sY~*j{{lKCsBt>f zX6}YmW~N(?MjwSwTm{LlhqR8ACL2A@n%>#6JtNnm8v^CdY3d7Z6Yve%f^8iHrQeK? z3=;d0+B5A^RSyVs*aaQv;(SeTQr_NFN4l&w4ymRXs9RTX;U2^|t3m7)vf{HEu89c} z5lvuzyMsb%Bf>}Sx}~#NbnrFP^H_duo9T^e^}Kb>+Yw+CLp;qjVv0U`go{fpRk|^$Zn5m-uM-$F)&IcAVBs7HpE15~(H0k6#2EgvV4#82r>`+_X}k zw||jhpv@7O(h_^OaaD)W5&8^OsV zr2lC@Ymk}m%44D{*ZJEf+%AjAS`4pN4^QbSR41qipP!h~>96fcrvG%3NS4oGcRqwl z%rg^vfuG{aJJ0KGx$2pY;_#S(BFS|USmW11vh7!4F{xZWi)Mq6?e|2Sf_%3Ulrq-0qC5r@g-Rle7P!)`~%dq2^|=*Fi) z?DSX#&|K*B1>_5aOur*}OL|bZE=80CHLST@MGJAA4LLeVif2stKDQi2cWlzU?BV}r zZ!a_=o8D5i4V&2r4v%A5{P^TC+I?pl42KPM??wLo#%r|K-h1EDZytuC1Lz{8^X4X? z&@6C^NKAd=+!|um>&djqIJO37-%TgN#2B95ej~*JX~SNz?ypENe$@_r4NW!Sc%j0y z0;p;;|9la#-!7iX#GmWwf2esCmdGR;y2~wAbeux1EmpE!7iZKLAy%3Nx81`DQL4xM zV#U_ipE>#D$tS`DN7^dXJHk##K!H98pgqd~A?Gn!@pq)gFQI3O!tCFX#HT4PFCL%p zH?GFcw8j*ggV<7Dy?;;vS$NjIbc>FJaGlO&;n0=5oUXl$;0-qcM<+KM$8guXX#Xmn zz~8ST#c%7Yey#V={ZM|_xLd81dmu8ajN_T(r379z=6ymfobaKXP6}V?Yu^m8#!XLg zOZQsNrQnCE80G&KF@^pkJ+L#y69cN#LO)vdwLWwST(%f7l&LW1JLS zH*4$`qJ8sUihg<_zkA!FxgT$&PdQE;w)-!bQGW3d>6@QJSG*mt|79;VL;*;nN!&CP zcpt~;BA5=KPuc{fiVp}{-fT<-*N=X(v+pYwT|%2QBjVXEKdr`~!@w}*v2bq8-ClsH zs~Z0YuKhxxvoudo*8G{~?uq03IiZcpZmO07@CpjUrlhkeEC%d`B?NuH(MW( z#%SK-zY(re=U}2s;~(e+Gqp)PJP+?YahwXiUf_J|x#@q!Ku+)-DRn3G`udS~i`}C4 z1r`AI&JPKF=lOd-Tg2~;C8Zc}Tr3T1q~$_fohJZw=QT!6;q~PY zmNg!SOLnD8r74Bz;woCs&n>mxWJf`3C4fZ1AG-yc5XNi2dK%kq%#Li^rZF&Y`|i|E zJ~Vzcvezm|PQUO}-jx3&N6IJtD-$Mt8xxIGs$SNu$~l!zU~FVA=jTV3{&_8;Np+{V zE5>vlrha}H3i~VBH z1BUx|-;!-^>1|@gV0$IE-5}E@5Bw*kzQxIL7(dMd)dX48j zB0Qji``JtXY@yqB-o5PA{&b3eW|E;_O~k^l`wRy>;~FJj{&M9!)o`vtca68#Clih} z(SBaj-~G$qy#V>Y^u>NAVE6|)fNq`?Vi#)zY~>IUoBr9I3=NG)+K6nV`-sNh!DEF5 zi}4zau-rlXa=mm|lVrQxx3T=fhu&Se`QGp#pfZK(_8jnC1;lq=yYvHgNPOWl#LxQk zc!b;q_(QDU!8SDhTXu{&C!>Ai>}=5TRCPZ|M>27s1Z+7{uT)yzax6Nd8TD}&b>Yv8 zY!oxvYN8>_bIm{pg3no>G8FhA)*LXvlPfnJp%nW8v8 zzbU76d#;2QozpXwMd8M?qJo-e-)7#nF!0v%uYG& 
z)wlcUhRez#`xJQlU!8jy-|{(pOOOabLf!}{#w&Xu%Mva@YRhaB9{!ZgE+|Fqop$4I zVfY^c8H0bj$np=XksgkRS&j1n7N=xp%lbEqi%t^#5EKw#Kp>ZtY30}}Kx~f5=<+C* zCe22GX0EjH7mI4`#|kF$>=OW-j*n%$d|dAa0$KClwKs z`K&K+6T5MIA=L;PLXq2MKS=5Sm+$_aBXcJ;I_&*h=D-WxNiBW%vf@2Il_EZW7*l8} z9_ug77!Mo$qs=bv<>&gu%O_p|O}}2+#}`>N=M*!olY^yv-}3+D<^G%h5c&Vd1+&k9 zIRUCd0&n;+#f1W%gV=BTK=ZWaa?1cff7`VnY8fYoMiM150~*D)g#2R4pl|#(zPc{e z0_~YSr-qh0P0v(vrU4O@V>y6UZ2}ScJ6g43XERWaUV{6yrHwJ|L(*Y?Rq5iTVji&v z8^mI5M)(pro4h9_BJHJ2J(sc+W*=Xv{cMhfgHNpS<^(eq2^uy;yAC)xQYs>4<5MPe zou?!RCs;iWD5B^NwIGAL>qMR9&$-FSRFtiFsZ@x0Zq0JwH5`Trk|u^Aew7{;icKs5 ztW-u0KN7^)jNwIXh}EuRM^|6Wn>L~-6knlmKY`<{=?yIv#X{a{ZzsYoOY@L<)AyV{ z6_nmI>ObG-@6tN2UOW)L*cF;l1Dt|YasFK%IBPxgE_zn-8%_ zZ#zR$ai8p|{UQ&K{AEy7{$?_F@b`Qctgk2Tj3VxU;qYu3lk_8zw!4oq8H0l5%MTkT ze$4j7k2nfwXwpVp9e)tQ?VIUM2l?8uooYdsk5gwzkP^7qQ4XxiB z&kU4@EhG-&`Q|$ia1r6+Vdx`XFX!%p*ng1>WPMxu_j|N%&eGPMm36W;1!J}~ZqbF% zX?f?O&_jP6f>rk?;;a|ht3SbVPG9^3Ruunkf|fVjPXE9{E>%pLpjX$irLv+P~4X6 zczPvlQH%EwITAr#^i#Q8_-RQF&mu7JgWjoTZ-26)D6Z2(`Rw!94&U@Z7idSc3Du{o zu4TX=q;B>Jn?#Q{{x~Ne`1KxSZuxumI3TK9ANR%4{Wh0KeLg-MOQ&tW<=xl>X%#I_ zZE^@zl42|4d@kedPr&X!N>=<)P2#J~uns*QGIeqabjzd6-^2(C&+?cP9vz$TXz=V2)* z_$77Z^|LSB*0*b3GJCF)ItmDoZAy-mt@z2PDV1wyJPmn|cYjmx7H8K<{@_pV5*Q=? 
zIU`6&4|MM^a=_9}imStWhI;?jyWpy2JVgvzt|19_>oIg{z*wK-)dsGA3M{& zy$ShK6IK1cGgSW$l>A-Yl%9VEYgPT`_Q7roVzURP{+b_08x3%;jkIW;grAacrIT6> zY)QGz{|=y30)BHiD7^O8^WLE4fi9+;<8gam9QglG=gKq6O?m!DjV1q6KEspNsf}oT zHp)&lz19q-&8>Q6qd{wNm^Y@UT&f_UP*V~fr@BP&DbDR}QxG5~LH&nf5}OEz8Z>_= zR-CjD+lPkpldX3Hgsh+ol1NC%GapIbih$HKOY0g+4qrRTmQ7ZothYY0=M~}DY)#2T zGW=3>oJ68sTz^zpr{bO~Q5H%K(sO}}RzFwk951vUca9x5R0zrCP2gSdI5P>6>dYe3 zb~sK&AJrs}Ob6lfH8`CHusc15gs&)oY>C(5j{{sT#&&cE4ljDUi^u3;<}=tS7%E*8 z$E|r-UQx(y0j(pBV3Us1frbOg-At?z;ZIQTvi`*?juM4#Juot5q~ppfm@)Nw`rXlD zfB*^kXD8#qk+TK+#^o)oa*$B(7CU{XdV)g2 z8&MBVX&O@)R=&W6^6gD#Y}w)RK$3aE%XWgAC?T0c`tr(Phpi|{VNdH5sD-Pi*~-Lb zq*bX6+MwU#mDg^@WBy`tNcED&8-l#Wrta%TE?-Vn{jJEKk5?QNc-nu=4uu+y-l6G=AD`nHEKqrLfXJIF zPg+I9Mw)(A$m*}65G70xFW&IiB)xuo3xVgK@olO-crVFwDR5SfEW`Do?sP{rqK7(J zwA}rF>Z@BRtmAafH0;eglaqlKyaJyJI%WjwmCh`9=S5zZZ(Thd5BcxxKzZ>4k*unb zsn4q=W2{Y=iYfNDdI=iS8Mk}GKsHqgDM$v^1a=12hD@V2HsI0g7{|&=KW)NOtq=|w zgj%$~iaLSYK&eTwS(^c7}3|s zjx)BSTHA#4qbBbrGJ7{Exov?yj0xiv>fbGdwL5FYC%t>3jUK2?BhKJtO3Vy4j;7&n z9^ktDT4(d2OJl2wzEzEHN2t@<>#BQ?>}62{TibdzrNmE;X`o1sj(j5BCO{NolQx zVra3)bI;Tysz$cg>qlqJ#>)vF&db%kooMN3+@?I2UUG+XF0%5e9N^api|2x31+&79 znX{tUC_kYN1{aFbXb^7ZC@ONf^g9dwmErzXocs&cm1)Zgc2LT|7bANGaDyr;=uW%h5kWp~fW1I(kUnDRQfv`De?8Q;tShd^V%>Syv|Ej_NFM?0>vwsKqdH)iF0onhiRQo?gB;kM8bNcO$ z%-xLW=~dAr&uK8zs^*v(e-qzQQ+Z0N1)PhbdQ6$dk;aZ^2V|C0-o46oD(rN`EEgN%9Rq!6? 
z)GM-UQjga8OeDu+Es?adudH%AW%bV{kNa)tX6b_Zun;#_3M0=?$%Nh+VE*Bp;2*)c zcpadZXA*4``;pJ?j@Zhz=SvAnd2Q;y+i7XM^((RmRKC-e#UBMsn-SydYOKy5N^<}@} z!v%ffj_c#9FTQG{HPQ1fb{$6MYrDa>_8XGDgvhF=mPF4)7qsrX`uXQxQGdO z{D=4Q)AZT@`g>|3Msl=-_ITMfni6P7Zk6(#jIt8Twi{E<>gN-b_%*+zyt9e(_z{In zONOYCR&eq+w}IAO_Uk8VHwQaUarIpQ>5evME~}ddhgTUUv4W#{HFuSqK6>|RQJi+| zD#=X@dGHx8o@HM0M}Vpawi2fO=6PD(YP~5r&|Wet&ds6ATkh9)&DIyGPWob7gb*FH=VILZdJd`kL zD63uQm(65sNY#E1&u)0q$hU$s&Wi+dF=u$M7~{5P4cS9OSe?Wxo3iuD58#NlO;s6v zCuxbEqGN7kKv?Uy?)g9Y`3^$>H$L349Uq1cmKzthGj#|>sIkAe$R*MwYv$g=*y>%v zq}3OupF1JoFw`yXes6Z!yDQ`~^T2()-)@d3_=+<>lt4F~gipg?$M=r&bJ6Vu?4?lQ zi!`y>N5~Vy3L($qX=C`bf}70D1TcQ+D*vaw%l}5tl2!XRsm4dReVs_yYKjFyZT4cH z5aEj?TE^x9rJyHN9TTCmZ*i_vO;etuUADY^*!pdb3634erXF}#LL&WnE0IL&49?~? z!7$pT#-y5QFx4mOU_6?qcp6f--VGrurr!EbX|^+#$dYmjm(*lfqCYAAyNorGlS9pX zKt#u}6|qL1b^LO{YFThmQSOaEN(KnAa&o6MMR!d2<&N!QEQpmx$ZR}`%M5|v<8_+- z*-K@D$5D{pC)>rXYieVE8)JGkdZI2;U~_jp#&pvXgI=~{o|Wkuw%g!iFcmBxv;1tL};)D?&XS@vVrX9HvnuTMsvCxqNtQ@!IY=-*nS z?#AH1&3(4B@Gk8gzNGK7+&8J53l3ddPYG+BL%_z3o+NE+)6(N6%I&W{yAq&ZpCv8> zZ2FFi{@HGGfd(TC$y0ZogapiL*eGSzHMydY8fM7f++~3ABrWw*wDP&}Aoe~wZdpMW z-zDozk@f_nCgx#(#+gRrIlR3dP&oJzS}uehmxhmh^RDW#yJ_J&wvS7HYwtrZt5 z@n%8r&p7%SMa*TYSa}goLhMC?oho89V;-FILWZU#6Sw_10#o$wBfa0#9r=IQd+V?& z+ihQXA_yX-2-2X^E!`j@Eh(KMATgyor-FcV2}rl(q$l0o%>?Q0?wp>-cdu{lrSDqr zUi;g7pS{mH?;l=XTri*c++&P;^!SZ@nnNot0Y)>M#7Oz7FcDP{hfw)s!keH?y#Id z8JewVj$W0N#}Iwv+zbxYdGKmdZ&JTyO>?@%?x9UR5psbCI|D`?flNbuf9(iUfpI%e z->n+M5h|6_KVR`rD*hyk%ynbYS>^*4LUl7W#zg5hPbd^^Iu?|4*Q-^pXB}2!jqO<2 z>P_*d$YY;mm6eng`E`|+`%W^g8L@rTxtv!-Lo-<<<5-wYby}D)Q3}9f;SGkzGKEHx zYF?yJ7N{ggj>)6V6vQ_RGmX@mr3P!iJVGG{1ABsz>0eFXvI+M-eo`y2WYV&SC!43^ z9K~WZR<>;$@LnhlyY1PEyYn;G6lz~240qjUK`&h z-#w{gVl7ADKU$Ob|r+(EFRW$w=n8}?EvvzwWHh6%yvgP{&d4}l+n|e@|gV#xYN_= zwgfBRXIsfH1HAUchYMGa3ORciV{L+uo)t_Sci{_VMrD2!mP;(-9x@Sl9Q0&u{MP$N zf4<_MRQ%7B#ftKZL89#fgQ`F(Ko*xM%SGGG|7j(EfMvDh=UY%Ko1%N;r~b*z>6K8c zZQ}=osZl7%YOBiEE`;N_P-EXO@SUJpDwe*S?<%7qi6jCO2JTf 
zg-03iM7rdB)0Pd|j&qRcC_vBIVYOggILhAz4#PW#`C(i*7XS8aV5WZGnx0v-I{OmI z>IdE8jR*!eSw`7A?(qVq7HR#CVua!Y1-C(>PEa$o2QEBuiBV;h{aslq?_B5Ol7>%* z5fuFWt0Z4agZDeh>^8y`|5+V|Lh$D+{z=9EL|KFbvWO$xze)1RRxJ_~_P zk*`LdNI$#H9PTamSm;*PtK^>1faao+0zt=|?5&Zen=p!}>!+pp;SqeO3!@8DsT0CV za}IX;ch-gk?J{S;#BI5odQT+p>&bZ#cjCKDJRzLS3u!dr>v!hb{Cgo?1P3l+^@(>6 z=$(#xdQpeDhi%f&a1-O|&;Bmt%U{=UEB*QV|4p>Gr#I7KU})pQ4M0-FsZX*yW_NY0 zh(}F3opPUr?H*?N8_TtzfiZ&+4ydJO)Ciw!&6bM3q)Dkro5tzxX9H^z!DiIHK3U)T zD*C)8d6UgLr*n}yR-TbQ%vlt{>sh?9Qy75>0PC;O4MMo0TYLMefd;-Kf*SI%o~`lR+~?@53g4!o zM7nOJvC9PYkJlsx&NPVCtrrJWY|w6>XmlnFiD!bOL>%G$*luwE?h4EufZY&7jr|Vt z0%piMa?PQ3*%RMLb!W939d}zxaaW)*+<3QS zH_HS(nS?Wacg`D*sD6G*`jvY7-0@Shh>z1$Y4pEh4#bvZqnqaNFo*ZWfV+xk*hh9J6H7F{b z6afG#*!NBggB#XgkBICmL(WeqChNI@F(L1l05-ZbFuXs8G5fX;$Gb~#%M>_bLZY>_9Cz(*I1*Fg#i3`1I`5$-0XRHvK@H1m7v-S(DVV zm*jK$^6IGItr9K0scH70q5xG}w0bPof1W$bQ>~CGend{@1UULsC+l?=RO+tEzlK z)AUizYa4L}qwk`7cAzen%xuYm-8F1mHN5G+-D`+)3xxHD@IwBWm$$Zv4f8SMg5#N+ zp5W3dQ&E;24J@deYG~mYpBHJ6a{SAcrTAIqy{_~QV2M6`@viI*hC*$;RlHD{ZT--Ps?QHX z`wP|7e;S{HI(H13J_&UEp(tULM2)V^&;iakSU(IrlD40F@((|LtSu-QXk2!A8X_)h;=` zw&b4=^gBgi&RDbg+0D}=>jYEe5W#xg`EM{L+iV)&23*{ZaH0w>XidA`F}MkBnz zKKtn%cy{2iA3bt0`)kr}=5OZPZR^uX4<=JXHHTMy&C*BnXX$vO;0}xwR~>?$ToBdN zsRPzc4a>P7dcNbQ2mPw&N5z~h?95qGDg(4_dC-+w9+Ov*zG6NhA?d<&94|lWB(TxQ zRD0w)PyX2hxFBtzn;KRt*ZLQse`KgqbPZpct@=1HaxgtA%cj*G`Y>8FKoP7_QRTIJ zJ89B+)u&xeYMMPc6_1G-ZRFr}k%mbL@J#?3!{v4d8p}{ zkE>T!R7N+i@nNnwc3ev9*^>#Ku-U50Msrb7nYM$5NZCa6xhX1CHLF6HpLv)nacpx@y06# zLi-)74g`8aQKd#%6@=oP*5nVkqde@eg{3VUa7pszR+%(Y&T7unU7{9O@;VO&!@LbI zjVJuUDG~FiKA2mvNb4#l30JQ)%(3+50-0a$LxKMQ!a#l|Wy^tD)FOn?%q(*==~2lQ zVbog(KY2Qh0&AQ-7t$+x{8H?6LmMaRe4gk-oi;=XUX*OsC~~;8a8}l2&x($|wiqo1 zRYY!*tF@fS4`5z1MGXu~i_HNozzPNP9UZ69P`sK^WuDpg{oddGf zEQ74=D~l6Y#TV-&``q}@as|%yz3r81H;vtSw~t71)L{gQuDJXkB*#oGe!Cms$sea- zoBQQaqraFcmC;u}1mg=L)eoWn!(*CZZS|xNY`Y{+&faw7af~_aBMrp&_lbGOGVi_0 z<3N@-yw#olo4eaAa&lq=xVt6ex#RKv9}Se+1<1MC6U@eJyh`paT{{6V$|}{&1>rVU zR?IiO;ew@2t3Im!$v)E!GaXvnj9%x!`#lat2e2>zk(Q%2mZq1Zoo)|O$(3MzI~4y1 
zJeCu8{SE@WizA`$plkzR@g_h|u|N2Grn9G7?NpUXyozC|8P@5v<#Oz^i2hw7n(`Vi z)ATn52c3lUq+wF);(!|gP>o>Jdc5K{F z=g=z?{V~oOEZf5X2iz3sU;}ym-li&gqRFxHarw5u+&n2YUNWl70whxS0}{QsEJ!3RdU1A|N8N~u$3e%AJYqT^;v;M0 z0)C4(J>3Gb#Oq$LJMQ~UhD+-IB}Bma`KJ6%h4?9zXtS2XRw~Qd-H#ZyI|{ZJ(*b?# z@F7yE)VPxB`HRgLA072vuIVnJ*mIx$@=&_-fVnmV_0ZR0%WEYtR{qihk9fI>{)U-> zIA@`Q<8?VZex4TUAzh;o3z|)(Yo|Cvx$mHm*m6YFI_Gbq9ILO`@*VUW|N1N8f`*x9 zucT2+GxF62OY@~O-VrLwqf#%{aY)nv&0Eh$_GqS72P{Yn%U)X=qbwa2a=`)oZKP}f z;y1tH61`Ate*nNy61i~(9+`4?pxUv?67nJpOa)5~#TB)FPEmrg43>lf z8b&5Z*<63O4TBRb4fvbF{T6_~In;V3FR`MCEy4gg@)uc#95~&0XnzN7Fsa(+bp5s! zF0f&16M#3Dev=oqv(Vu4V8dF!@sm1M?i85c#NUD0N$j4vl2maE#NcsDK`Y8Tk=-KR z?;vpM&NSR`o!D(oSMOaSZ4F4?usRBP;dxN@Ykm;FTGW%}s9=VsK1Nbs|2(ZtPv-q2 zQIfW=WMqgIyyPZ^dN}#=lOV9T{!c=iXm%Lg#v2XlWG>J zrLE1C#-x@(0%7{hcK-&heCHZV`eBS23Wp>v|HT@6WmIj45WBuWwGBLtbGhQ=8gofP zW_%CQa(O|@tDJmGt;99Wr4WnzGR5_TQDkjQ@m_tcc!M6jy z@SC>yzA}7Kz&XMJR`yvc7a5Hj>D|cyO*stg%vAsQT-w4Q1mAgg$$;3W`%@EWX%N1Q zAOv(W4m}nMXzBWV_D0$Y{*eA{XAxlSJbGC zP*x*AD>Ma8$Y$0L%CNo1XpMeNwrsa_{mH43>Pb6#0t znW_03g=u&C8@0_V34T#)XP4NE_YSe^z#HS!oo!JuadI<RVsi}9}eKD{C_qXu2Hxk)kAWPPM!zA@P2Y1YH}C<;e~!|*t&$= z&_v{n4SZ@iB;1er%U(iN{*xk#7SD!dw zxREUQ`XX|TCN~8$lCM;(%G{VGHsPBK)JHo`F6@#5?dpy^9>W|7*sw9)BZ&?NDZBQL z#7)mf;5u$8mgCitT}rrx1PptpbEv4@?q--$A5jm^IB=JH2wMoOVGA7C!G9mMImjOQ zNH_&4*?dlpKwcXhZ)X}xHq^VQtkdo4-d3WpG#n^L+}^FR5YkX%}Inv^|G6j9>1xCL^WSX$tepE|O)IV;>#fcdt4;Bc6aAx8Jo~)>@uF0Em+B`1;-W-2|RWiA|_dbca|!G>I>jlM^ej@cqAQt zA=}zTTOm>a^bpdH8_xX>zTidOw;NLJ4=f1;N#|{yp;H4`W;|zVXFO`Ynmgl0ww#4E zR;hd!e#Lzfg4AhLPQ@KYZ)At%n%g*1+)qfKHVQR13T<9c_jzCmC0tyTZQZ=Ht-tSV z-+&8wJFw;vVmd2_wJPW$?(aap=Ec!JZQsv0#K@tze;S+X5xEoAx!ULbtVi&Z;^m44 z>A@&Gj6?+~0h^C6Hw*3doAss7lSaeweLY`f7xDu2 z1E~g;6C+7ol-B0`#&UkVzQUASS_T#JQv!#XZc zq;+xi&qnV-&%NWCIWv;O`-nLLcWXT6R?bM#;+=Rt2x3rg7_z}Fsw^{+rS1Vps{-Kk z0|JVBwInCNYL6jV?dp&ZUpJd4^qy^(_m@1+;b^}flZqrogjXHn{dKeF9qVf|xl${i z>v46l?MCm*q3w*QIuVcxRa8UEIq91Jj>X#fhlShGgfsd(1}$^~#e|f?Wn;x<6a>1$ zeGgmG(Pg#?XQ?Mi$Saq)hU6AzSlp%rDn7KHz7BMhUKS;!L<2n~4XH2N+h!KIGTKC_ 
zG+44-=vgJxPWL-5IkF|$1X#vfBAl*K*Sosvcog=FTQ>T>w#*L@bmXE-#9|j)`|@JL zF{-$8FO5HuC%~iAsk_d-7tj*K?nej|ue^|Es8{4=zSg&73Z7>}JGF1T9SH zvLTHGTw>lH+YYo3oyXZ7-pBV(_0MApw$x@YVo8gZNXuZ@zN@hdoj5;`0h(3*XS6dUb9_F4bP$*{Yk*W0=rT{-1gt#;Kry#VqT z9Cy2E%Fk1CB;T=tg%e577#i&5YIBow6g^>~3xyYtD=1EuT&|Z3R+(9b@4OA6ayaTY zwqbM$aW$%UvR{IVWtkFNMtv<$G_h3f^QQM%aUsA;2x1emHM^pPG#pSAN9}E$Pc%+M zxRWDAX<*?{Qu02*hWY(NFQZ07kU3r3EMlW*vRX?flMLn1C}*>#!ibgjy^XAhFK0Xy zpVI}p;vodWK(CA>By8;84tx5P;}wIl`K%g^3N;-ptHo+d3`ecPt7nnoV>Fy_K|N%b z`+l}gcF;X+AuQE~2)I7ZdAATV$cK1&rcZ!?RV3z!0lU!}iMH?=I5+;;NC9j9o^gmt zRq5^5VbfioQ2@pf@R8ZUQ?1a~6`Y)GFYPUuXWegl+jPRc9M?c6%a|kfD^O_Rg@$F& z2@D3w403@&9aw$f_vZHOn0*?e)nhT+2;R<}Ru}^7zm?C4Pu9xnU=7n;J!ai=dweEB z_x-K}o*{3XCxZX6w}KC7rWN>~w>m`o)d8~ZMv16X7I(4@d8s*9wv(kqurEjQZa6UjW`CsT?~(q^ zoJXxf9i259T+GlY&OG@fjt!eE><{1xDGK2H4(0lV-%GUzPl1P8|~F6*1{H_VWC6P%c(ojy1Psvic(i|(DVDr(^!j}yME6_ad`q@ZHjXJ;+E*W6>(1DHan19H6QwA z_;YML$wl-J=63$N8RO5t`KRWb{v-54A*isK(3sYftLgTli8>8R%w@N^P%?HRy9usa z7O2fx)O=;#G}%Hak|>%#DGf#Gol&&RhkZN{nXL0npJcM@A zmWScJ&4nUb!Y>v%FG~LBoW0V4WwA|)^V=g8SO`7!dwO&_0eKD9H$vdamM#gecb9@p zpRKzU(zvq=EF9{TDR5`Z9<-S;LQLYf?1+q-uqq635J(?*7Uy6g1*y?-MOEItC9gt|#Fq}7&8S&|a1Qg97siNn)L^lf$LY^sK$8!EFJ9>XJg ztd52g#8r4MzI`pt1@kX9>vg)90%23{WAjy`C9`SgJ0U~qTfC7c6VV)9^%~fYQ{65f zmPkG8#-r0AohR$%2y+&mUfW4CKKbO?KnZ)R@_e^RRYIB;bT?#j6{GW^o7qy_Ln?I2 z*WK3E){>P^iuSkyZ)nb>wTCRYVAstPWp}!Vq#C0(J8To~PmNDjiC}|?lRnI2@hE9P zsfU*mhfViqt<@4^tob@njysmbaz8_F?9tVgqLKmCi*qEzoujvT`8IAn<}x@lkGI=@ zO@&FQ#3c{b;hZi%_AaCL;C5nL=BUjJa#@lZV}q3xDj(03o2AfK>U_L;X`5Z`JW^Xr z;ncxK!QPbbyBnA9xf197RKrjFI!lw9IjNV1C;LfRRcuU*j`zl__!slZBwP2e*CyoL zg=5S#E#Lqrp7&=Hs>)w=K!6Y<#|7e? 
zQIuL^DBLt;j7nhjQ1EUPl4zKdfkI!>caS{0m2*t%*(9zxN4{e)ep>SeLGPhd{=}OJ zk(y&7G+wTuopiwZ>BIf>;g8?`*!jNPUlOoB_rn>1^2?XBU%@sqN~Sn(j{lQwxlQk4A$xzCDG)oDWR=Y*p*YV!)dYA@*dp^BFbDB`aj-)L>=A4G*@W#-?L zvrCC?>txCC-39a{S;KSLGL7)dTxRcm!m+`0v3DlwmJOQGCNv#tll35 z%YJ_pX20eM-&}#<1s9qyFgnh|yT{J$4XaGH#Np`964n!w=#!n1B-V*(a0Ry` zuFf}Yt(QgyE_k7pABmdeGJ=w@(U;;fIcI$sa%w2w1hoD|neVB<0_dsU(@(Kj+cvp* zfLT=!Xo}O!nJt`df6fs~IC}`aX9G*q?G25v>o~ylpqI&bv)$}qh-Px5v$aW**8?WR zJaf9Q-O68EfG@cSj)oo3z}l`Ei*?RL9js`deR+y!UAKv@_vQu zPF?q-1vD&E`PKo$jnq#2K2+hvC~vf)1@gX78KbPRJP&{?w)ZS^of!RsGBaLbXL~2@8z4?`D_mq9{KdX&E>gYOJ~LjqK@svG{5GDz6JGlplUH#V zA|+<(B~#MGesgq+sn&Myou2lK&R^N(x#a6_@Knpp2AV+&S0A>eNT>x!pcJYJe4Y=P ztsi5^*N8GLwf(zRH?xp^f3-Mgog6gcfFCfd%fdH96NRuYJ3TyOSeOy$;#3GMATyDD)4cT&q-(MMCJmUc?#G%f@CE0rBR)6@zvGF=!ID z!L`%j-TC?_fQ>b5)<3xsK}1UH?pXia1D6kQ5?4681?Sq-wXio!9KU(Sxk001m<{c&VZ~uOnDU6xu5eMhukB84h-=%rP=b@vQODfy$pI>1YRP#+wo(L+ z_D>7~U%_lMTU@F-+Z{ij4!8>CYcLGCK>(wc8vq5L?g?-0yqW7d5*fONPL;p*xIXK5 zbgtRS){Ga zTaZ;*T~^6l6-DJ^7qu2<%X=Ag29*=h7(3D6x^HocpRDkC>Z$2;ekp?&5J;+aHEz0H zsW23~-6uYXo*RGVuBXklP)2Z{BrLx7ar=I)T}1&O>DB1inwBTjq42!Bl46HS}G!0kXl@zyX=LJKKz-|6+%Q+h; zQ*qM+Sx$daQ}+ICkkY1fHk1|0b;o^+F`MPDgUP~+ch;U!`5R3*z>Gf|O)!d~tmV?7ulH&g$6pa6u z5lE13?@T*BQ4@&S<(bVmL7+SdiTM(V2Ri>Xh7IhA+DT28p^w+g~ z5(M18?PU1#kYe>_U?S{I;Sn09S{?~g?i0-Ez{17g?i5l_6{Iy>2%w}E_bx91AsN=l zWp-wBEyvc^%l*^p^M1R!%q|6R(_EoH>M}DUP>tUpzP@>vr#3`dRggBua`W zYQ~+9aQS)0521ky7!y-k=8h#@LSyxr90N9_D>hXJ2^NZyqQ}r`219vz>WO|lL&A)c zlHw>-Zxjoox0O}Z4|S&USER&(Q}$7%h(HM^C#!jr&dTk>?IcKCz0vo2)~NG?GLbD= z*q{48V(Ulesu35KI%WLUQ5=36dLJ^#Dho84e@?!B6K79T#+Wbj-z^7EDXLnOxBWBijgZw)0f7PTviS z_6P$GCQL&XW2~}rwvnW|-2MH`0R4@#a5+1?Xk}T3+YB#4S?5f;BcuniPoa|xs$8Q{ zGW>IOy^$%}nJX)Ou(rNDJ)~0~pBQiv;Y=s*J14^Ik(P&QXTaKQb~$6}y25V{x)>y* z5+@#iK3fw4n+LduZ;P0axASZ1ZLmSh!wpQB(AWiI2trB9f~4QrkeCSBCEf7msX=r4 zBQm!io+e<0f^FXle+Rj8Ay+{4(&B=B(=C#GEq&6O7H(+BxF^jry-2q?@R``9H$l5* zmwy{?0Sj&awW2vVtW@$q=4pu#PK(GP1K?SlEa4#DGpd!;0-ZxozJ`7W={*IOoZfv06h-KI%@6F3!r9?3M3=+iXrPy>Tm!sE(S1AAmKs02M=&6T!75x 
zPPPaTUW|i{R+MAi2>SbTrbTmG#ExH&`Xe<6*q-1|Iu*cY!laj`)ok{4!+njeEcVT~ zD0Ut{GQuK7}P1XQ~^nJl~ zmLG!Jcc$0Yd0B9?(#)&t1_|7KAkFn75Vj#3RypW-;CcGylbOzZB(J@+|4` z5+ULIn=-o8=Zq<1ywLDocTq35LKJguIQ&RQflgd%Avn>0HY)|Kh%414O$&UV$RuK^ z*G;`#^UmG%(6i3l)`asWlojUVX?`eE@8sibElzG&5pU`D`(lTWwT~nPtr?;+W2URihN!bT!=Y2yBRSzturGS zF#+@bs>1l9gEnoAZ|xC$41cRqyry*4eu>sG#IvKBd(vYCI?Z#o>IPOQ&J#%O)kL|) zz2t@V(teGzhXPe&V@dMf%xri@tX%!7STct|g?sIZ5SuO;zHq;g&-UwTi2g9&$-Zy$ zDV9vXW1#iwC|vjXGjY5 z>~3rL&>xDMIP3P{>z}NP4FYqht;37yodYk6A)EYRUF`wk1#T&eC@6fWz9GudUU5FY zQYOw8xLfKK7lhGC1jXfLqdUeCZa~4rBx#Yad&zEzcQPf!d*hX8N)w4DQ2~pkPYm}K zpYpvWQ$eH%)MvCp9xhNVy5-QXJcMF(b;??4~0(1{HJc8 zn#o^c9*rRcm4}iq#rfcgeyn^|RV|URaAdAop^MwHxjN0JEwNBj7mAMnF9qFoUPfBo zH*s0Jjm$eeb_t{I1O_=>*=1+yDdueLF$@uO*oIfoGiS=f(;Q1dMk@z58z-)lEjghN zBh#xQCq@wp(^ubUlaj{j#92+qTd(LGpQi1@%9PhjFltKdohJpXWeh&Z`8o9rbhEY7 zTU@i6fn7L@W@agU6x{2ioF8vy>2Q4*o4NUu!g;&~;k5+Uy)36rhbNYmjGr6TpW%_3YHlBK7m@F|Ip7PCoX-A%&#T1fF1SjPNcxCo_Itauk2;v``q|Yu$SJE!={iA3j>2EC1uVKo> z&7uoYRvSfZp7`&_T=iU5{v34upE1n)&kwrl>y}3!wOmF)tF7X|nufu)s2Zstql3}m z^}ipC446Z(kNXsK>c&FS^CqYD@XVs&TW@f)?~xh~iK~AH`4|J)VbQsJka?%?AVYP? z0`&py%^lZd1ezA)U~)6}^K#isn6jBBw++R9e!j@Mz#`5y_IJ=W;NAQiAI-UvJ~KRp ziNxNl-Tw}{+uefju7C6$)VPOe4?R~?qc|ozKtrHGPPv>Axcaz|LBE{upm~R^?;x^* z+)y*&Z6T<_D#WD~(p*vmG-U6664@8{4k}JZ;79r0pt%EP^26Lsur9EmEv7a3`0*y> zoXiZEqXp-tPb$>lfL~oC&;Gk#Ir}tnw`Fn^VRDurdCka(bBTQp*k|O1D`JraO%2GP zQ!B+e*>}*y*xb3aJ5U3BP7b{82YB+eV|A`j##PaO__V8&1I<1D%u&bs4>kv8cKp z1fA7iWL<3ZJ7}LDh&@8aC0&3$p_9^JHw*l}T5lY@PBLHvWg9J@5Xpla?))j*wAwVf zT5w7?bRiVeJY3VeKL+9f<^;J)%sQSUVu2LL+v+U^Qq9a!N8+3O$%iRx=F|)1FS8J_ zy}}ZgMm#X-+jF?iT0N~BZ&}uAKI~Alw@~NTYoju#9C=T~0fEskk|MG|a{zSt#p1jI z#9>PWA)u?PoDkic-LEtxK>F_{yyvjTH$8aPkx1?0&s-Y%Md}KG1_s$hh0A*t?Rlr=I`1%$^OI69TWJmJf&U#6p_Lh)q{J!|86F%)>~Mn$MV0 zTf=qaz(W^)BV#8Ml(ML}q7>-XN0G&~!ileX4GkRYn{iuV~HhmvD@RMc@F9e!ylNk3CBAj^? 
z1EdPp*jmSyE~;b2Tgws7AY1c(=mGMpp(nmT z(&_f5Qta}*=0GR&J;tPf?2?Cu$7p0=G3ouA-OL>EMI1a7@B{+PEJR|@4LbV{vea?# zoFPDl0!qgC>Niahvwzmo(M2tcU(D zGmZaU!s#2kzxQ&4tA|;`oWE|YE!NS;vY;Y`$p^h%h;UqAr|?(xuEjqdU>|&Yn4~9h z-1a*tr}P76X%Au>94(?_IGb61V#GaF!wn8NIN#st9GM@pDvUtTW0pHk8a{BK#Ak3! zK?(Pga3=p+!o_T|NK?wtVX*+-p_Ma;j+osX+s!~LVtsTUGRNc@HMyQdH0&;1LV}@4 z8xsqRCsK`2sE3#IT`Wd8;yR9zyJfob!np*RoFKWIm-0z`vLY2O8sDM4TM+K5y z)vQ11iofH96$09h^FQp<{C6?#7gPGPZ}4B{!Tm>0Ydw6;JVNWU5Tkv7EU~HjdCmdN zOL)goO)Y#vW9nNA(8NX!)E*LZpK@HN^Zgy&ro2469V?@Jp4bcwD^gnILcr&tuSNlQ zkhvX<>XfVUk2t?Y;b|d%C($p8r$e*L43RNcNw9ck92Bo+&k@um)HJ{BV{r z&;^y8K8(NU49)x1&@)oM>nHraepdtN=1}mxwJY{3`_rs%E^`FeDB*}cVG)wA$qZBH zSqkxDu_0rX;k7-|I9D9lXhY3$Tl6L3q(hv_X|n+lgou!*^6mz};o|Ss8X)TI zXGfIRq7*xdXu9I6BPWbxT74|XTb(RB$-6x$A4U;RpucPSM0Ko4opAP+2Q!`r(##)p z2A50ec#KPs=3%$0J&Z5VU*`|vCkUXzerpj;T%=n(<4Tnnp>G*HmBd((TOA#5oNsod zVg7JvTC2ClD7ame0@&?gOLuDwzfCkp@BGQ40G92dY#sUp^;hWQo~g*jkoikTTu&_T zt{qX7TiBv|zXS6k&GK(pw%b>~fdETRH>k$O#_54ZDAH^EA3Yx;m(pAg_P?u|s?mOO zm&W!jnrcIy-K=;4P5ek@_2mq3`|o0JJ0l-va{*ljUB*u2!Rb>_L>>Qb$SoUE^zR^{ zw+3g5p6{H=htJ(wl-B#WcE(!tw|Jg;A7Xd&E}snKM1pU(hyzUstqEVQaC1rw*_yrLCyaA93f zIx>aO&|>d#Ub@M#DG@opV~7}gw_Z;?EGQg$llZo{hm}KzkCiiPZ24WOCk-t7PgV0@ zsY3n-JR93eabF_F&iT?eX=ME}-qwn9?J#!W3hWgDY5b7eFiFlb$UFM($@q2*La%K8 zNtM#(AX^KhkpzLP>wt$8n(mw~G2YXWHJh#@i71CZGmK+BOMmzDTKt`wJZvG6q!kG} z(~z{J1ChI68~MURS-gdH9!zn?+7^eyua-j&Gi^BB$W4qyS})CBL!f4DJtK3#@0oJm zlGFZN3ZJhWEY#W2fZ?N)`rY`8vQ^A~J1t1~%k~`*^WR|J-5)^VQ9IP0ei6~~78uziuD0P5RNxInV@jI@tjwq)2sjonIB%;7`5uI_0H{ zBt1~+YmtR;((1Gir+F15sUxFjkUc4gkH~`$aEu#sVCPN6k!d|qd{>1}kke%&k%oeE z>h|zulK-xAfuF?CJ5DSvCeC&DY?WxL#6C*$f?k)mv^ktes2>)#L~yjENl~Q>Osv>9 zckQaR{Y7ebuKk|z0-1HZpB1P^^*|t}?p;-{(-XO=@1VTW4=+o*5ZmS$6a{RW5X%G4 zx;CGW!W5-e7cQfqxaCk)ANesL@5c5W#Na~#unaZ;Smt_4)GlFBL)ajWty@1Zc-aB7Y0jdTH5qBx&6K!9Ptn~#kzfPtIr zg1Gm5_vd?pbuN;a##Tq0*0%2zLe^NqDkvILpR74cv`vv$)YO{py_P1zx9$4q^h9m>{ski`R(8*E7!$Z|+0 zbmH1D^~l)6Y7qk{1V$5_doAOLGGGNkW|dBIpzbc29;r+xpLXp9-~wu7(tA=if^V`-t9N4;nYp;Io*XjPs8gW@9aCmgW(&8!=llV1!WceU 
zCUAqM6tBBqYtY+Q50tH$%LNM6LrY}$hH8%b97YvU9sLV507p)H3SE|FF+yVNhaigfV87lM_^jMfX7k9VG-ME`+ z5b$OYGEQ?5nRIzl!3g&OUwz6(+OI>;+ z(n;D1Sb2DrqlN-Tbsea@I7Zt>dqW=JkNY@5Cg;!~k0Ye|zk;6M$3^|3p&(L!OE zmf}~fr=;o?;&|3C!o`U@i0M>b5Z*<5hGy{W-h_shnd7~Q31AWrd}}*|t{q=o^EaTG zu&|VfXUGw9AZGl;^sQ=>Pt3;E!&Yxm=MxNRRnCb0-0fDtA}h_wwR4 zPyxs*`$S|Cc{4HkP$UXu@rovn(OJ(dLuyep+UfPJP-O*~6%m-Ac7aXi2AgR)fKI^b zS~0*zo_C8}Wd5CS6qPV?QDrQPyQqUh;dIWl@N7$$7@`;BQ^z9uSqf^`QjPXEVjL_D zUGR~2S(o{(Df;gqbH`@LoIogy=L)BFa*cnroO284pJL(XRtaApDVm1NKW5Ai;SAsk z;ZzYPR8@M9mV&nl>KUr#OF#GImSa`$ooHZ}LNyAzEKMk-$l=QQ)nfel^UoIa{{eca z@TdVQckhSFZCt`3%_ag$SWP+EDL548nFhx5thaS%*ZF-;qt7f(Rf@eV;D-cgj+C#T z8;Fs9oqbk_y00I?m5jXEeD*TNUjA7n%+|dsW-z2<@Py)v2v{1^j^&%7BzaKEn8~pH zH(^Aw>D=D)tV=~SoV4pVgRu42#2&tR4vs@aG%x@umKgKi9ZN~ur*3PTs14V&K-uy6 zfInm(8E9Y04b0NibXWNf5-+Xr=4Vle!Yxop+-A(n&s54Xlxp5w69MuzlaRsEMg40K zP)5xL${RZ(>zW6w2oU5{bx`Tmj>b(KuRiYFx-d(zwvbhm-jnKJbhgMhs_C@VMX$x3 z8$KZ(@^`=8Z}a;UuqG^i^WnB3Ai`I#y?X_>eFe#55**lSVz zmP~)+un=S~seVb?IWE}$YVW(FqFlB;TTxKrA%hZ>AV`yxoJ2hm8U!SVCP|dgw18x= zK?DJTCL@_6Ig64(8U@L@iB0Y%=PaRV=W}Mw%+Y(-+;`V`GxOfN{=r&QFZg;_)vo%g z_WteP??)4gGT$QYo5!;@z2RAmL%1(Zi2(ki>q9|a1%ZPVK zg7Mn8E#@f)qt@7B14EUG#|rQ4xWcY~)e*egDa^ea#k(mx9?P7suAvO=+97%hY2{c^CrhHVC zulI1Xu8qT`6*BSSKWv$d{y5pCFTzZRUwu&eM|1k$JKQ~mhX7j0uCDpZbsO=8?~5jl z4c8;(y6G5XYM411=Ue+zVE9Y~Mgr8i(HLgg^{|^LIJ|W%P{* zuHSvENb6om)rGJ%wjf*UvIj)p5EhfHP@5B(g!pXk?$QVGNqh25r`1DlMHyt14n{!{ zX@g>)n{9bRdN=~&g(8|4I*yf3EB6y3=6bF+@4UA&ddBdDfDfyU(QtQ7uspa?reFsn zBMzJ9ZC};|6%=-m{(C97{(8>8SwrzZ`j|_RA&>b22~8QNrgeNvPV)1GRHx?=8{W%B z>3J+#}``{C)Axgy^z$v+zPRhmmX z&m?B(xHPm?`3&tSsOx#FeTF}V7vu!Gb!C_66GK6oU9u2P4zO43=1VQt>U=3FDV|7>5tSCJ zgWj+#PuY_@e)#0z#6pvjmqoJ9Ma^Jjbllsr3xQGh4#iY@fnDZgCcMGykLFSZYlf6V z_=hgmWRaUO;s|UzXQou&y>M5kzBEHl+02tYhu5~`(pK1Y1qU!>Nu4gp!cgD76e!%2 zT#9s9Ze$;+=wm+0gGTeS2^c`%a6(^A=5p~$Jl|?`ZFSyK3@C`m^}UCxIeMJwK%+!v ze^QCD8(=V2S`sZz8KWG#KgP6=t$X=?hdVEXpIJ-8is?w2F2s}vV={# z;;zlV<09zV9A53phGQjD1HqKTTsSFK=P$Nh+iPDW*eMT$ 
zF8AcGJbd4cR(SRx7}603Nm9R~9X&+60sYFxvcrSFae@e)O;nL!AAhCM5Y!!@*dS0&%J4Etg;=Js9~Te>A}U`(%e1g@ z?j*e44#OD^alBF=Eiu%(SIJFD9v0wFuEQek+{c;oWXcoTg3*7yXcf|t zlN&e$Yb_{yf}(Da&z-h+o;Fx>o(%Sf7F$eV?;hX3V?pV>NBeN}z|TW%utCYowU9e! zDl@@PdT>PajqIgb{Wv;)iQk%1{pEayr{H%-D&?@Exdex7E>EYo0 zkU>@c%y&fI??~yqtzt(bo%O%eO&Uo=INGGs79BZ>@R@jVI_|u|D0djo-XaT(lq+(9 zQ_L86VsI_VG846p)doT(f+ks%+MCLyg=5#8UkEGZ=+h{kVM_vZ8u-s2NTTkf@?%sY;Dk2PS_n?z6RXvk}nL|2nYb`FPKXiEB2X z?=E-<+~>wl;xPK0VcRM3H}r!O`r(|G_0iiGW82kgv=^2Zd%akEMl-`VuKJU(_`5

zK zX$S1!+M3_>;AxE;_gG@iEA}k72Y05PckzyVTbx0^{9@n8uhzHOS1^9J!%0JnF zvohb7=k1t=I>tZ*Hpe&;J7RF1uH^94$Js>ov#BiUR3yoZkAWksso0K5dUZbP&@Buj zmk+gA6khJ0Kekt&+*qh#aqZ)NW5RkyPhT>`SXFLf)Ex`$4!}iI>c;I}61%urWSiGP zu1^y!Iv?`(fgP7Gc?|tMF3KgAN4FQ~kyK%#4kdpP_j4@J;iv> zJGXidY2f6k6ddTglViBFOvr={ndmblcgLvX;5I~&-kLVxlnOn8>etU?ihNcEj})sY zX;y3_<~-k=_3Jh{`8fAE_7nK~v5^N$ji>TsnpQQ?kn|f0PIMdQmoID!OUb&u?G}V- z8dufhroc)AVygRlrQGOb$;B#aX=&LvyI+Yxg{jCCru`0Vo##AFOAN1h@gM%yd|n46 zQGO8&WT}1sV84R@+OPhUK>1hPlm1VA{_}ptuaYIK)j}S`)N#02yY}a!f5Fbx-z_^F zeZknP2++~*5B=E8s_g1kO($DV%rKSpqzI4Jmu7|IXd#aRZim*tgxC1-4PSKXcrll8mM?AQXsD`P>eMqxghRtVohN35zJE(H$ zMa65G3HnPX6%J%xpA;o>2T-n*_2uEQeZON?5*e$-OTQkT?|cj{Y`H;WP4rty^x0fs zZM`e(`l5`&#HPBdx z`}-R+Qh9sIq)_+%%gkRq^q#Y!9mP_nOfk$_C#P_Xl?07&?32&Q*WM9~XFUkNX9FqYR3x8~ zpR%~bF2T%tOQ-U(;FheVC3c3iG9yHQqry*mb$aAbHrL?Jf`$81?!9X`wg&HubY}Sm zSTo3o#ZF`z_8A(-JK{1vIrcl>)=QC4AdIBvh)jvzsArb+Mg<-u9UwPe`^k-$HYiym zxC^_Ogzuf-;BR?%aUwQ|r&?@PN3)wjHqT`HRmcGq?GaL|{5^F-JsR%#a4=Ifp-PWD zBADC7z!E6rTM@I9)hs;H)`~&jttuZjqF26Fn{YE}OqeG+u8A2O<0Tsq>BGmnNX8~# z>?zaY9d0V3ESjW$YQcI(Ra4%wNZ zKru;E-h~GW2cm&I$=V4(@2r9?9jPe`k>BM-ogo0q*cD@4%*asH?SdLj1^PdJCGqI~ z%O&pq>YS3Wml`;1AGHeDDx+aSZ>16ACOi$o^l%Y(rS|dcQv{%GrUPi3JwqIQaTnF8 z;VHTaDT$QXAbeLj)Bdh<1{5XW==@NCz#1PQuqG{!x2V5z(?{}^>_7Y~pyZa)i^XFC z4+G-Oxk=wZfmz=`t-L>dr{+4AWmj^m8W5=pQWN%HE`8TmtJ*x`V-9ci*H zffA^kW-4tBlS}TXd6*vCKM7kI1PuX9+j_ zr2%I9|1!y~|LDoL|5Wysc;y?~4HPGiCLHFsk)k{EFYjMS{$L82EQj8PmN zTrjyu@|DJ2OeB%ZxSmm+ZP1=|DzcA2gP;QF)jWW6a}&TX50s4qU{#=6eF`|&p2q>S z@jC#5B0_`!RA7IdLwrX5>Gy`M4c>2{n*dVIQuP?9*&UOg;{mV=XC%-1@MK8*69AKt z(F`!?vI2*7LHV6Bc0xN^e))R^`Ey1*?~jc^IP0Uc`S&bC`V8 zC}r|DOlLHpp(5`&uQ)l0G_Cbt@wUUr#&pmaj#WhCye~z?x3{;qG3TAVMa7woQymYi zXKcp;nmmDZwuFmq1fZl^1AteYP_ASgF|r?x0%v+FyNvzsh1e)E0v+Pd4OB%IK8w(r zxJMv)_v60-*Qd+!uK*Gy8Q?BsjKG=7pGD^)`lZnXywH&jN7sWV^HET{1rCe%!V$tR z-7Xw*AUC-2ZwYOQdu26;@Sh#%N3U>bg{?n>0$kG$s+Zq726}n?i%X2F&iU=tE^H7Ul0$+jkG>|F-w4Jh8)Ser^BE+-Ux3o;1&L*caIU%}c8G z*#;YHIxH>RXNkD&Qefuq&nMc?-}t*;EsC%cI951Af+y10dVX3~ 
zS`!X+avPr~FsoG}-NzFw`j@)Lm?c3=nPNDzKX*t?lN9rJ^%)on1zqnMzy^v#P>SY_ED^QCuF%5p5;oCKbq+vevrpY zuLH`0bILAp7GUe77x1Ej{9*z&y*}W0k=5IiN299DM7 z&EE4&%LLvQRb20^-qEbXHuWCWA3BZck5+l>MrfGAklEzxP)ub;j4> zN`3Ts3T_k?9o->-Kk}WZLA$$X8QTlFE?hcO9Kc^&`Utbv>Tt!PC{4NJmxJk3JJmj9 z2zftt^>>av_9!`vV+3@@Ui2^OE&_b*UFiA`_&c}_3a3Pg%PPgcqn zEEcrhX7KrVJLD}r#~7sH;ns9jL70J!O_N7Va0ZBF-~DA0qR|W3+;>9<;0#6w1{=vM z)1&Q=$g^R3OsCig z^2@@D8xr(|=y#hQCrIvQ@WHNC-#bD(hb`)jOyNhDxuvQZdvZ>pS!e2R3s$|urA?x1 z;zWEqL$91Ixl8J@%@21j;Z3Z)+;TB}mek1(;P%aiD6c3%7m}qDeL2w_uB2f-t?;Xn z3lu7?4&GU1K~u3?J+hAQWFvu}cq2V#OD@UE3bx z*Z3B_4$SC|p6^nKpIxik!!ITivGufr*0#QZMtTee5_EU+LTr3|UWVb;2|7qJzQ&>J z@08n&mSc>Q_mPB2m-}iuXj`GPYJ3+18%M0phseX$9_}iKXfhjLC*~!t$h@%TMD>^_nIoby&PZa&>(v)_+ziIjw;ei0tt@}9; zbv_Mzdd5iF<#oKOM}7xy#XzoG@II#7$l8pb-1_5AY}5W)k4EM+Kyg^eFMbm?KchuuGR6a+0+dqc7VdB~msw2Qc~qEfYbMI%k5!330MV)DMtP;Rok zID;Bs05jXLNVryvssp3Cs}rUASCl1fv-HLYa@OH0a=pN|JOq^{-IpQSwr)o=xM2zH%&TIU!8khWJZW%;U0|s(0e{t>{h}?hNd=g*8!R_FN_Ae*r0L0LVvJN9y&LCcwUv^Ey^W(Jt$1avi1bjU zg~Gbi#wXRzSeba>L=()k!2#LN*VDDUX&vjrNLX$Kt;|f6Kyue;Fh}Y$TEo7T;m`*J zi>}&ieiCE2S-mulrcp6Er_yV)AjgVPuDyz8c4;e41_U)|CG= zvd+HFct9s1F&^zR(RG3kZDdEZN9UaZDCvO+6EW9DlFHim+wp0YQw>oMgLY%c*^Hyx z`bQNEM@Nb?QpB2XA9wO&cn3*h5G-A=QI>ONxxhOc++y6IQQw z)jYV^OYVf@JNmRNqP=Q_vpe|`oLY5T{p$<0@KR7`sfO9-+S8-btIg@Op^lpvRfiO% zq>J^jObX33w^}Le9LV_7Z*Q-!z@Kh63CfN$F!H7JU+f@?wD;nCiBlseJK{=K_P$px8)Ce_VS6^oV0GR<&jrZ9s}nr<_8A6A!} zyRFitJNQbP)P<~3$a5-<<}KDd&qx^0{Q%{Jve&FBbi;6n-N=wM{!hT zVeK|6)M&ne9^MT__|frmISL;~>ic_gEAybMWu2y0BP{9PYmioMbf+6XQn)eO7Ms>W z31a2}z;LuM+kA!Dj|>cGxa@|6HY69>Artn>MJ2N2>noM19a6KdJJF>8;Xl}*ePJWX zy?uH1{N015mC^LL1$CBAumkGoe2{%=DcnbiKs8lRLY(C+m?xg*VLm`jmHa-7nZr4i zx+cHb$D&rAVh#Ny{Q`Q^Ov8@f(%mp`?=HAbkC_bl#ziJrSqyVlbLnt1%h!vGU3xvN zsD~bG-?um6B&Y}l$**C7XadAKAM4D;J1b0nuKA|+QDA$4DDs%Mkq+2;Un9@B6S6Mj zJ(Dc~NPf1D0-dJWecv~~c-(&~0L%Tby7`CdOu9Qy!9e_{ejj!0NOeqkQ&>si{YD`9}dyM0;Ddy`TTu^ zR-k+*y?W+|9f*W6>IpbvD`plN04&Cyt(Ua76Ayck!P|mP4R@87+I9C7=yc-#9>hn# 
z?nBs??VG*QNnB?dZf&h1oFHa)dS#U0J%gC_{RYCDJkirjE9~+rv9F@}9K!$bo}oe7 z#HfcXm3>XE;rO~OH(9B5k=T>BVS#twhVg=a{>d4XpMQI#O=uMSE?BMd<0>mn<=MeH z0}E~{_t!%D*IGdTbHn@#3+Zp_9`z+~Bf8tx#{!k_Qq-TiHax?Nm~hiSP2`HJr`BWH ze!3Yc=>DO6$8QxHb3IYf%SzSbc>d{>Qqe{)l~Gydv{44NQM{)`Ma@84J5{HF%4tobQ#S`ObnUC*3^DYYAM$p;)^OX)he2eCp(E%ioC;dfoJvymCem_ zXErhrR#?eFupaa&QyaWi6=qBw*YFgZt zD|VRF&jjq2dNTiH)v!Sv)n>@>b4Rx}S@Ofu_z#CfhYy{0QEnM|tO(NduvXVa59#X~ z(FRywIo62D5q*k$(~g4HdMAN!=K^v?7RbvBrCwd^>Ibgs=;wU}#!q1ODD$vcGsQ2& zQzbF9t1RqI`nn3nMdDN8xL^qQBZ|FyZ%XlG%8i=;LDS*Mx`jtn>^=G5%97`?0>4H#;;`8Ur+Y! zm4%t@9M8c|2r0nfYragOO!_G28;Cuf0XT83p!U;MI#Js>%Rn;RS7>K$6(y1a$QL|0?!5@pr%YxmfWtY)jMU(8S3BXF@)#W}L&K-CSv5F* z7cU~B?XvT67K9sqYF@p31Nyb~f2@rFu^>lU;{;UtFqcdg9~UgM3LTMmVV6XunzA-` z5<;Wk!qmyq;x0Ze?zfT0XVyk&VXY{nI*MD%OtajO({95WdPr4jav=kF;iG znNHcwci0NFrIxD%Je>{Vtpy2bAdCjkxf6B-a34N#+WN|{7Nm5FJ@FV-vs{AzJ!i__ zyp9HZ)BMv_5{e3c=kWP}$UM%G921MCoMz@jzYI3}_VDnem1dCF#+vbPZcsiDFC&BJ zL2O=ZP9r2Ix;s0BX%aEYT;_q(+4TcXPW&_`@((J|Ho@4N_<(n!r&ncEc|hK_vL{;4 zR;G#$cCVB`<0aS<$IoUs@K=h_M?+&=gMP86vV98>hR^KXWa)P$+xEoz^+jcm@S10} zeop-SG0p3nUuhM^Yopknwpdo1VoN92t|*)pk34nI<8XNSBo@|5TN_Lal5?@QC9CXw zou}2YCkM~j$9Ecy)M-{^z-BAg_#zYH}a5YlscLZ|xtzcbnb{NL*cGQ*e#8tNWOXENh0tnCogj#$YFWz~G`N zpZ#k@qv6=JJd%NfDImh9O=fH7{qE92ZLLN_VN(HjJ%R8bte7lz2aHq0O5?Vx$;C?Pm?S;jzo?x zH>+^vBq7nu`Z2=R%|n((iSCwht6``%I=L@HsUh}mzi!2HsWp0-!RKZt^|jqY7TWlB z+3>*#$%*NKcgatTIgJ#S5XKi8)5h!{BR|^5^TFg0bPtMj)dZpVr1D$`8nv>usJ^GIgQM-c z@i>f5jbpeu4WxZV6Mp;FfJ^>C7JkH>&d*3TS#gI&KA=iwIsKt0&^jmFm;*7W-%SPn%B%@ z1%6`px~kjH#KgWz z5860zQ{g^QjUwMm+|@VaLYkwj3dW_Xr);{zTc%6Rhu$wtly5|0&G_HE&!mGjOI~me zKPW8~179hahyooelFWi{w%$_do=#o3aU9>cGo-0tWB==g{_8;UPa=B$=7nC$h*d-u z+atzYSFSkBRHmUj_#vXsQ6;pQQtm`Wq`mK5^0?|n9A)F|TQ#gk7hUMS=fOSO2rt)A zcuL@!i;F}2(2Sq+(?acLs2$)X_Cghe4wb?Y%jy6{defR2K}zYT<}et5TlgU;@K{aN zvuGUq2`#|?zAZfp@Zkfw)gRjjD&Ihz>}Jgf$+&-DLfm%?UX5^!8@&iem-;K z#l`CjNsGy<`RrFSqe(8!ryIDJY>(N$D`HlWW*O_YzT6*Y39j diff --git a/scripts/proto/examples/viz/rnn_3.pdf b/scripts/proto/examples/viz/rnn_3.pdf deleted file 
mode 100644 index 55ea77a836c384f9ed1eff0e483919a9efb5b3db..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 54714 zcmZs>1F$GDx2C;q>ulS$ZQHhO+qQYOZQHhO+unQ5ckkS(nZN#2SJHWVrB_x`)t!Di zB=W+dGz_%NP$bRQHJ?yS`1JU8hL%v=-1v0TCbnkI=J?G2BtxQ;SK3KN%!LO^NZ|2W;@9)<*KWt*z8IH){ zo)`Nthgb#UiGH}Bn+G1b(*43-Wn~w=H*O4r`)dSu(FK6v_hF?*ElPnGGnz-CU ziGyZ#oPm@)Me%lj%B0atf(yLc)K!n~tlG(L9lE;QCiNrDuAb5-#r{WkZhA;fgy-9$ zT37GVCRO~x583fm?-*B_keJ~PtB(n}jT~5Asn5x)4aqoN$V9Kp!gwh{pZH1tIIrux zmKrBBZ^tC~1;%&dB?eaWJogwR$MUD}WS_TdWo~bsr6Yl~9+)fRx?P9+UuN-7mfmylc(C z6Gvu!B2L$a}q^?lJrNWTu?m*IZ!@3w&DT0eTeaftBHa%yH*() zLp8wha41^*&~X*KnXqIBE66gh`BMC~O!<=%o>FIyJvz7@2VBnItof56O!<|^lZ0wg ze}HP*KxwDO%Q-cG)1VPJAT6~Fz?>@DC4_wJXR!UDl=MnclLBM>GiLGA2FZ>Ma}AR! zd^))$xCLp$WG#`KYbi-H2v=Lq;T%(tW&vo~|JsFXVc_{WtU=w@J!kn>Q<~+gMx=>U zBhdzH8U$(9nUL#H%~9gw2UcPcB z4FEGD*0fyPCcDK{5#3r=ERV*(=gqBJ@N$P{-$i_8Cw0Q9BXm(gW&_Z1+(_hXG-8-m zRZLD1f?4CZrR=Wh|G6alyzd3}vFT&CUS8t4Ro@?oT64LdVC|MQAxs#na1qO&F7y@o zERLIDEuY2_>(1c$&~0IV3Y*}n7=|{2zE)mdxT`rmT@7J@=4&Xi%k=O3Dry)PlguZo ztRT!EFao)?B4rE#8EJ{S%v;-y?CthiVqyb3!ym4?a)}LLRM|ty zPsvv^U=3Xu0ih>eQ!%HhiQECIL57)NgexCMVUUqr$VHNoV;4H`+BB^g=221^+lBVN-SJ6YULe2eDx8ownDcR6Q9? 
z5j@lrrKhME#}_;JMI#jr_yMZM6V#2K8F01M_{1)+6zq$zJH-3uS#3g}f3j1AxoLA} z)XdEbAk7&db!SI8s>x>)zu!Y2g2tz_)G+kA6f_9U#qmz(;$mFekHp+73D(;H1u7fV z?!q0^dFv21dJt@@?6c+eUVE}6S`S~PfMFirWnDrTuc47Qo2ICFl-~iw*mO zh0zb&=$l?-dgqWyXAF!(i7&8vF^?^qA!;7$dlnVt*?>kCTsKGOccfM;Em^*GW>*#Z zSy@gMIy#ZB%P{}#6=5X76*<^*F8^NKS;=4Y8 z!5#>}VZQoRsfr%(OV3l!CKrCH+4^*a!7Jl^~PzRX?ggh5eoDP09c{r_vKM z@enakPo0;ph()245h3D@0$s<`=Kxcyh5$%_u&CGoU^xXg^C^DICgI9qC7bJzagD2` znOCJ@oFgIQKL1XFlL5g7p-V^8z~b+h0}?k6_@kkU!F0e96wq^3Kz>q!X4*_5JA*e` z$gbePaCP-O%%NmPna6$dj2n7N=|7pz<055^K%M#{y}Zm2-PL%Hs~y72lxOLnI!raND+(av>>yVssEWLLOZ^iv^qhq@2al-F6&`E#dUF zNVM?+d?Vz@a#1{e0kUPEr+*!UObBlSK5waA=p)ozI-;_Qg?xG;>=5VOp-gO;Yk z_v)>{I_YzMN{M_K?|VXwMDMJ2f_BF0IJ*$0y&KwUr2%=gq7;QAv7=bX> zMV~V(xSplzPm{JE|Cvvetpuc$897KKjsok=ZGTZXn5}xKN8>et-JemzYs8O>H-I=u zejq#?m9$|Uo!eQI9@LqmL3fPIKE_ zamv!XD@xYqPxng+#xv$1ZTXEP+2!4Jcc}?AWscK1VUE;y7Qx_=E&A}lC(!vzotoLo zpElr-gEfv>4&!Ju>pVUBDQ`MZm3FSON1U_r`6izpwYd<1-i*>C6_rNtvwIQIx>DWtk zK^sA!qh%}}|8A9>-?(0S%BqIG-KAP!xPKQrHpvZie7aSA?$^(ZHrB~LP#NTizwPL| z^@6)PtDRfb^u;#DRX%KIAMS(OlO5~NMsN;XO9#PKGt>2CS#|iMlp5t5p#*%^ntjRy z+&FZ02ig8x8Xi*$?_Kl9&d0C~wY|kU@NtefoZg=!s9jPKXv^$k?H$1NG01rIvvvo; zjuzTKVVYvky3Sjf?j0k~PdR2aOF%e{g5x(DpS3%Whv;71UN6|59?2a)rJgHs=vab& z5hFm~;=dFIsXw3kt4Gv=7;c$=`Up^8`K#}z_P2pGAVs!kZ4=OJIj0}*#VxQZg*{`t zCI65lg_-g#wz!-Y=3`CQ<)5Ut=d@IRI&@|Q48qY^yad8#)&0qlmPz~FBre$N2{>40 zoJl{Flu~Y7RD{lKqep?)t25=7nIlM6B?V7arp+b}t6*6gcgFGGMpai;uu2ZkT#f@? zb-NbG^o}G`3_9(1IF8!BNW?2rVmmBn_ta^k%yj4nmo}YSP8qF^MoBcdnrRAm9RW`| zV9v=LDqD6uXtOO`U&=qF)PN7f91aB7^QKJ6z}B6n6M^ZC%MRLhF}??i5@^z?CU+`hs(`q^ zfN7@Kgx@ljO$Fa!e~-Yvo>>!h4hVK`{3@FNnB@v$Ko33|kawY#z{t~!sfX0nohRIg z(k`P!1iSj19^5?!vnoi1Owl>!zVACPV%SBxlSE5kmXkC{O{b9fGjlKih@pU_wh_b! 
zAvIw_x%ILsVj@08Q1v6NAVG4*c}NWcZ&cCZ-ytqZDT^H3A-$$B6PLA}ZBLCTo?#?U z0~pwg-Tq``s(B;vPF>Rz&$qDTJVSr4%L9Ms>JmB4iv4>QOvp##?QUC77k`*=*3&T6 zjXrg^9PH;WIYWB=;a~nOyn4DisX%kqz+H(b${f~iL0g-svpU`$C3LJ^(9w)ry2b31 zk@ptNw3gBBC1cA7{lVqrNX6+1wFR)8*6k_K$2OITdiq!QSE#Rgw)K9b}#Xp#(M6{)~EemV9;es)-wZfUtA}Rx$8g0jLhAvz!o*0DIZBh{%##A4r9T=_cbCP>hh485&UY0Nb@L*v7>r z2LB}NYAr<5*2h~;z)J*CgW~`(JXJo;`peNYzgBo1$X0AqmmLmXp4bLZnLg%P74*8D zN~NoSrf41neWRr^t?fql)G+I-C(<9Q>e}()3WEIGiQ((}=hYn#w2x`58u$xIhwIq?Ff^2ZpW3EF)kiz zIkNLZQ#K*h(XwjMF<3^mqq>hT`_(7)@7RmA?WEf!DG+uLOZMH+~ z(9v9rct>bHU=K@Q+l_!gde$fRq>#Yj?1$iUfz1qz?P%B{X|Dn6*o9drEg;`G$g|1> zoLnCGMv>M%#vmLjC`TnNYElLsCGW*6(ckYXKKNhYY<5FiC=*-b|IOw8o&AUC{RiXy zr~C)e+!KG%&biRkMb|3 z{|BsQx0xtwUoOx@gStT=GrLEp&QyP0oo|4SroouF=RulfCuY2CA4t~2j9UccVk zMo$+N8Z1(tyjIc6MZph}O@X5Pz(xxBF+?;(IKY4SSGG3##sv_~X`qjiOH`OyU4Xpb z*7YSMQl@dLqkmfg)D=~Xyuh9EVOM*FLat+;arm(H{$-5G)7b!lvLyfj$o>f#Q3)YY zA;^70eSJT-{P{e8`i5}L0i-Blnp_&$*jSv7fW974`3wG*iAu=GJdO^IjgLdp+kdhF zy;)xA+gbwSf6@iQ*EZIp&aG^$jU-^+k>bUV>mlR>LFUBN6Mch|m50Y9(9eC;+G6V){7U{<+S*yaF?3;l|28;DVdY1~ zjQ)nEr*EQvs{rU98B5g~8rhy2Q{O-9-2-Ul)AR}NPV}Si-3JW(1jxkT09L_B!(>lI z0~m4n*H;!`eCzR$cdldp!v0*Zv@$$AyoC);ZVkO10$uLs-&(`iH@PvsJiN*6_0p(| ziuxzC7ZCOJtu1YVy}*BlZLS^ZwM6$6cDGaUCk5OXl>>zK?)-W${>TR+&W%i5%e7VY zG&A_?s_$;$zP0jVm}C6-3kve9`GeCCVS)RnMg$@DjomR|k|9I)UG|0p{BA<0>gw)( z`E7i{OG08H>?}v#~vX`IY?a9axV3by5A*b4H5&z*~gaNZ0)KItK7`0#|TSGa~YE5)*3tO}Op_ zK*b_5@}hFWGV;g|de?hJ1fU@T9{l~I%*w>p)WqOw|0e&kn_eF?@0V8mr=9@>Gh;Iy zC|!LW;N#`x!?q3vFVNV~1k{}#h!-xg{&Lq0kYCOW*!fEwaE+706Oea?_DT;(-xzd` zYwg1Cs{`+*mq*$fz>|w?Zk1(B&P}*Pi#gCj9DkGiiR}5rPj1DjC6mJGT*6;c&4+RaNn_%w3NbC2-yXp36 zwFMg{;KeT;oiF|A_pT234ECpCNc7`IOHCZ}GSMYrgOi z?+n=K-N+~YOXEr`fAin(5sC4+@tvPT{U0B@x4HFR8{|iL9xSX*OsoN3Q0>5?Y`lY= zRN_%uhhFi3_|~yqU%1aF)rdu*24P#^sEsa$_IDdO_)Y=dOSb6AkQ1J9ja#A>f7;A1 z?in&lFR_~YTS>F{KL}N(PI}bn9j1W7JIZ;Rq<#W^F=W|>aPX`g1Ol{Faa!bWK#C*4 zp0a1+Ohioj@3~=&5nI?jG25=Hj#kabs%L$~j6-(QqSQu^g?>5`<<+-|{z={WTQyuq z;Wq5?b(#*tmqUjlI5?0Ut(RD6hhg%`72zL+r9Mu-ZG=H(mC4gJVkm-d2Sw0=b653( 
z7UV|wK9HYDYiRKzSEz3z4P0qFcrI&z5LTAT1(d-n%^ye&B1#&>cu8ThU3LpJNKQQ; zH0Pp!vO&J@)QXl{B|cWg1+m9GG-2cHKh2PdPcK?B}VT!-9tA!q*eqoqcCM zd03FbsfT|YKYje*PjqM^EoUJGq+;fo2Sbs+uAafR_jm1-yYLtC##=Osqa96C1?c#I zvK@Sopk&RuJKzT?ECs1qmQY9l!e3H2WzdVv+yp&DMwd#G!*(Nl{*wFlRZfj^xg9VR zk(|Cizqi{X8$f%e)=B!H6R}%$R9+V=8r^vlP^p65XN|4c>3N2}|74gK>OrF1FR(ZO z68mq)Lt?Rf_tybJi68oQD1_K|Ydf{I0$rD~8@~^Z$9n9pc^Tdckr!PH#u=ISLBpeu z(O!qr`_LN86=Zxi`Y+3H1db(`Z3_}$LA$^%<;FTN@o4mc;50Z0RjUBUEH$Mx=LIiT zLKEM{xQYadpI`{P4f5E}aIPd6%EWLFR1MwTC|#TFoh{Tj*(>>3FWG!E#xcE>$+~g4 zxDMa%M-_@0<7y-$$~y~L!0*`Z&CJcL*xz}UDX;(Tgkbya66EtZ(Za8m zYFz?$$l0ERY5jO5HS*V)_Od*=s!PLqN*;{!@;_QPeN2esWs`I^eCRhgTra}MZ4V4H zL6ceDaCsvr>ky~RSa_dGkG%vF#1*yUaxRcitwxCteo7uM@kY`|xyozNkfTAo7#IdHJC}ExT+mjFQtMnqgeO%-%98fJI zR1PkcnTdGXXJ8Q?wcMme1Hp5sH}gY?9^b?ROgrxIg!N?YT3uZ>$FN6;-+ZzL#QB^6 z=e6(T4^kW4fh$BG@H<2JRhsq$NlOIa$$W`L%zm;tjFjb|!CNv~ma%(yBGJZa^|m-! z+qBp4BL37lnr39=@?Sv0uAnwK9EykdFN+>1-Rsgg0-@@D9bPKwsLYycsXK?xr{R?j-+C!JH-r_iy4< zL=^gqrzf~PV)X7jCC{JWpe62Aye=3j3o^|YndgRBlezK^m0~1G#BxryO=4^f@s`Yu z`pY1#t(@hF&%}aTzAT6sW`M?BSFl8n+@7|;M3hIMAg+zh;1zQIit;TDA#6IjljcOR zFKPn+PeqR=0hkgV_fi|(p$CKkP)U4QvPbv3OSpGOo9f4!(0)+1BWJio}Qyt1M_b zY4+Y*!Zb?2p?`7GW`^`LVl$V*`k*eBvoL8xHW?Vc7S1kx$l-Or>8f^Hj~br$jl)%l z-j~t@HHWRk=`lWJEioCJosv=;nJY%w+f8C(4<-#uKh@Di*6nlAAa(q^=P$gF*#Lmm zJVqF1qc)}&HlRdg4J$j2N{>1Qq{j$U^^37 zOJcUPjb5ma-j)2^3t_;d(%CN>Zemf@Wlq40r!k`sEJs67XS;jY7vc*P5D3f84(fjX zkPIdSpX~Fdm|Y7s;-HVqcm0J8>oY2Y@sbI|D4i^&R+-h`1wQ1Sg+3&HctfJmET^`4 zO#MuWPo%wU@CgST0JBzOCcF_tx?Fu3Hfvirk-FM~fK(`el@(yNE2N&%!VZ)dEN>e0 z%uo>MdWGv@oWNGnsUccwJDT3Etp%&k-_1Cbe11J$?&u??Ul`vD1#J}Ub>l7}il4PEX1K+d#3xZSf7(Gn_oNz1v_2xT zNwkcK74*8tr^ezs!z4Fm)mAIE{g%*&$jZL8Pml_Z1jfIqZl%o{M*dR@iDZAo2B*F zBNUpTab7zU>GyD7bkDYt`H#gS@>4wpoHpJN914MCtM(PFI(%PM_&+zChl) zgdzIM&@3t|kD`cD5E9M5*;h9p`S*#SW?&Dv8?NRw(!{Yz!9O8Y(K2zT(@Gs`C(8YxUPx zByW{Rw@M$hdrUgxin;990Z z?<5g0{QbtzUO{ftij<=#9j^$3Q!y%#^r6(Rf6Ls zWF5@(NKDCHocqku-^@q2Tnu~(%L}I^+^3#uK^}D>^o(i+4i`biA<)nSpB9=W5>F2S 
z>NuAjbtn%08l;+2>n964M7u=a@cfo?keVOv?O7jcT?D?%edgZ3K{XJrfiIDNYQcd* zKL&Y}YQh2GY1`E4foxDntYoYdkU-nF`nTqMoB*%AgDzBVlO-!;uXsEJj6tgUqc5iP z9`D+Fk#>o0c-A5@BC{mn#<_VTXXS#j0ZVCmWSRQeQ+~Kr?bq}+CUG@@>AC@`bU|i0 zr7JK{rY)H8A;m^FZ0p_T?xLo^M6pEee0;6ECBWaaNBlY01aD~m)Y1-)Pq3$aQf#N+ zI~ZGkacM(BZ)Ai^k`1yep7VBaN34lPj@AnAbW|b*OpgDgIo>gMZ-UgF(CH~3gYRao z^-9!lac2~C#)}37j=n(hLMI+kQ@b+`u-R0bb^F`Tq)>p@PVZW%{fax$vG%u*^yXVG zR%m2f`v%5rmEPkol_UJ^Q7yu~v&aSG@p2$NAgecOVAb+ga`sx)=~8yS!A=rnIp`al zGwnL4ig8gIF8Pa7uEubdZbTJCHmO2U*mC_BNL-x4A{OiZT|MH78A2A7vCm7AscD{v z7RgdHh4g}al3E-JvtWl}?NgW2dO7vjiO>b)5lg3}jSc0)At~xR-;ZA8AtgRKSW?s4+Nx^$?(7{?$-=p@nzHk# z&${}Vj#J1SiwuRir#qpo_YADqwKWEu!0Z|G3u>t<>W4(C01Z_iC5HzEM9tpOS6pQFq$;W%F#mZ1F{FPc$S4s*FtU?2vc{S07 zd*nBo^j-EThNwtUVO}hlj%O=q!QmZS4p&BX4e;KIgS>B6 zySWC;cI#UI)v0bd`qgSM#bRw{T9pcfzJ*1FOuZL#Qe+7`xk6ls3V510i^E_Mj}w)M zywP}f4%;8xVnh_yu}g5vbdtekx?GyqKel}R5Al)?@$}S9o(t*QavqMa6-0+?L0J?K zM-~iMConaCYYAK}=0=%wU^6gusz22`MtlV>YFr|~1wgsRKGKoU4O4=ps*qn zHoiWP87LziE>-s!=CDix;2# zTEN6)A{oM$m^~+OVeUsUQx+*@*CehQL4&=GSgNGOZw7|r0?=&b5-THH0wfLS^92BRR0`&&lp|9lO$187jZgq(= z3I!9BismeeQ6$}O`jrAWQ`E@}+@_X4dQ5hWBjSu|XntC-s=aAT1Nm6n-yw%=wVZu| zJp<})4e(Ko66pEmbv8vMjHSw+afzC8Bz73d&GoL8=LIr^DWPv$5kKtma z*$-fP@7Awn1AB(D5i%$~$V={bE}f;6&h(B%)na3Bmh>)VA zIlHW+4+evS->dy5R(1pB?JMc7nwLGEL^sTW3$fG|SczdHEK6{XcJ1Fus*h*h6Vpe8F;3WGV>!P_bE;$5w$o zvTzHc&vOu}pUapl%)iao$nx&6B6Kkh0PQGoo#kJGKM#;r;*B`PTXyYaVHL2!j|hnZ zO~u>H*aaWh0{5ur6O*#Q$QL>FlcHU+_51b97Qol0kc9&%94wh4u05HtQU2a z%Z@eOHwej$%Cc@Bv%ett)hFxi?5v;P9$$58%MQ78umT5WQ@6o(&u96GVfTF*5;(J! zrmJyVGS#Z!7da7N7b*4BTqg#n{2bpkXsFa-^ZIyq6Fk2)im}b4zRnuHY2*EoE*mG% z;$@4#za=~da@wu$8NaYTHCS;_E&&hWIDXtZkpWr}XYXE(*zD=obhJt$OibYt)4eU? 
zm4@=1=dw=VN0eB$gGYny{_K94d|WHnym8|QIm=`F@bV* zdL{+(s|a|n25pcPV;1b2a=LY-lcLBi@&pkpRXwY#-*=NHOaxNl=7e$_H7ap@pmW|UB zKnTi4B*V`-^J<0FM5tALXXfk({uG0AvZ@CSdhK(>2~LHOc_|C>0vP#R?sY+zm4;oh znC=`rUPrzOkYZ$CWm^ZQ{F&RUEc7ycPwyzu*Y!me7v@`PaV%rUxs6TfYjxa9v?9|= ztgP4jp&my^pQu|J36}LB?44gB^1YH%I)t-TxN}ylEC)_nFGLh;NpcTMifGN=BVJ8k zJ>^E93u30`&$Y$Ysg<*;Q>ZLT69VBmiB6l!%A>Mx*>n`ILNd)V;1*k*x=}t56h|_e z&r9vMWkk~Te?cMjF1YZWiF0Stb>}3MqPcDK#opxa^vG`|rwWG`KN{p;VL32Q%`fVF za_nU1S)xKCM)*Ua@`gJL68mR*KvVg=#X#Z+R}f^}==*c(#PSB)eLD6{N882yHZd#K zQGTU#Q)#uoe*unKHdx1K0Lx67n?lPxDIJs6+gYV6nfZ8z7{=-CTuVQY&MVTm(ST-g z3|REbvf-Ja_v;VLT@5~saZo_>{V;wr^=U<#jKSkxW`-x8(MET-!-zL}A1Mbc$S;gqHu7bOT z_m5;+eL%RG+fq3{1bL|Sj9+607xd$+MyN}H3u#*PDb}RdR;T1jk+cKOlJ@6Fbb~bc zsE^FCaUweb-~}VD9Y>c;w=ZcL_ae_OZxt;7=>TZ9K+r|kjzXCU$3niFW8dt!Il#vR ztN}XLG&;7dx9XzLJ&FReEKO?S$JoF;e_aG|Ox>sq0U5Rn8AP`cMuxL(P^VTidIHDz z`JuVB2qLecYFh`sr{mV-aE zY*b2)TD*oIu2@Y4o!$9ba%=dHflG~vH*`FO1?O;EUZ~Msa}yXfc1dlehE=M@B%#)j zkLZ{(VRPtrHvNe?mQC@+m1X66T;p}seeAPgKb@&dHl~mzO$Ba1r*NMQG_>J2dOD-A zOi(n%xokoJ-I>F{oyW2A>lFmwAWf2XI_;ItL$M@fI=aT*>#UM0V*w@hq(E3D6Rh+~ z87sr6iZ6oq&2(Bw%sTUH3(+6`yNy~eLfx~SoEM6@k6Y8&8VUO<7qq)Y?(57f zn^c2FjH}~&ZP)7Z7j;*AcAuo@Xvpzm(#jaUGwSR99A7f-USVbtm*6d8g1kAprS1<9jcs{`YH6HWZ#f(I&`P}DH-2t5qom(Sqo9(Ki@zZK-6HFZ_0)6POJp!? 
z?j;H{@rmIO=CyA!Qil`LW|!VG#tkal`DILhUWJylB@g^IIR@Bka~VR9kL-;YUZvFO zpw-`6PrDIOq|-{lGJBH!!TTg9L2jnawNXnbM=6)n*_!j0QZh+wF6`?!!zPGxzc#lL zOd?L#(0t-}vAdg-oO&+aa&@G~<~?Vc?^>>?$}&k=s=FUBn_h2aN5(hT zV$!gSt`*eu<7$_ldVoW#`3J2p*G<$6FP8wkPGY;{Czco+3deho#I9lK3NW*{{i77; z31fhWvrrB7x3LdoB3iH+%R3V#kv=pKmtln(69yjxU8U9BU>U%QMy1GK%c#<1M-!vV zdUR?S$UTKc%o`L>a%nh)!La8hZww&A4}O^-r`er>@2{u{)g2F$XxI&_fBO~41PyRS z2Pwr{*g$c^XHQZ=)0k9#>>66WbYo9sWD$HmNx#pNqB+S+j^e^N@OS95F6j38H4xSj6 zhikM8Rt$sbI)?lWTiu2o3P^EjJmcpNg0VQ(9`XK2Z!ks|6C07+YM7$W%H^pv-TBzv zkgHks^4zMlU@{mckK@!yViTC7y)C+2=JJL++}sKQ_x;V4o(Zqi5#e>xn)I;0%4VAn zfl{1SC1|dM+{g~nljLy?$t34!!s&NaIOMG_O!g*Ww5YH(YC!6ei^flzo&Op9r2@Oow`7nx`TLO;T@Su zW$jk#mGhA>aTl6c2ePBem4?I>=D)5Y4jJN;0w-Jl z2EFUkO#9e1<%W1naxz@xZrVx`>G7;D=IlS9w&33UmeC3E+Lv#G3Q^+#y(e59u=E=e_SI*tM$=$C2xr$MD`{8M#_2*#f^e?xF;=D-@rUAv z#%+T#AtsA{Y(l0mx=>aP&LYfda~y22v|e<)HhRT+5-M`$FSsoPs*sjC9^~-^kRS>X z$t#U(7mo8;7Zrjn@5PdrxSJeuZeJmvpJ$+XzX+n6g1eisTrU~7UJ9xMA@9Ije=G3^ z83J%9k|jg@O^P1($jG19tFi zup&JT%3a~tR`4(ffPY&~XPx!kMKE>T&a$O9zYy%DhjCL618)oalzHNG*Hvf(r^`q) zmuM>+Jl+(UP0_Ng2!jYpgr45)FBTDNie#=~pDJk6Ml~qI1vT7^cqJ*JOQt-RTgjI2 zNbn(MlfGuGbpBYi%hbrH0^Nda^qvb*>qNtdXEI1P6tj{S<#9d^V^zmu6@&xb)Z~4Qdz@#)D2mnlj3i~IptI;1`mH} z^WaC9%@FWSGle~tfVkLNxFv&5Fvmk*Ygh;+asxQYKo?0~T2lKEM!9}e`1uNO2#xj% z0Dze5fPSA5My(zp{oZH@EkdKZ?LuwDvkm&~Mf-AW(Dqaox>22l(4z5VV@rw$qV;kl z&T8iw+j2FfEXPB92^8N;=MkY$RLLKrPo_U2a{x3PPKU4#9J&hLpRv4+9+GjfNi`vh z+olj64gq4FOb7ej|;3qrel@=>70=)sq||tFGv%gUyMMwThNSBi&zoR40?_ejdCn>QfKz5yw)NMV6Q6R z#)pJ~1rJi8zYkVQ*aH~&-iN8Z4L7fY9QG_((j9BYQi#<5Tek8z3hH_59iJf+iq~vU z)irc)W($(T$1)*Lg5<`z+MvK%2}#*lL=^8?%{nc4(Yg zhTyufgNaH1fD9sViHC9xt5F+ZcrFw1E+I=Rw8ZxH_t}<2KD21Mec7V?=A3?oK5za- zH114x*Xqo@@IfJmK&;Q5KIEWkAM+7eP3VO_q*1gU@##$hV%s;5pTzi zQJ|pNQK`sefV8cnMDRrrQLb;5-KIMfmT55Efjp2Mt=F!0GSLtJ*y3CdQ=i;b$xiZ> z$^K{{I~ALV3-kE~J;YS`s-T`KSMn@D6mtG(6$TFR)!% z&cLS`7^1d_73AZ*n7kyc4g&AkVBWAb&8s1)U0AA=A^^C1s?Bv5#Lf8%VF~Z~TJRbu zQWmuwRumSO&&dza{!6X~Cd2*yiEa{|m{I8wW~w#uB&HL#;A15goW(++WEqU#Cnj_~ 
z)gCz#(!ExZWYNHPpk?`lz`$hUdmY(P>T~kEpK3rmvZE6yX*?>0ybF^izzLS-1;J}5 z6k3!iSl1=Dc&yXiqZ4{pbJ<0opOKyv%QxD7%pxQf21O&9kd^F))LHDV1t1@j>0ABrgLP8eup2X;fCt}F zwWjh4D}sC`d*@2~Cz;xsD}t?O5uf-Xh`dEo18N<}Ab}=107TqtQo4BGN&;|})?1Q; zPRi?QJO9In`_VJh@ioqTE%(8D?F_^ z^2NB(4hB-+?x+Y~g44B)+W&A|A863qrOPTf8jO!j^oLB$#mVTYx@cWkEJ1yNh-*R~ z8xUnUO2}q6_2bd9M=G2XGMA{Rtj(<|;8O`Q?8H)p9rX3!&*!|`6q%~b4x8p805b6Q zD<^BrePf+Tv3QKYmYUizOmj7kOVRNB95ykEXVs`XPlO#J=L8IBeeQ;NCO$QUbrZ9lvM%s zaSj(LWi9^GScF4)w5Q)anJ+iVH|d(xt0PGv2(%ScHcOjNQNC3KOV1hqc?$vvM#T;l zrtf5adA1ZionX$2r>17zno6T7DATCV+TZ2Nwss;TYH_Cdj77YiKm;{S{=CFHhg1~( zJok9DiyW4D#sekJ@@fX+P@RWeu`^rH8u#(j@LX$ml_sG~siTE4N1}^q=tqtQ4}IE4 z59U6)17Eds3WpYB7^CWhtUY?B0|A4{^9H;8u^7XdCj zUF{caT~a)x#pEh#f;H1^_C>Rx8n7o9HtiR;X*5j`_i-mubW6TT;^9b8blJ1yBC<7W zm^sB^U(dCn`;ex4a0Mjv$H7W}sI5Se%yj%&O!OShj!1vmqSX=WR7~sYn@#}oak*K5 z&65h(z~O?03g*?6G}%v9lFdY6p3zKlVMCZ3R1LQ?Q1Vqm{Ew5jlZMla4+DSd)1rv0Wsy&}L@sC43sn4l8MpT0^N`32ffZFMbI{QW ziQza0za+@u>$Y~(Hcjtjfuo-YRk^6jpFKVhaO_^W&5xpiZ@Y86StHjU$8a6>E;IZK zTQilEgSmQxg6FIFe2|e}_H0IMFj%ONY(7}O0|+l#9T9H~_rM&Wb8h$Q`1C6tSV*Je z%CYsR*`z%!YPSynpCGufEAo4%U)xz1vbGoIPov0K{{X^OSH!F^6xCR4rD1W6SVSeV z^NCfsHL|wLmkh{UtTbgs>7%4=`Db*kCrgZrNDpP1YFRYWur9FyS96@_um{9E4G7FS zbgN9x(ZUC0czb^ND~KqNxJjX_`;Ztn&5F>Qab!P32}=l_1iGIQg}Yp;>?jaDcdy3w zj=;p*4~bZXb^z8yIoFQ)Ega^`Hy-DI0X0C%zr6cvhGU2i9Z6uC-_h$rshzt_ zQhSPyWCR_jk>EPzExDgeu+_7DinC$$*huE+$?DS+GG3w%LFDXIffp=N)dkg}UfZV3 zhRzB0*NM6+2mM=l&XM@`FU`wr%YPo|JR`mA`uRFs6pXSOB@C{XDSmA?!J zU286ZSkISU?&VN)C_EoUIW2b0(*EjAzq(K$CgcmUuYy@Xs9pV?hK{FeTj@QY{LKQb zXyt4C3Cjd7E+o12h($c#fijTaA&)x?Z0D+x^vmp8EH>o}ecl?rx(pO;@w9;b0)P0- zDe=jg218b*gXgE_F)IT*0Sv_2cIx)0h4z)x z7>!`RBl`^k_{Vm~ zc|tC7Xpb`{(;VF=^s%LfPMzf}NfF?XftiSz7<}=TqT8Q#lo=CZ=U+98tYm1;iS0|3 zO$wtAuOrbz9WN`tSXah=Nr40}-B1NF7NDL@-Cdww@OHY2-ZE(-Z;c-lM#N**-4(SR zNeNI^6U;2HG>A9P{{5AcBYPxYMdeb*np}xA6iD&d(5gE-TujiJCR~4lL|h z67D_}ce??FM*I{nKSioxT={7rD?*Zu6;ax6#aBFl)}9Mn@B^-J2?cjTj}89sJ=K52P7=yR|_>=RaxR&~}d zioYT!lE7~ft8PBQ*_C7&STr6s_q(1b8^INKEKAjWfnnB6rtUE2G|+S-eOWk%?K#I( 
zQ?6_FFMrtBiVU7~LlqHbg z?Jj0{?3p)HfvQbsYn3l(VXpr^x1*U4b5b>rNb}jxbm$YX|0WNah_CvaXn0^dn^>sI zlGWVt5`J$euwvsZt$uWQ?UO_q+;<T>0id5*!f$O#xvD-Z5lFXgZ2^^i(J{RJ0Ih)IlvA~zRPQsxXWF(g>Q;)$q zwltM*dyaZF1d%y0Za>PWreop850a=R?L!S}?1iAof#3Vq8zqoH{6=~oqZg|Qu)(l5 z^D0jrK5Em+%ax7oaq+P;;!K$F(AAZzGkP7#ZC52w9Ge095fzgxPj@;lkz-Foa=W$9 zkU~@?+RSSSFn-0A?R#yxhUeqfYXEr7IMLo`xQW&DqO}ljDa6!8W2FY(j2~c>ftqq` zI}wx|rDw@q0`;fxPg5hVCB4@B2w29B5e@|fw!R^m@qrC))o4sIeD8^~5klZfJx8fO z?K-lIK^bOF$vt-G^4BEm4Yns(Hpv?X!(F2#FO80PM-c5kE;5Y{Qb@!xB7Gzqnw(){ zi)+4bHF6DkE1CR#zjlwXC|+XJx-pn(@hS>?ZADKV=tmeY*)7+pV)oF$IcWT#!_Oi5 zb@L8`EUfU*Bp4z&0L!XZ&9JN{Ms-rB+3!c6IcpsH$r6DuQEUekd4yYz29m?+MN*RI z3jS}=X>3H|@+&)tVln}Owmn%Z>0#h?s;E{ z*g%;G(3MSknA^?N=Cv9$T7$f(c>`5t5ial`{i7z)=HHvu^n4iT(WDx{g)-NnLhs7y z@SE5%5oC-4zH?q?(r~&6+=#-PwN6=|rec&@@|4hU;7vt15~_bs3^`ofFr2je}|+1FAM77sfh{wcHTC!VsaIg%jls`TmBSyp9r&yR=BLB<;>wrk9N|ab ztKKp1-3VeKX~b_wc(L(&V5d$*=_}i0@aeZnpfEqP&J>FINLu7I@>6Mj=$q+MI6UFW zn1Sgf+RoGztteB>!y;JyM83+h1imV@8728tw|)RvWw={(x<4WyShiFUrvt%2EN5RC z%X_$d!2VcmcnJ9?mGtTkt%TeRwgH}6pb72n=|`PtvgM(WkqBo+s^_CWYY}1 z*QW!8>?vF-$ENs&M@b^O#9W9*)U zOQGK^m#=@NH`rxe!ZU%9Z9)ZiGv`0e>hsy={OGx78EtF#5QY-03B<_M#y+4*F2+${ z&~)k~5az@rz1+STG9!P%g4#W6)oP86eB_Ld$GunDF%5ovqAla7P?zr@Cn`{K?@cF% zZek(`#ZPVMyH_7fW^?hm9=(1u0(#B7%z*Q;29L5#Zqa!@=hBgl&#=kDd0|KF48YXc zVLh@5Bqzu~d$*J79Tc5_$OkcFV(BE6^eI#Q1pR8YS@tuFmKELn_&vAQO|W5@zW}+q zB(i>X`{%A)@|NJ(Vjr&3!bn|KQhmeVzK+O<56}wqE=12|237&*wmSjfamm7Q`=X%- zT&fUDzbZm)iT%PeR&BiDRfl zc4mK@yZo?JxaXdc!tSw&SwscXD~ztu?sT&1J7W1G{!IAZmocjVtlTMAX_4{aiVyzq zef~O4aUTO#q*Au8peYOwsUELWFs?4qS`$r?(_>KTv&Y5HD;YY-o&@N3=UO^nXCER0 zOU5#KTJR*|ULwgv%ix&wTF#@gqhA7n*emXgZdhr*g4RN6FG`@Xnqdh8^OIKA%H=z9 zevFlJF?{FlU1Ph{tH1o_rkcRQOqx6G2GQVE-&rTt82kw`z_dDm^^!d;ki>?NFcQ)s9++hfS;W%s@@u)|ib1Q}b{z(MXhTy8 zo(&eIVojJ?TbhMuG6s|bXuecE=IjaaOOiGrSV737pcIDbg)@y%G^mcoBHicE_^LxG zx0&=PZ3}Ay&yVo6D8!U`;Hw0IRI!j?PQnKHfS|b9{yZi4ig59pd;WX-@2TB(Uo2g; zBN1^2iRgvuYY-Kdk z!(nfF1FNU7`LfQrhd6$^OH{6LO1Jk~MMcSe?$HZKDof<2U6c}l4KN2s?xwpfbK4(o 
z=qmMs!?t~z$fGjFoXiHj2G)R@eoZ~6*$7e<|3JH>yA7B=28)_`^Q=eoA%i`NpY2ZFCvnS^U&?^c_rHG8 zgq03H+)bF}dAFe43g3k!%Y(ORyV5SYq8s*qBaBfr8_5sd8--G6l^+$Z*F!&zrrp9U zq;a7Fj=0eI$_6c^6G@7RZN$ksXe{JBt zBOm;#W8GV#kD)&;=XVCf&c#W|k({j7WCgo-qFc<o4WtH8uw%7RBOk_W|U?Os6VT%!06eOnijDJc_Lgh*U|}D!rm|; z{diyo4qK}gvuK_LsVRdBZlKPVlBd@q+k@eYIoHZLh+J5A&Urjg92ohd$SjTH=jT|~ z7zIARr|fE;3tv$h$%1^j@+kv7PIgDXzrvQGzoz!+z;b&0q4$Z!)_yP>bB*79NXv-c zWqAO=kutWsf%{&~X{Q>1{Czm_^SUcnmnYsq*QQBwtCx3J?cLteYDZWNp^Hcdufr9DC|V%Y*($v;=~AB)+Gln(x@C|^ z#t{bScB(>dNKnt>Ul=RQI;D@(q(4-y0w71W^{q^$i;|JP%fM}nxAD$M4*VDv2eNr$ zjJ0HU>{WMRPgEItqd(Tv!8&ihH5y4t9p@z*lCM6J^D?CK`gvA~EY_mzRwwgZ;vV#)agtJ)KA+vL zDyiYtw=(wRgpg&jFqDiIRVRZr%W6Tx8n8?3o!ILk7lTrEL|#y|E2k}vSbjB{i9AH! z9+n$07{y8DlNG;-u zvKh@gW4aRP;b=kp3Do*=XxdjRZGwI##17D)j&MXurvudq%8foL`g^5N8Z5Ab?Dofk zW9++=)$R6%c&V};z$b%G5DMHWB$w6WzEbVvCwapms20~I^(DdUjTExJKb1fouLcNN zmiA8a!BND{s#F%GP8#QV%(;w(VO2Ye5!M6|rwK}eh{(&1CplCm{!}fRyInA+u%$nA zK0*|0s?7?h{L*0N!%Exc>y3MP*IyF0BWg#w?&xt6+7!4nw)nl~A;8?dYVf43v9z;x zH(3E&He>pHS<{zAMy+F#YP_SqZ~ZQXPD4`YE3iN=>;jAZGMa&=Uo)oB->E7hcz^dt zWPrg2u3|m%RsMKbkNEFlj+?S)DO`A|GB0>4=StEgsRunbw&2-H>#5B=b)zF@&XGQv z^XbtvUxN{eLXP%~>#2e7a&Cj%Qfu6=WB!M1O*k79d&5_lHBjc*+`P#7v`k7u5|%P? z;G@QByK-JeN1nf;6zN^Rqk1DKOXdZqPT`JhFX7uVlDgbfKto{I?Q3VdKQA1IkV>*42mF5AS z0ujDRHSMteuEN_GA}lW2Y(w$O?Jfk?VVlc$AMwbkBrS*LO;{{wUO3#Z$$CCa{??LZ zVa6@9RM{oyMX&+1f#l(tN2+b4D?*M2ppf6~u$^{}X^92ZzJ+uYaxc9o{(s?QTLZU+7K-{LvrS4BMbo#i&* zI;IA6facWH_^GIt)5AOtC3nUB>mMM3dRU4wgJ~lnxf~0;EtiiuV2cGq$TdJ znk1X}`e{aPn3tab?Y?(}%5JPCyOevh! 
zN63_1Rjn1TbHx&?O(Xg7Nr1yWKs~o6)gp3RKnd_J^-v|s;=EmIfU<=`Q=x*3R`?yV zO@tb3axVMEC=q|dwHl*_HcPV%SDqEQN2nA{3ZPBmI$DsWqlbQCBVNq5TjB_j^A);JtWN|c zWysL_(#U+4t>2@QM7N&=Ke&dTlz;z4x)i}DL!?3TBDOBTS{s`#GZ%5iQA^eih5O8(O$1Ul4J1w z=P6w^<61cjtH`Uf{#PO%+@bx+)Sb?g*X<{{3*+RXRGs`|3ztfu1kpHQXPhO@GtUb< z7~@LQHBVTT-ldr{z?jlE2GnTiv#zWiOV>9k<0T7YgAUl))j@mXT6p~<`HuU5qut+e z3QHAepHCno!Rhk8zB{t>dFYTrzMZJD?l&Ynaji+NiqJgQiu!0Oou^w>RL)dOH?LUck=w59kn(NeXDH-C<^svvulZ8xy2b|_tD-q7oRI&by+!B+^GlQb$+jprh`cQ*qHmwngn_WQ zgfe%lb>m;RTfm+9&$NXA@+O>kJFC0PMCoRvaD|gQ~mjSNNX5A78Z&O`w?LcKw{&wlG4pFQfc0~_^)uSe?agO zlx^0tUkaWcLedCzrX?5?&Jwp`4W5&1QNz3Yt{#Cq=i%7Nf34{yv@^FDYs8 zxpW=FN7BNx4ii_toVI+t4=71&W8|zSg)OE^y|QB&0Gm|Z9yCGAltYe^lUXu(rh!G# zI;C=gGpib|<8bPoKX0<;V7BNIG}~uJDtp=7eOd`>C*+#kc0@C6Lg9aM<UsiI=f`94;4F zrMAd!MATVwV5%1gvRL7Dz5lq&#EoBxZ4s#-*Du^)SbSxBP8{oSyb=}OLms#&gppw; z1$?&yYrzn+5Rrjrn}8`^urhLyUEj;KmjlejT>@8|jEmP~S%4Quv6k3HR>gUhX4`qJ zzw~eBo3aU_)`fk11TRojYJ7=@df{tm=m34F=WCbD(8(Hp{0^Yr$4F4X2zJoCPXI&H zNVh9O8)e`gLaPmQ6^0C5i*kJ>s@k3)_FMZcM=wDze#o`C=za$MFRnG2p(LN zqJ#3owkq#-Vq&JuFHflRuXneC+;qJ4uOG-fPWsFnQle80t5dbge=_n~^Q~4l^P^`@we}Od*eHJnE|F5x#&vD}; zRQTz$;w>q>+H3GqDM1PtAu6wWkGVXQ|Tkt1(JczxgN!VFqI0)B&0_SXr-LtbUipCU0)EG;=@?wTx6Qg zy*c`>CiRT|>Wd|6D7rO@oJM_mgWdsa;csFXB9$|5z}9GvB^M%ovMv7r*N{;cz0mLe zj$_B}8ib+D5cggdgoRVjm_FDEAueP%YW;Xk_NdFA2Ru}RL8({W)mP8t7~H@3$`}pv+4Mg zH)`a@IbfYM@oo~v{UnLlYl^3;3n!0?0mn$5@Tpeic?)Ik+{MlGX)U&9mIoa|Ealeb zmogvbI7=hF zNAI+x*DP=P_@FN~s3PA<$lw?xRmg-UjZ&b3gC0Rr?3;XAfKiU%^oiFw8dm{{1kluh z(V^7x+PS3myN?6DawV7-a7B3}u#2!bHMWSix#8EBYAt}w&|q201&v)>DJmxrDdQ+u zR7sC>XE@Cw8~uFYV~?1S>m^0YZKg16iPDLnTn6bob;jyB!d>juNi)*@mag>26Xn|2 z*yf1u1$Ifxt#uluFR^N9n!7ujp=s0lrz!0ez@%xuL@a*N|U7NpAE9sF_(=BtKN zTzs_|w_oY=!Bc8>*Ux6)eXxwWfga-)Ax#v(&WvM`Nam}AO>|L~s`ozzKg#$joo$~f zWz3%0hGp%o3M+ekd7kE-c=qE1hQ|jwZUk+nmE`fPVe7BA9ZDmjmc-$q>TJ${Sg@d( zH{=>0ts~L8r>Z@*vkwo*6%{mk3nowYDoY@16SsYD;McGFJw!{}K+|46Kkt6c#+KU` z!XL8UncXZM0kfF15ZHnW#*m*08PDqI_6g&54Mr&I7=;1Cb)e0dw?eV#Le-!taY;;e 
zyYWPEZYo>wY5vQwTG$t7>ZBmp*|KJQ#_BMVxPy{z40=1Ddkh1u>HnsDL>Q05sC`HQ z{PhXlIeYgH{4yjj!z6rsI`+%1kUs) z--E~+e9wgobE@5unLcoQHucHFmOI(e9#v=e$#Slx@cCjS9KD1@w9RyIcd*Q5E;=0& z<`JRDAR{G__BV~F5^F_6&|CD%4uQf+vIbE==Vnz5*;s+Oyj^Dwh)WYird@CvhvT<< zI?aH_j+B)f(<_KAG%@kBx~&9jc7G_Ea6_-g3>@!L2CVY&q^(avZdkm?A@~CScjk%O zf+Hbd^c4_KVy#?6rpBL=XKj-E@*Yun<8e)mCtFjPSn6d0_e&8{!yj@W5xSS;z?ZE) z#mTS1foQIM45!w)Xc~7ObRmZX@0ym?S8Uj~xJnDuC32|$j5h8F!#dKOL(nn;^UwM7 znjyZa4eRMCRMC@WnMx`J2ICpS7FMAc2?Cp?uXTc140M>~y-+4Dw6WNu1FbD97}ym} z5*N+5`mF3x**G*ndIskhP4EsaINgBr(gGwFU3uenRR#Eao;s6-1NEZvdJh#HkESG0 ziZwm%Z)oAWg8-7!ceI;}V4TQxs2fENHVYJ{4_uTw+%@V*xCezDx|pQ%p5Fl$xtmaJ z%lWJgB5ii&4F2cj0qo3?J|O%oWRt%jvpvdQXMHo{Osv)fOl!Fw!BCMrA9see&lhN3 zBpfh})P4D%SN{KS`-MR_ICp9e&Af;<{79To*wg$`d6MHX#f4MPx!NrrI%qF9BwnwV ztlX^9UOT%lr9GG;tbr3v3lvT=CcIjU=U+fi3{|Q{SUgPXbW_oO;v3>+w5M5us(Oc8 zbunGkk0jQ>&ul70Vc*nb#2%=vfPvO)V8RRSdgHIi+>VZcqB+U7gA6&-523_Tke+n- z8~uFePUuWiKH@e5cl;DF!M)PR6fUY!y)CCr{=glUQ6rul`lvmE*oL)pOKpBlnX;s| z?-7h58_}vwCuUpp&uq<^5AX_WS4Q1cajkq#?$CZcdB*4*N7ive>1K5(P8o+%7veA1 zI}6$_qV8Lvn#m1hLS4T4Q}<RsTecZ%}EVk91gX&1z}nANTPAl& zQap+!!Hn&o=_>}sxSRDM#(-Ir&&=+JU&l(p74$J1B{R4=apNY5CFFDSsA#qnU#Zsb)2qTzxfbF8ERa2LqWd$~ zHmTtVaje(PGJLWT;;7Nxwot_&Dw9#YbuDxQg~>@%rjhy7p=AgoR8IXqx~cW&8bO;bxZ8@fvSz97rT-Ea5S}12XzpkrRD(GQFJkB0?0bt%f30t zp;YLG$X8fi&V#dzmO-x-J$*=DlymF0{*$dLvr@MzbLu5ZQH@wXa?QV>SRQwYJRJo9 z!OY5`HZBXIJZm9+;KXQmMKxKG^7N3CtU&gay0(zY1IDnSeTZOX;-9?!;=+T8f-o}} zyAjR4+3=&=@_v%#$fJB7EDe z8)l5_Sxf)?zd;5pJk68t0G=JF0rPs}qVvAjue9I1vsZ&(GrjAq9lmKPaOHgN5`K}` zN&RS`O*K2mIs$#KdX>0r9GaD&nsVH#3=7n3v3f+S2<6~z6vQUj12?! 
zzw?`9_bDiJ>F#lT9mq8Qf_ zm{qeK{<`DvBI>IGv>b0qNlCq0E%Kxr)tXA9`4AX0 zR5p`kq~t4hXGtPuiYr(U{6OrOHP=gMjs`kk8EC^uZ}T&5B_W_yZ|OdVA#R|l%>~IO z@|4Mc#yrTvcsTeYe<}HGVP8WW?i)!JIWWB-{UP$W9Bt}J0(kJ0$(*&HNlA`;W%!b> zH+vQ67K7S2!B|sKFk+3Ob^T9Ia{JHvl;|TJAaAyb5)iG=IAN@R&Cgb39?= z`rq91ypYSN7>b2E_f&b1$ok!ZF8grqYC*b-A%@dZ98>wBj2=cIEf;SzyHJ}@g+y9r zwybMO82E5qFa!t$yiD1s9A1zMrd>2a=mP6)vf|ooQjHOr3kk-jNyBDpjcQ36Jts)u zrariT1MTHY%kUj2STlx+rD_MP4CZN1Yy#shn7iYwLAhmod8FkiZxS)7>$4Qu65L$l zZygrEv7+iXEwL7A6}N-&im;lLDTjIm*IIB;jIFo>n<za&n^ayJ{d8qLb7e$aQR3za z&Leti@xfkl*@S7S*>0V~8@RIZa#|?xWFK2Yr~JRobLnC%JCatY28GAM{5WMdlB>K+ zsG?#SrpO3$EsLF7$eY@xcJ5ddiNR3flx6ly_IrK=H!@k-hKfz7BqffF z!vAq`CQgMsOk_Wa58h#KNdhjr?1TjnFHYY{W%h{xP3h<+nx|x@Xc^8gX*dG?MP35ByHwdP_V@*jC`19|0C<8+*B}Z zoR$~j0mUAK*;hP#v5KCu(zbdJT!STPb6r34_D6`f((S%sw*@{_NchR*Zy`H)nqGpK z0K8k`kHF-yIq$L0J5;f1nO9ccEMjYLc zP&`(U<c-jorHzd>D*;I2X)3s^kjXh%`c%0ZZQ?~*z(^+TB+xA^Fm_fbQNJ6jiq$GCkOamC$+SE5Pr-)EnV*hG*pg;cRUM4dD3n0`!RzX z7!SHAmM)l^T>~$$YhB9Nj+fK6-!0@$A+5FNC9CL{_0zQiQ%)Gi<8aVVq`kpNbIA); z1u1G+`!#U+tMzut zDEduN%!?(NYT8!q)gG=!gvVR`C_qWj`T4`>fov|d4RXEnkw|KsgYN^37>znd>xHX7 z0UmgA1-UcVFqqpYVm3Bx={NZ;RS!=|v-OAC@Xea+(P^4ZalWbB18kOQIsdEO%15n>LOOp4~O)~SSBCVT>)n`h%oRJFdj*YwK|f@FWHp?xdXQP zxF`(luBZN&4pJ6?A_GN`uUC0@k@@O%sc=7L7DXgzU>n^|1gTTLH?z-<7Cy|HCKYu_ z_I~(I0Ah?r&)w!kEBRxDpSfq{KUIPSXYxR_d)F~Hj8b)$#%9`CFlb?8Fg-4zLhggo zk~%9QmE2gZ`IBt4!D>rrSuw^Z9hB3b@t9*T0jS-~Sf})#P=WFO3bfa^(e@HclTb1X zQ}8sg>mrC{jHE>X?~k6cDY1Z*+)BeCZZS!e&+3Mt4JCkWr*}QCbjm|Y6J%({n zSsRfhc?V^nZMG`Quqh~;bA9kGRJSV08f+Ly{KIT}}Xa;4h#^4Hn+g=Et?K>Z{NQpn8PVtXe1p)&YjA0pDb5U(uzF@s}bulKJp|1|a&?O%s|1msdAB2K{@QrsylhNt^X;mY4 zO`n;s-XgHsUx*9QIJ~;rrd$cp_&P?DCWs$V*m^;f$wHRfvS%KvNsW0C+yLA~n{O-Q zb8Ur@hfYFiYV^g8O}DK#AH#nL*%dGTG6Ykk-*j%xv=Y2IJ@c-F&U|$nk5Z zKrnTbShvWW6mw!MVCu_nbS(8>mWkvF^$K7^amBYshK*~ky#P*OW>S`5i zw;i?(x|u{tq51n^&&=2Ux#notCyKPH2X5qaSh5e8P*rHVyr~=2H4n$b1UtKg!kzf^ zU=;D;!>7KG1hZ?SmL^I@g!GWVs$D?WsoJPnyvm-uIz37oM_a*c z6b;tQP%laGc-XxyPR?a#z!(v84*z^5-CD>RS-IUcf=Yc6R-E_YH^WpyCu4w61C{Su 
z+e}Z>E!dv?ZP?`dwg5gsV@TQ3EDipJE#ZC@M!5)3!)o{eizM#E7v6MdJ=|N)#EMRR z2HvTDzx)BTu;jJ{8F=qvv{j)HSLLjDASwj1 zzW|XbC^nmhUgms=Ymt?fzpN!;=)1be;MeG~q6vHL@X&573RIC34lyp#*4S4kJ>8<8 z9E)=;fV^#?eoj1GCw>O66my=n*opF1SlDS1BGgT=i7&*O-8*tNK4Irc+&}=F-(qtf zt=`+%d~D}iz*rA;v%6$@P10w3{5H_0Z2G>dnkwLRnkB~GFUj*%H9#kbR7m_=m1oLW zcDH|iT!H?v*;xj|`?tstCx=i9z`J&*sBD^>EjG6$*;7v}7iiv4>P`{ZhW63npLeDE zzCisLG}-ApT`=_9cwfgmnPJ|=T$JS5f{^-KFXzA>gRycU<)V_op|sv5Ug0@0dIiec zUSJKUCAC4=E9V0uf1+?LS9j~-rIcJZrv$#EUJO!(4+P7gD|`K_SYUXn*BTFxxvCpicY zw0}gGn{j>91X$&#$Vu|1Hob-zz+saZX-WIgG9}Zh*iIL8#TaRtE>9}roS{C_t^%^c zgysA&ITI>k7kUy=;&C$*6s-yD4fv;jULbrL$YWtxwXXjPSWj!r2%7uAz}lT9@Va#L zUdIQUt>X9&Y$MD`XZ*Pj(<@SNaQ7)!aec1ym~BGH`zKb&>@4Mi z9pb@Wh6$-DLbcBJiaMLKHs;JCeMGuQnXa*QlSJA%SrB ze?=#Id)6IuX0Z&hhQ#Y5Qya;+aJsPQs9HeI5kYxKL^1N>C@@xyF#2?#t5K=xg)E}= z$Gc$2_`8{FZii%DtN0oNa6yhhAg`^Pl+y3$eTxlX%#?@>+K6LRM1=B}OXSgD+&%UN)v4JrUrenEQ z+Dn;~w@g3Bc`6BF576QQn8fILP`p8usHz#c#61Q)lVFfn3V{gOeUc90-$#0zV(#J-?1ph6 z9gnjFsufTCIkiAVgWL2+?)Q$1UKLSDF1?vx;@O|Xw(2}6y?}Aad71Wc`+{wwPF+1u zDPE4*$ZvJ*K7I7H2UlTyB)H4~Gq;x6!8x)NGCQnCiPxb_vKSrhL2CIl{#CYudZAc3Q1KnL{9c=IYwbmU~Mb)t#Ck#nD?)4+I4 zG2_de&Mq1t>*RWK-aCL=G55KgGXo-by0wO*PaGu!7xBvENQ;dV8kp6qmP*#Hc$=g6#F&#UT(|89YSOB}UHozcjd95dP01QxNXW!xszT4`ZrmSd)| zaino?^g!Frz)xqUWKbpRc0~2fYwIRPloPKQGw=OR(He<)<*tNb7c2n6$mGr7`s&aC z9X5@-zoXp=sP>o(?InguiieC>`%ODEs&Fy~GiRn6k+(s&1gj$CEtN8cr8%JAD3foI zoe1b?26oB$@_~AMxYccReIJ6*NuYKPeHps8RJiPE#dxF-7g?0P&ccF{m0<8M2JdQ! 
z#yGd#Ec<~eI;aDLr6 z2(mh{VohV1WWltObQ@Nz!VXr^q0le97Ol8v-M9Y+c=knbwySM6I_@LyHgw@&uZ~x{ z90kx;@(L5C>Nde^Gn;6lt`wPKlWVb(8_Om8mbKJ3G9WQX1Lgg}74`BRl%TdoPJ}>1 z^&X_Qeiko*d37j|Aa{g3wfw8A-6@ZcS+}d{ih?9)7RbY_kS<@{_BCC+&+`$=o#*1c zE=m^Le9PZdN12)^T9qk!Mo#X!(l!!__~rJcmMpohi>BAl4EqLxwH=H;-AOv$8#UzP zEpo8iUT(JYqT=uIoI$Mn=iSuX!0PxjC=SF(vARP!mMy;NvLFJ{BF(3t^muFtGDLjA zzcW~LMA>QNe@)=O6z>P_#IeIA3g--pkzXa$ewjUGVlH$YeHm5Gm~OOGrIqo6)Ut@G zDQIX%QlDtvrvI^UENoLgZtHUgM#v>z4ckdQmEvog*}f~MeT^b{fP z`*pPzKPMB$Owp8J^VXpED*o+^iFA?=Ge8STo>ioocH{<^#WtB+u%3!$iL9A?kmvxo z4P4gdb&;RPP6RxGJ)q7XIGEA0h#t%v!34Y&gsfjf4eBqx^$E2_2N%Ep%S(T1sbki$>g#RUywC7(>nq5tf~i~5cxsy`s01xDcW56w*A>XF#7+pt2g zCm%hBmR?hU@5vQR5zx0$?3o8H4X08pLVZYV-y*`}Xb@`IBgwAVfEdPVa@f~h00ifr zR(vc?^7)ir!A4g_97&{*`vpHn;@p5N$@1s?3NG&(uP8b(O53r`)nJ6mkhgM|ND+74 zk~h0|r+F+MHY_;E)2Q!D#R{u`#Q(lJ)fec;QUoGHi|f~BI{z)v&p{L#9X9`sZlZTV z@`{>8D}?K>OgtiHT<>*{4@Y`W=Kb&GzcLYZzjoK>$V%qp6a~nuy2e}540L_{5(Qqj z%9$T-XCSp_sTda#Y1n3%7yzq8W_VvAV8wQl9 zn0aS<`M|m&zEl~!vZGikvM$hEWRT$tB5l!!p+RcU{}P#CYK>$kA!pxBF%e5HZ0LjO z`?lTyN$I|Nz*1XL^~pMIJLZcD)ZOo6`7QIWveWqXLt!fUdh+KFxMC_mXBOPCMQSZD z2vZ^%7}3&7gpUAeksB~_-5d6Sf@psRPY7i?yiX?X6CO;A%I2-ZQ$giOY>h?7wnPF_ z<@S6Kql`!7h0*wYlfpc#3L0C8HK+ky1bfjaq@ zH(b1C(ZUU5dR7WrTLuszPYP_f(3Ll@g(kR|6|(Y$wZ&pC>o`Cuh(h{T)w*7%sX(pe8tb3pOsP$G zqG#{B`JNn{Nop|klv%0a5N24TX@Au9jn}q=QUq{u{!#hMa3iVby@FH}V_U3ckxkJ~ zS7G@RdUIZuNM3C$*caC=U>BboYqX65IqN=k~%hC)3A`blApjV3fRxtL;1d7o67Pej)5AqMKu@D~Nzhoxzp$_Ka`vBBj=Tsp~0+wt)&N zp`qKt%ObWjLZ(Cajs*qsirdHL`1~#oBf{1-6i66kW;CJgG^L$dSmN!@PP#UMHQA26 zZ#`{oT%ya<%A@Pq(XG;>*e*lW$68k)daB*bk=iT;M$UwEUN;{VEp)2I$F79;7Ptv&z z@lnS$ulHmYbqful8i`3lcKxK1Q=0aw?qzxe*j@1YVoqp+qlvApR@WZ3t!8{Uj_d_D z?MOW|{n7T!yRq6?p45ZZ?&x0z*<`vaBZu|UhFw_7tR*k+DkB;0<7aitM%7M-jqHi4Da z`K(9eP#g|AvZe}vV=M^+uWiVhj^7I;oK$aAW=lV#ERUdea;4h#^4Hn-u&vxQeh}`W z@oEN)aLf>Vq&$0-p`GP&8b4v)aZXu9FXx3u4S-et z_tJL5H@#9BoiIFc{;hnT%t`Xc-^b#kInDgx^Vl5y&oGhjc2N$Owy359`tvivd88LG z&cmx;QG(MPf#skoZ?Po6VBBM_T&CHJtf4#2y%GCAq=TXTeasJTM)tjf6R&gK=Px3m 
zg{W&8F#fr&=Detw?Qrrp^U*Afv2=j$G5i>B$e%D?&<3hfqA?WvgY_UV>}KiT)s)-3 z0sd$vn>DJ)RjEsew=RkprWZD+mAtA=5OMX*}5G>mCwG*c0L2+HlTtSCe#eXpgR;$vSk{dv3(Y z1iN2S&om0;T{dfFSh+M zbduX-otgZYVMN55y=QTAL2hnS6T3K9c3-3UnK%$ z8!+RVyWcV?e{qWQq7#lP`8#P}ZGm&&HKFrTp>Sd`$wO>G(69?3Om|nb66^+&(Z6k8FVO}-!pM)D84D1HZrE_UAmBio4X8kDP7iedVUyAVD=Lt z$#Xb}vZa1KCTFYu)sB|&f-Ur63a`>aGC#kboTirs;bb zKxli6V~=MQD7DgSO*+(F2o($U))f^0#2(*&2d`BRJc!YzXD%>k^yU4#nclf+Hzq)Q z<+AU-d*V!avs+Q&dk2RCwmDGQFg-7|vljY~Wo5U)1qCT0QKAj-Zy|k5fS%*ksy78*Nq(z5M7w@)j z^445aIxK+FlI{Z_v9_;MT0+aJSLh42cV>Fjc!z9&b|}t**hXL*sj-p-b8Dv!jYIb6ew3LaZQRlaWT<)c5#g6T}*%%Ibw8SE@uEgxhNa51~nQ@v!GV9&f$Jmg1rN zSJBQ9c+!|*hlN{$Lf#QX=EjN~9Q@K%UbVL2!C>PNfrhAbE4z$4B)>|PReRGuBW&ke zz*rA;v%0#-<5CT2(_zE^B2JtnFwjVDQU{hJf*T<@VcR6M(o@wPYOpalE>1GikomrE z$$cZkC21DKVIli4#2PsCaQmYr_C8#N)?+3vF?`|!NMq{BL6Hg6dF;CHNBNDV&RkYh>v#j)pCmQnjekbo%W&1Z{ZdBw=I*d$?#~Qq18J}h}0HL}xbQ!*N@tYzbnPEK#B1QmlJNWTxo0_|jN!k|8=rU&zZqshvpGh=P`4#p zm~u|-dBj>*WJcJ)Q?qVr`H&4PtMTNXh($y82#&Xrd0YzMJK^=(?Ji}V+-^rR-XO-! z4$rm5EJC=jx16*?u7F%E`+psIT?QpHL#UY?+QB}(%tqfT#wDQG;X{718``8+qd>(s zRWhIbuX3eNnllTRDOfhh0goM!GmOGeyQ~9g!>4XM86XT5ESR-f1oXDo84Xg#{b($o zljs{0X_>*RA8y3}1+QI~tu7dRBHW*+@A{l$!_42JQ7SKqNNb7}Dle_?qRE(mng615 zqqo5nBU8FT0hE@=PAWW$dP9kq{VUFeKAEP*i-hZtR8?dQgapS-7)Q9MsU4?wY>*xr zHvtX+G?F-fx=}tz2zEt_vEE2C?+8QajNLYFHW-}6)m~T3Xi1}G`K)lj4)9{UYje-+ zXP$fvML;XCHVNben7QgecP0Nu5eCzU;@D|Eq8wgF=9olzXVen^s4F9#$={R%euc`D!;o5Ug#5qEwhCS|vzi;gR(&^Kd1zQ7Z=gXXAk zHfQ-bK7+ICu`j?Mhz5}zK)9q>(ReURncx?1dOPlzg;a|S9HfbV*vx}LuZsWb=hyq5 zN>ynb-c>lP^R`iO4Q@c+{YC-SM*tl#h%I2SN{@<1dD8-uZmF&S>`1fr*F-VJ_DEgTy%b- zmG)Xy%;o#7MPYjFarU*%C$TBW;>xV{H~(UPSZJMvj&xQD#)KCsx=*rd+8HY%*0)n( z$=*ZAjp2WGq30a=>TTFJ{km$Q#3uiB_9S-kY^{brZC^@?`s$H7;m22C{0p&Kls`an z0R%kTkd8xlw=LNpldIOiJ{g&^N1vr|R;3`0@tDmC=6;VwuoMc{6_Px9{1_#Qhq)B1YaP`k2xj4{oXj*2IT4ijD{_%g&1bi)RZ}Grox335)!o zuv=R$?~Q;#_`RetFRdpHW?Vfx#-f&cruLd3#xK-WibM%K-Qvwz067V?x=@H|dROY| z!Oc!Y{0V->gZ^@rIH3v}J4m`IJU`%kqwdRAn|w9Ne~-a z&OJVwjYe+Y2|?zVQOhQH8>DLr0PECbSlr5^~#k 
z$Ekj*I`P#>1ebh_O5y-115Ai%XK<)p^yr@Z>&04$95drVL@Y`Hu0sN`9|8JAi)p;8&B%^}-FssYk$wf<_9sTnCGQe`U?2C+n zoDW0^#^K8Zk>eX*CGOTc1Ce1}7d@C=k}aWdKsnm{T|X?*ByIgYgng|*0Td0c5LboO zUfQT}ewKSh05&8Y!cyEzFNR_RTWM{1*y0G_AT&dvu|*Ivek1mW@H=$Yt{M9-~2D!(EF%uh>KsbP7kj7_vt zzN`qwfxmGVA+bLkS_6KA)Yg(F79z=ZkNZ*g9w4|aDieId(wGF&&JXZloK7Y_a0h6v zJqt;G=&eV5%2;2Z!{7Grn?NrmyyZ)PFiT zy(3tW!hhFh1Xv8*3kT`@tbJIODC`@G&p#i2$7xHPR{0Afgu6SXW(+GlS!8Po_}f>} zEvqx+DZ}ijV_SL#J>s+Usr`N{B@uAmse2`xi2p|qE`eT3S!ZI5fZDhU-d>YSRnM>T z$?fu6y1Lr1u~-``7iC;bHC?h9o?6Ba9%(setT}EoxsW>RL?%j~5m>$P$dX?iO=FTe zk~BO1x6B;sYr39RJGm6M(6Jx_lKrhY>AIRGh;R|bieEkLv@yzXfV7?`n`tQg8yi5z zch+5+=4lVTbt8&pY^iU(+{PAv(vdcxvuR>P71vUvKQ-}tN_xv*ts9Tmm-l&Kcru~@ zXNxY%b`iFuMCa|7K^Kl8m)B^D#QF>Vrt(Xsid;v(jbU}vd*lw$N?!m8LhZ&Pe{?jc zE&_2%Z_j7;C+l||Df;Rom?#wAff|R)B1pLGxo8J}V?U}C-#7?VYtQUh#bbNk<|#|5 zT$V&QB<2GFcK5u*={4nV42kDqhMUW-@4A$5uF)7oXjFJBysr!=k=?5l{VE4{mN|CH z&_+G_Leg4od3T!5pH>v#w@P8O;*EL#iQi!~^Psow&-#PyFaI5+TYpX|V080M4p?|( z>;^mGE)@dvkIf1^Huc#93*6W92<&o+mYbdY#1Y5k0vfgV>|kS+=;{SA=dyyFYkcR> zoa;A3U1|ID=^Z^&+f9|X6IC>Xg@4d*AcP_5s&s{wAkS7idvi1=<>meTcEe*+q6&6K z@|tPV62`0lTl>u=1p)n&uUX~Q%dUao_Vez08_N{BAL1HitkDH8w{}zki6#~sRkNx2 z0BNPAzSD|W429!qS_g8*!Zr9pFQ^8X$8ROp+M5rPK=#LnN8&J7wX`@_oWa?Kfb37Rn=Dd4>(`!ZcJ>@42_Tp`3d@7uawKC?zT9NgNC~crp`&hoZ&zRSu zz4z_w@8@r@_mb<76X-ez1^}j}cFL{|{g)QKC6qPz-L8S$kehp6>0X_nM!vz`bK2y4 zLaDymhqRAB2Y8^l4D7Vj~Bg@c3uy{RdJGZVNvC_m;^MiP859MqnaL?6xnC7pOJgOy?mUPZlr?445K#X1^dfL{oZDnleXfa-gxuIIMFdymLN4IoL zBKDsWwt;f#hhrP5kXv_uk{3Y)eE67&MFPk`CZ98n6^2v){$~zNaqFCz*9XmKbsX>+ zY{(=X-0$DltU3VAx2&>4UElC_+HgL3<+d`KK)I-DrG2f9sq`x3mtPmtcxQ9vlAar} z)Wo$32NePOHwDP6D7w-S1kDAn$B1kPzlTo}+*7I1^jZ%7rToj1ar6Ed(N(H~rR7|DJfy~1-2V4ut zwUG$zIGaf2e^f{bE$qvhk8s>^S$D77i4GO&0R*G1>-9%68j`$VBULMUmlk!MThqW| zI!VbM(}*?IyVyrtg+mRZsD`wd`z{=wQ1W}>sRDw#Agr&Vmz(Lt4f`;1=5gN|QRN($diVQ9k)CV3#uZ)QhcySSUfsUK<8j#F5gA*qlRS%|qs%}# z5xid$0GLq|JdOYi@|+GWf_biC!q7$ZQ*lXTZ@`q@5X|A>}dwV>h_Y?wuJ7x_bmizg7~hZ}h}A?o+rm}L@y z3*>-s{*|*w394VpzlxJ@cjg_9seABU3P?!Fibtf?t*CUmj$Sy?VK4?-9CR|Q(utz) 
zhudvk7phwN8m)$}m~uh9eg+szQRJ74EjPYeGNR7&drv}N-wa7CbP=1O@QtooJkz>Y$wT$k(-zAxnIzpm14|X9#>oI zC%_<;bpKyaF%d+2uKW{G@66_gC{Bh62Y)HpgzxFo+;2Leo`l2YJX^WGFt@K?0|VgE zd1hzB%lH<`90=aL$~K{has+MT6iQ96j)^0b<8vhTk0})3cIM&toE(Ncm{8F{A3z?{j&(f%fi-50ZYWt+^?GzOF-RFXv(7KUJ;cO`}g0 z{tYQazW0L^7_lla*qD$rHTnCWv?H`{?cqN3D!bt)k~{L3W^p-3^FY; z?%F;7E}P*BDoPqG`}&5+2XY*wH?s05!m~|u&fDNrS^b&OY3;&vIKTQ=z&j3pF z2me&2zwbu(kjThXAAg0sGZ9&;=)twQ;gEsyRh>13=a2Cp8V0WbuUgLUb7|tl5z_c( z?LAjetQabob80WDeCdc`!YTJbnG1XndYp^*(9O!R%LJb!DC?^%kVQa;WvJctv_(8= z#I8q25>*dFzw7ov8=+s5BSG|xp%rwekQX@))f`Knz$4^l`)lZ;^}MhKH8kUbd8??% z?GDQes!#oT*f2#k-zE^CA!n$v!~F;td9vm`x&#Czfdcm!vYmNVT9=Vg}QEZA3n{M^5gIzSSRj z(`1v{m~Tj&(ytP$pz8gAb7Uz#pGwfcuYWhcR||3?Zur>xt{oq8Bx3XHME``|M8l6Y7YwdAlB$f zEw-%tUNP+Dm@?RJDs_RyLKT$5vVr_%Jj@3^GpVmMufS1n6Gp5uc@XH2V>@B z1GQPHbBWy;UHZDj!WSLdxRnS{bIvMw?l7f)bWs*yS1DSSH)n7`b>>nf`LYO{xSt3` zRn02#A=CT1mQ7FlKbiW;?O0Yy*L5W7R6( zO1dCcNy_3b`LIa-%`VF1o13}Z-GSd1$JVq-UEUA8$!d0} z|A-y=gOyilokrJdao8Bo-hkflk%zII+eZOcZSI^d{iv5&K$R41tJH%R?_)VPq2G!e*aKt8PvVVFmS$Z(C{z53L#%i=sczDVn~B(ZC6H=x^^u%z1B8#h&f0N2gATzNdM~Jk z`FlDvk>+x1JrgcBBd^78=AuQH3|g??$jYFBta-lA^xe>S|LJ{_S(l6PpgBQkdP-if zOe>Qv9>9N7xUEOYt_w+C*G*E3v z4}Rjw2X?q|eXu%d;*?~Zv#G4x{Un{O^*|bS!%VF^6$Qkn8<{arz4#(P#x`9o&Ccog z+>UK`t)9;xz%+2Hs%~r>ucZS3wkupj%+7gM2XR#7Z$?Z3PT9(&83YS(L0g4@x|g|( zOfGcbmD`|Xl?}zG*NN{+R|@oX^@UvMU@9F0&L2K`=|NKT&|Jex9Smu#e>t&=e;U)D zVoc*D@~X*dKFfWv^zh6>l;xn{G|QSsjndFEUM@gB@qZKnNlf8U_r|3K3y!qcNkrU8 z%Af6(*EVSybh0Qv>D#)oyO%U~Nlz-%0A|Tu`j;(q;U2@!4GiaO8-@(2o))kDtkzNdL^wb`)~-z)j>w~VCxXYwJ|_IDu^xK^yX3yv(y z9(RvJm0U7OF)m?@!7X3nWlI-D4a{8{L~JW*bGjo)k@uIB>)>v&LuQC?o^eWBKX99a z2}m+w7yGtP!j5+Q^`|>Bu@Qg38Wbhfrc!({nNwiO!dI45+YhQ!vX!BIxwEwYMEiLw zF)NnqmP-!f8A_`~b;Ws^J{t(i;4}c;2Er;0IUi;Ci);eM3ZM{X6e^&W7gA|KB^GG3 zj`IYBA@Oq5YkvGOZxe#en#yUfk{>zT)gyfzCN^ACemI_(o$f9}25WPLX@ldlgg{w( zLXy8XUM{F8qFBCsL@7NeC}0(d5Mj-+ABwDezL>WpwBAvc^9KhtCd#Y5Jbka4JRWT- zKZM#vO=|~VW?aV-{QKHzGhZbVoy0zc@THBp>b}u$f+kHxD%s)hP?gXA5O!r?)@+KJ 
zZ=PjrMzd?bfG4EO6^j;|5)oC*0fLU-mRe)g+%RkWa&{C^4+D^P)#neq#<6l{etoLg z^~tLW0DH@JPVyt=jb(K`81Fx-T02HVgA28|%iGf-K=tjG6PElY%WN z!_sG1FYVWgy=_`VkDF2srCa`q5}Mw!M+;g51=Y4kQ#3nrf zEv^TEoKmGjX5q_pR({wMtI5rkGus2*|BKDrq|NdVn~99^d7c)0G2(;Zbax_jh*~=U z%Y6VijpUvTtqCOys}*#@Wr$*qlZTd?&ou0BC5TS_RNY+gQ^0V|?>{qEKnq5}KbV_t zU=na99vB(G3xb|=>A{#&4_@$!S8^vi#3jsxUWpG3LMQ)Z2dkd=t9BR~;3>0*Cx>af zXm(7&qGRA;`V1bbXI;-Uv5{G&q)Ysm7xCGF!JZs^hGzX)K30o-M#2H1g2|0r5fzLB z(HEZ!`oZSP5RPtX{4JxwZ;KP>u!`fGwX>jW7qP>kI+p3z1Wv{P8^i5ygQle}Dx}q@ zYU{u*M`lOpTtU84>bm)wmOh^vr@rtzKn;Gr0voxpc5OpzbN`?AVVi5tciOX}6sr?0fpXvqN z2iY-x-xw1(?LAgpVImJDTdqWsO8yDFcdB7-ex*^_fuM?^Q{{XP?j;5s4xNye&m+{6 z2Q@jkTsmy{thy^nL+E>`eFOEz_?36VGd-%4U)oH}uSG#RVcmK^**8C$a8=FcEjnbx zaN#j<$}OJBE&X&L?`aFwY^PVv84~$25c!4Z_d7bx-*^&)Yeg~+sGu%Dv;;R>^q9B}!>XDrjd#9ym9v8vbGWval!Js{q%u{m zG?W&76<6?j$fOivdcB0do~y%wk`=$x%34$?A>}fMD~wb_&bc4W5l$4Db-yl^*$VPu z+`}z%&i|v}rHby=HLxNM;qBJ<6+O~iwJJ%Kw6$s*Hy`HYJTa&(jJbDv?4%A)5kp8O zGQO&&*pWRa=CUr8W!Q=8p+vQY(Gt@Rlc_e-W^lguhc~X+OO_!kjuw0Lb=nK;EZQ|E zF)@)mphb*!72_~BU2rw*)!f+Op0c?xw-;jv;C2B0g6tyIgeDOLeIJ*Pm~fK&LUsp% zb8L^EERpisa?ozd>|w@z+*^@Xn4UC)y|-Dp`?}srBaWE&?A;%~Kfh~ychv)wS_jv> z>kN4~$Pa3djQ`I8WhIXCh#W$Hwmx|vN}WZew*?a+!g&r!gjr?hwFZEXE}ur4Pe^!- zX|1#tB7}2CsY1_9c6PwCC;m4!KDy?k4(dmGIWc0%N{mVZp|15^Um^s;&!4zkDNli(h8%6-;mTfRHaLtetXaQpnLNm zOAagboCaqr)qBiJucIZPKw{*CG0G0=>YkBl0r8Gka5eulmc}2SiXJ450_r*CeCqPo zn{sPa^ckx~tAs=NvP!@fxZqIFLdZucgMq@sV~JNWnNdna8ud>c(Wg2tUT8M5P)1ZZD_TQXPFuKg0sgrrCkK4m&35CO6p0 zfM-;I4;SOnLK&Vb2U&g;cxgS}z$vD7t?#rse`8O$%%ph8cqK9uh;PJDQxvx{iu1|G zn-c;x9D~k_A0};|op2*{P)>NiVy*8vA|GMlIl=#>X&OkQHmK@WJ$qH4xzphrbQUx1daKRUE^2wHXE^dG^4Hn|etgxuHBD$A%<${4H}?nV^cfv_N8LJ+O=eiV=@XE@VPldVT^224$pl*7>&f_=hdWmNOK zo4x=sFn9g>ZMZzXIEG|;ru2SJ&sLL*jd;HmbxSiEn`0=Z98fV0zC=}3N*E4)b6a=o#jk<5*woo`z>I1KSr;zje)il0&xq1ZsLyBn#NUEzo5Y*z}v)5 zYY4{ogu>e30G2R002l(EIQH1I5vpi;A&*8sC+kHJsn#Pcclk z^DNnCRf>_MY-E2Bq)A=@aH`%8CU(liVy%nU)}o(iUmqp-y`ZI)<2jIn@?8GS+4H48 ze+$t+4-|}yv!U}Ne{~}C2K|!)B@5?;8^7fm4Y}&H&=4!AjfGM3RrEW9A$H1w*b+}C 
zqNuvaC&mzmBt@8k1#Dv$H71~bM@$A#x_o7%@~AS34UYs+Yh{e>Ix*E1i`fz%QYV_@ zVO4g94YXOG<*&>^_1GK+BXzQgVJmk##qU*Nll2PO1-i(pyrHEZb_ge-3}V^HSkc~t z-OZ431*tAJezopy4}W@H#(ipAQgL71ly znMcc)!wOC^vQ=8t322`bZ!a^SPIC$-9iuFo4w!_r%sF){uw~y!JY;`qLm=kg5VqDr zi+bl8FT(?4*;#PvyL1mHaC6>D&{{y}PF}=;*y-X6q@)CxN6y)>W zqA=W&xmV_B&0OH9@!}l7IBbm_ER)cQwFWXu26$h{(Ybuggj}~la!6UklL`_=^)vL? z#mmZ>RZubnJ9lp{E%h%iNi>=8V}ygwmc37~}2<%V(7YZ2W36JLyb2cSq|XQ6a4codgnf-2e^> z=?H>qSd^7!wuBSShK2_pu%$h}9RA*S?z+k%K;Y8B9^dZY z>~MoGqvP^k0GYM{7s4Ht*ukn~`mA|U&yansfigJNihKh&`hIgmrVilR$u5f0_t{Q3 zVkA=N%C8wt=q-xm>Zyi&OJH+F^-#*H~%G>4ZNH=$5VHoRMd~j;36A%R962>n#CbCiJt#9D2?S;SzKD%Kke%IV>ow z#s*!L1E?SJB}N*7DkE1543@E4x*oNn5kgG!N<$w7*bZ7)hrygfK>cxz3%}JA(EW1U zh+H4AI#rgAx*bbmMGw2w2<#|!9TqYktM_YkF|axsUq+SN_arM1@RG z?MNF;8MnPLwX{b)zH$FmeDpS^%WLb$&~xO+5lRimTFLI?aj@LiTjh!a-d|G1I66M+ zap{@?{XYCHFbg~u<7^WSLTjb;-Dp0%8W^!CVwb5l*-|-F8;`!mmd9NlO}ooZ#_}6` zqCvk8kN>mDyfPD|x*5`OkX%Ce@1EM~A?&Icdfkjl&?9ergl$}K-n!?%dKU40aY3n- zyLjuryj#Ml&V(es7)YSwZ_S!omncS?=LIc*Bie9JytzZ|VUvD0F(qlZpNky^Cys#C zUF8B0KGi2Ne~BZTLE%#VK5`hLIL;DY)e8?8hclKZB|zvhp@o=#gEG{bsg#RIL9k=N zMTf4a&g={C!SlRU+%94dVGnJW;tMv#pSsy&*51i5y~F4vAwh%JJ$Xwq7cKG?KAk-x zr$~*0FCSU&sk(9HM>xa(OkWs1TQ200%FXxKkB$mxTy7m-~du$r2y(rFuER5d|1n^7sgy*t`GvYzovh_jFq)iHiVb$qu-;7F{^|1EQ`brxdCPV%^Li5HXU+t1+2xba3w` z^m%FG$~+=(d9Fd9z2rCeJq&`64yw}SEiH0Oj2w%7=o)2mSYl~|q{?#Tj{B8dlLj8J zwu(M83~f71M*}F=OQiwul3d7{AY_A+4@Y!fU40>R?F!oGs~z#keB#`NIEIRuY*Jgn98 zSpGzT_glmFXKk7bE~zDo{e8wSNHW4#D1xrq%lg6m8pNVdIH7-~kG#hCc$5Wgkk?E* z2H{|CMhV>23z7ko~c-iMi<~aRJj7BR&FPAy#s!0rWfkId-|Y4r)5{+AZ3uulwQ;(*aDzL zCnIU6QAjkpCg%CR_G}p~lpeTIX8Ue(I+mEs8V%!4?2(E1DnP}W(R_W{FJvNU_&QFJkBrv(k0{%!(eiA)t&#+m(aP*-mWvq3kx3o_s`;=Ir%7z^y;+CF+? 
zfsVvc#{xileGKMdd%ZeWRU`1lL2f5y0L_LF%qM9?z3L@biSJoI45VvEps_?_$}!s% z7)GdMCN@@~D7#JVx$ekdVTG_O2uHbAGrl6_oRku`XO2l83NVzC1P-|!1;=q0xX`c2 zfI5TR>}ZO=CzJ4D<<gtPqj8m3EZigNCi|DE==LKfrc@G?$}*y0gJ`!t|p80tp|38KU3dI1Btn zPwqp^2LFkuEssn2J;eyqGF{x6j*rSVbLgm zgDF+w?tE6ggh=E&>s@MpSo&W~MLCu<`I$Z4`%_s2EZ5C6HWTh#($!sp%2T51E|ooG zy&o}ku+bTM9YZ9$KRaYCE12tz@V(9Q!mflj25*KS8!HkH{x%L%SMl(TIaf&K_@gZL znr(^f+kcL3J$+v;3NMPlnkT-%Ao_zWbnvT(`<+zoJ8T%ng9{1GohvIoTSZEC8hVv? zy+I<)PHCwk0Q`F^Y$)y{sVsS*WrZhIQ^B_@k=X~BD->QwYZ5W(kfN3Usc?{u`6?&{ zwudSea*D+Bygz|Iw}7PLp>r!XAhXvHtiQP5WC}*<5_5{K7E!R}j@h%vm6nqbQ}Wh; z)iHu9C_FmjO5!sDI%(h{@G$V6ztm@XZDvOKxHID12-8I{DK#zs)I(MU5zm5{zQ`(e zN+tvaXopZ|LxzxOiU_bhi?w#?cn_CBE?>FL$kha33%~%siw3%}OvAF1=v=tI$r63# z-3l!>A_frC8XSSrFFpTHS~AxMmEa^VC0NP=uQiz&*QA5`0y&3lUEspyYu+MIS0G#V zxpPei3GTy3Ix}UVszx)MOOfS#d%&9o(IS+m26Bn<2h*{AC3Nu(GosPGCp-NZ%G{p` zvAzoDh19NVzkJP5%PW%M3O7pishklX?)Q3d*CvVCz+Cw3qOyfa# zAljLn%D&K~9M;k5Uayf<2bXa(1qySmfaWAy|W0Gs`$n~6Fp*eGDI+Iuo zWcRrgAb^G32FY;ILo?-%?X-QjvhTA?ZiKJj1C$Yte1&yQg!Ba+;h;i@i~Psc1j!jf zOtv8^m<{vdotfVC{zk>~>#3$$F#!FHDlzo+A%1xaO&69j6;7HHSZLc&N{yQR)(Ew2M`o5;AcsF|s~j(;voUCntVPsZ zd~p+SaS?X*7BXS#a)Q7(GFHnra1Ga8A)9v1hIDa$Gr@(3Sugr?_K1pl7wBOp0ZzZ4 zgis)&jNi1ENgn)x@suYLfLm>7d5)#HCFcOHG#;VvH+Pagamsy(m4qgj4JM~B{&KX9 zml89Qh-3R6gNmjBrtN9om)%n#M_y1LB%KT8VB`<#ihFWK#aKV&1+YBz7 zPMeS;u#9}@)%8y*s{~~JTFH~6RBg6i9sHg&2-T}iUox|^d3i{&O!lwW!&kIbUbQfvB-YE=r}BqA0F}Q7lK|z0$+B*b72hu4%J-`FT4k3|DrsF2unv$ll-0#sm2mrsa3 z(M)?KNCy*j>#6G^8mQ3R2mNuSH%PK%Lv4$x`@WSBvd@R9x!KvQ{(!R$a39cE#)HIK z9(-8mPd~kGKXPUYx=@&;$05{w}e@Y1NGWup;eA4f#22jSn z*#mSGaAxue*S1ab^W)jV?%iSwww2^0P-N82snW+S_0RlF%z_%3c)7ex>b0}kfO;{J z*g3+bu&ZFJjx3dy`b4fh-X?X&aV`=YKsG}J2xj|LW=f~lxmUj7Y@qKaja_*-lwH^t zg~pa8lwvGd!_1grjNMqWXNkzt7-I=Dni;Z$$XZC2vSxo1Ldd>kk8EYlno^==>>DP^E>ys&$)m1?|z=^nrp7-36GK_K4ajf_N!)23C??4Ge_#R@uFAJ zth4`Kf8NMX&}#E^8eJPzRO3o`f9cd#&ah4%NM|PlA)O%{8F*T6`rLlwt4aEuclXmJ zV|6NWYURE<#yq)!aQ$YzlnxU0y33&I+WuB+@>HkgX4dVQ^!JmcF}0EKUNEX5H( 
z(n4apH}5_!3Z~n7Kkws#2*E|o%sN~mf1=Nrnu>!t{wdVMP)l5ER;4q#D9K0QP2NI# z6&-`|vSC)T!%`Cknf)<7J3DZO8N99gV#&|!TYAm02SeyBL;Y~&O?x*t2f}Fi;8riF% z`NMSI2|@fu!DY8{%QHgEcaxd?%ebWQJs=nLkvA8V8Pu`+&R7A%RDi)8&Npc!tm%9YQmueLZ|T1>4Z{*?1wowr&%zk&dWLJFH?(YdM{da}(;mnqbZd zmpp3h55JmPNb8jg2m&n`O9+EGA8?eW}Wmy)107OxbNam|h9qYUtGV4?Cq$L<}-bl&dO607aBFmtS z_`Aid%hN@)d~x*FEt~Vr7Eh>XH-dErq#*4rlF}Rw&Mu3`VLJF zsjcof=2j!c_-fs0t&`Xi*+p?1PLE5Lgd#>GWVGFkm~(=wQ(_9iqE(kVvS1#!v-NaU zq?I>{bUpk?(-?{P34-7W0E!EsT8g;PV#mMn+$=KU!`B|RD60spd|@Fm4XT5EmsQi( z60TaDrIXl<`Tqn5#+OHPvWg(A-Du`eU9& z#i!Fq-9B2?efzsy4oa6~Bd|L!UI}aF8F(KRE++CjNBGJw;_$C6A+Q8WS7K!W*32sehA>MKK8J%&_DXFbjS06-N4StA zeZR~EznM+gwd8x&iVS>O^8)jTdCKL?AvB&^KdJn+h+f0J=~cMHx36?oq1jNb zE1IL9JyA^bm=Nmi?zWzwtt*0)Fu}cXzMdV`SOlp@ud59Ds;`{OXF2ZQv90tCH*dE#Sct-NNDoQT#T0wXhM>-W9!s5kwBXCQ<0w ziUZA?%djENIyPg6l^UM)rC`2VuF3v^{9chsv(qqxa$)fiIB~1UH$T##vnXHaY070}EYl(S7OnEW!_I{&PWtVfuj)5Ug+8g^vd$z-o3Q2P_;b#b=VbkPq}4}p(VUCI?x z6~Z|}C!TcoUMg%b+je(qR?VT~iegy*vt zSW}UJy;NaebRl!w{bCU*XeFNko_DU)!U>gy`Ihzmto5Zw*&g@KwUB?Ampt4N1U<{L zOn1*i?-irG441fTvZ+R4)f#7`{G8CXtaI#Y@BXt+V(B1U__gyYot#5{`v5!uajbm9T$g{uY>o*%937gZPz3gL{L@I zE`+)&a+s19KGQ~?2^L@HxiZ$m=QN<%Sg<_{?; zuevo1rg}fjYt?MPRc;?S!_0b%AujL?M_#N&*G=Vo-}&=bJTvq~4Ww;M>u4?ym?>G) zw=G9~7q0~AZv17V5t8Ba@m;`on2t#e|KLV;q0)Ydh~4n91jew;V=R)a?3^rpYMzA3 z{eA7{1Bn?i&k=$C_bQgvOM+OR_f$cOsYjI_lIo*ji6#~y9LH^EN%pspv>5J@lNn5T zlc)d@L>hE5c92C7)Uwd z0hCzt+CMd6>(uR+6QLvL!p(5m&#h04c;n1Au;U>@o-;PU8%&A1u>5r}a|4n?3h^m1 zJ_^2Wy@=Qx7t4%(?b&~4IVgX_dyQ(CU)XkVG#Ht(Y)b`^^h;nFe0Y5?^AK(6(#^;( z!tZ)U^Kwhn$FNyhBMX)p-oDMHZ|6~Ws2_E)nW@i42jlM*fT*^)-#2i^nI*4Ijon`0 zDT~sQh&VN;dwWEShDZI!F42fwzCQMTa&c7eLrbNS8@v5U`j-F$)vU_^@OJa-Z z$tRuW5+zt8HYUv;xZ?Y~iX@|Iep!nY!>U+MAx)vClaAs6v7_N1%dEzElry@V*0-*t z$%ZSSw?1(1Y3pAo9^yPacB5BU=_4tTp>IL-nD!IpGqF%Xjc@EuAMaa3iW{|n)56D(f+0>B5$NLplrn)?Ps^sfhU3|J$#-c;(YuW{ z&uSe@l9W{4_B&McIQnj?lwZ2}E@H}K9L>s#6wS__$5QD~-cC5ZqGgsr^0a3ihG^US zC=b)x>yDqIdJ_ZNrCr=4-I7#0i`PEl zSBt`m-g2u`@f(|IOV~d)J|@3z7l7;c zS=o{7V6=5UljLabP9s4OS-aU^4 
zjNF~j$8)wD+|aT^d9AzdXm8S`;~Q6S7Ls3cH_R6l2+zx2n7kCv>}wmbYBD5|^!=V6ohpSvJDHdvt#lw_P~Gx-ceD z8puE06v8@9_3W)%cL$r0 zMf3Os5a>+)uR5+d9i7|K0(t1&uCey4Wo!2Js{(xTfE@O%Q>w^3>!jLfIsbr>Nv2cv z%vO0=b9b_wF8!@n3Rbs#EJO3#XI+_44%M92^7{+PObehlwP#;^Qw$|R`}=AZBob~% z#J@A>lI7s1e(Vz(LYD_j5aq+OAIeO{TGTl?a=dep+BC0-i3bKCUBYOs}NQ`W0j z`X98bZ+F5CsQjej^>!GF7iL%*7j?QeSQsh?KB|-xrN1e6Gc7juwOxT%`4ctj`wcoSW1|)+`4<%Zn^DoQsW5vYj#cT^`(vMH>$~JJd}x}yIlmDMg%S5 zeDC+H51m#KQ6i5bNG6_#QwJjMuCLRmkJMaf@UQLSz=!r2+Z=}-qIUD;&mozd&}Xy0 zm|Y=Skx#r?||@NOTJC?7waI4Pvw!O~?(U8@`I5-6)hAWvFhuh3Ub+ zytrHP^s57Ki1$#5*I09?f1la9g4p|5=N;a~Z;ICpb@=<jRy;ID_)E6U|3SL;aWW6 z7n5G^A0PS9gr@$(HKagPT!Gzm_Yz4$Ue;~Zsk#t}ODTUWr7 z8eLrQTZ$=KYv{8n3qf07T&kapTprtC4f7~fPjQu!J9aUoQg;6 z&C5>R0lbTu$5=Hsd0^dX$|s_W{R)(xs69_;u{=dzZVS)VFD(RBL67Lkn`rboQ!S0@o^ThseiA9 z+0g*E2F-a;t|)6)i3xqOitSF(n&Vscuxu6fGV)rS?um`X-<#_Cz z<|?zaF_Vn&&?EI!F@wd?6`}xZ8UYOmOZftRYTb|HrHw%83Ak{$r`Gj`Idy0!o5q z;Xp|U988vp3>YX6fdXv~Y;=eyHy5-T*2xV61RWr11T+Rqq?jBGDZ5j~l*I=C66J9Y zmCmzI3Vc^G> z>L>z+;_Ckh*#%D^BArqAp8@Ejehy`R(eR5y-~r1&>cqmuf#^&i(?gB~C?g|t&_d+k z2Q3AILZN^3|DL1Za%4No7y<+Rvt9-+|BpRoy&ME`U_1nX*1jf ztP7=bfs|S|q15kB?*6Ni^C%|_;eb3n7Y9l_vXmu1GmOM}l9>bl?f~)|`$+QbhAb_h zC>rI0#{t1oa49%Y)R{>1KuAj;U`}|HhqDWsAcey_iHRL-a~_X#@I;euG5_yXza3xZ zx7~lJ9El>U5a;y6H6<1xCC1-l_Qqoz0U#h43IP3c0cBw@85qzJ_ydDM$S4$*DnkN0+Z$UCk6w_lO_5G29uG4{~H6rAP{mo{;&hVpyZ1B6NAZsVE?uw zWAJ~Uk9_|B_#^09n+pxv) zpOb1=eb-{v{Qg6Aeb)r7ZaeTiuZS}Kn}hiMaKD8{+qrk$-_kg=xbfum@3=W*z5Q3o z(diGYod_SaADQSq*!_=gOaRxsO;C1iS{k?7o-T~XkE-hEU)w0rV2vn^KCn5BY?$o+ zHMGM$=I9>mb*P%R+PhQhTJG;vDu#16yiNTdZ#knu_FCxu4#30e-}JQaJ^VK34xCik z4LearjIC8yo)Yg82z7P~dpXBptWhuFFIYUmbC8_tNb!Wu&uqAInURehmY8T!L2Y{k|yt~dwFfH%d!{Zg69%t zSK*&WOroj8&D~KZ-Vf-Ec~o4!UW5W2u7?dul6r`eQTIjO>vgequ78UzJ_p1(U4L73 zfAn^@+SR=|zj^5M`B~a;=+m6~92S^5%N#BQ!xFQqg$|a$2`Y*!s0iUJ)r%*w(id~p z-+y#@UT|sCegcz)c3l?A9$Q(uD>P;8-U^)eS8VRVFH<<=7SRs7ZeH<0wYd5HdK5fo zoQFZ-AC#vl1(Vn)FfB*_bd?&>Ct&|$TYuAxxB<)Dm6$Oj#7_w?H*ggr|=Evb=X<%b-Kq3}gfSbPV;s*y98#9%z8!3>>>8piP02`*Oa1I0fQ 
zg}|IB9abBDkIU>jJO@Yh4g2GR72Hnd;MY!-w@j&(2$4NfcYi+d6`ueZT2EY;3~|pR zVl9Zj8G`1yF8C{_kX-`Q+x9S~=Q{w)d!+-|8`#@*Sg+jWGI1k_m(S%7{&Cs#E>22Z zFP~jOo#T!fw@2;@@1y0(JUa-#duTrIANg$9?~rDY*&eWdnwKu1<~~TEy$t-6H|UT! zg+K$g<3m}y609X1xdX)TLBJyt zaoWW7OWO{`h(DZXL3a$zG%KsDB5JNtr*}e>I{p<@d-M$-Wi}1MH|7$QmB@yv!lu~VBVVB%#E0%uFc|}##DcFKfkV?+o=_89Mb)n#z&fWP(fm@ANjBZ$nO&N63Re?{t z+;QHv6xrZ|>*6wX0+LuNh#=98M8li_0go)kZlTzCQ5b_sW=G3@mC#{viQ~={R#yg5 zceXic$dD8#e!WUp^jU-gyd=)pMDGS~+NhAP$0>a{(el8=k|QF06!KaOeGn4UV@Z>I zRL@*lgqT#gtO{t@{sG-PxGBYV;N`UG=EZq%)r;g+V$#?AILT0woNF3i zdtn}n2f@T9Cm~QQuHjl?Gp!mIRSzTqLikrB@aZ2up0FDg z91t_9LlES8MW|OC7s=P|{XL?hrynej!E0nDMssHzI<>SQdoe<*u_rUYF0LSy?ghc& z%?52lJH|A^1dGGn_I&LpoI-}i&#u$P@PXx_yUf6hR~ZpKdggq8qGah_gPk?REVOU( z89NLQXN)gGZTAAZcA`;d=>?Ep+W3|A&R>HZuFN<+E$#vwM#qxT5gT(Ct%bTGY$&TJ z`V!^_@SPZRq->>8L;zO03_PDNxEJ8K3gn?TrOkxEN8}4fpmjqu=f<6CS1U$q(=Ysi zwovQItJAx8p$)mvfPnB`!88tHlJ+qNzntlP;Xp4Fo=rh+ZUV=vQnP2dASguZW&n<8 z4#jRh*H#%Jm7kaVACrP5X`vrZ-u{~Tb(rDYR3UsDk@^w?@ZuXzS2#}B($QN4Tml=;mzig6FU0h_UzUQzVj692|2qlvV zk>5n%OJWuGrC6Ts8%jJ*^%3y?g5mZK*E&S4^}+d~ZtOm-?3N{m2`Nt3?a3mv4D_RomGWyrYVFY=BmzP$>FfOD4g`> z3gu07Uzf=;>z~C(d9e_xZ!IGkZhsKUvLyGYU^0DSvLi@4}^Py_OPlfM}rmjM2-AV@ZzoqXBK-*}r zj&yo0!;!rvDu_3U(4N5N^i!asfi#z+r9!OtH5VBLNKoMdC$JS#9-#wUUNA>z z(1FdXeCy20M+X-Cs8D?_0+lZZVeZ9IN+@qEJT@nkr!>$D6QJ2BHN$p7T8>v+bXnmK zxGwRqghn5ql@xlek4io~LQZC#gm!F6#F&R5jw=wZ*Jg$D*JeTH|Ffjjdak!~E-7{7 zv&^{axwFcLV162AiJN4krHbxp4LKl}D&J{9C7YGzSB`+8D*ckp3duE8{P2AMM(Vf! 
z<5kHI#%M?d#t_ck{mRw7S1Z!jQBJ!UfadM3HIBOR!|LW7zEe*ovnl1P#^O> zW(1T#L7egz{L5pDK{WO+uEourI!kja#`o`1UrC|oou%2Hp)Jg1;T4JnwA;7}e+G z6YmWyP5`b93<4DP09Eai#@F=1Hl>L@Ty3xD^w}`79?{I9sgr=FQm-j=_g1bcT%{k9 zwyO@2*cZyJr(6^^VR}A^au#`Vx7Fc?!dgwDdUW546f7?l@Ys0@#ylTl>1Uf)9nbo6 z%w8EWqA9;}JgKo@NB`^q5HAe@4iK%@tGOgmh$Y#^!+Ewa)&WF>aeVvPXtdJ3;mbOb za>03Mu8MzUOF3W_d0oPk|Lun~*E&bjkol241SL9ax=d&=`_IlF_ULLjehSMpYE}wFmg2 z)N(c8D}va-;jZb}^_DgH%(6A6yMow-QaGI9)K|de3P)V*losXbAeRM?5rdu5ysKdD z(5f+7QUk&B!l`U_Fsj%u&J|9XD_`48FKE((@w(5ad7`8;o!`g_ix3SwwjhZbgUG=i z6iuz!FxU<7iihE=#(YW_CN!(NI*iHrG@BNYEMb3?V77gZw`3^7A0s_bhLdUyS5>p- zT&_+?_fs(06N+9*LelNOr%ewB?X26>yj3CJnhtG|eL)Njh>4+Y61BS9?3R^RjMhDSB7N;%b;EPo8PN4{JTlUMW zxSe(|R6S`UYSs2E85@+#)F`@c!p#hSgUwt_+Nl!d?fzSknsz=5&)_MwSbGJT*VZig z>QKaxS8`c_m>+COXs<-h1S;>H*n{RwKJmlKi7e`a3IAt7Kitx|&(R372)M1%Xbw$WuhUDo3z2+*N*% zoz@y8T<~NWJgelra*ar#Ck3k32TV;=@>N&vLtszTl=M@t2X&$#h#2|zt0@$*)1A`k zWkkA>jHPMp%!LjU^>l1CBrE)*Nb`{RT<{UVe0a zWeKX}*5_?=5*o(FrLQ8b@@V8D(hgCLZu&$tMU-eAkp-SSR|`#a{G&0qBsAm0t?UYi zm+lJKL{yY4ZdelH^R@}-nQ8~-tc)BKZo#a~=mZn9#7rJ5CLu>!*hebj0WjGptJ@4rz`{)hX)k$*T}o?z#4YB3 zw#b_Vc=#JV9Xny(^!AmHN}Q73t3ta1Z)%G3vzfGdUK-GBwqsbVtu)@FRIFXN=%mw{ zTD}RD<~amt_DwYuk)%=KFQwBI6o_AiOg~DI=!MlFod~|Z`i(4$ldvOWak_@Eub#>h zn$ICJ%XA$~y&K~QlfK7cCc1@d>>=^y=X1jRo*JyrxSHeIR9+G6B^@QDz*o~_g(~|Z zY|AP9_aH5Q-&4a6(}RV78#<&1C)sIPD(H%j(O*6gq_(Xu-KQG5(g0ll3N!%@cVrd>*f;+>WwpvF4ii7>VI5@^G?SV-XW=@fsYvDM6s zcX$p?={ZGAXY~@aRPoNs(uU!?gqY__H;w-tprsq9 z5jE^AIYW)q^sbzXdp6*^Or`r6E(bh$o7!jIjogIWt6>Or7 z^>mvXQxji)F%3X`lR>x46)h%%5I5{FtUnj*^E|3F4>|jCFc0p@@EZg4-ujI)6nj{6 zrs0rBtqC&PlGC!P3KK7fhn=iaW7JTvm#RX9PgX582*J@LxF#H+5EuoE9(itY$#M~e zyk^o<&5>sx;9|~ZIhR}V`#DC3{`qqJea01Ka^BhQI^=k7!ocN$ zLduaoR2>(F&fioR-d|8lrm0!jC0{5@uEqmvUjUWF4a;LgWaHHBJ5t>S!%SAQx7_Jn zvNv2jJfF#!tz(3RYYFtr+&f-u!t=|IQ&4&d2AzJ3ahT|l*X7_Mgzr!u>WHxf@01_bXk3=c;~I-{Y83ST zI43@tf+&{LlZSy`H`rMT$&1U2JaL15bW{xNplUjLsS}L9jLII7q*uSRX63hT#lKaT zu{rHUgmF=2)im1-7}*$?em2F7iRRjw_=3#Z=tQUrxHgob44_ouJr^KsU&Y^rQ04C! 
z^MoVEn3B3kF0B^B9HQQ-{zE;Esye6E!QNhtdC|z!MhbnbFr5!9VG~!Jhpe71r!v3S z>lf`Ro7!%%saz{aFEmM;V8O}QaMU9Nds-)Ke^0^v)A_}((l^NgV`^vezhUsdtN&o+ z|FGl#^#ABFD;qQGe+>T#nf?!D{x7a9>fs`;?DCH(6Y}uL#O&={{x7!~9U~nhA?yE;>VKwx=l@Q|_J5KQGN^bum=ZE57@C{@ zS7HT2C)0oL0sPN%46>#smWD$19)#NeBt}9e04pOQCmS1~?tj^Z|0VtBcP9J~CI4?E zF79OS>hOOJr|6}w2naBTz_y9rx8w=-u74hE!6Cn#L;%qnY+;XNjy!lUfK6rM# zxA}LpjDK_1byl6%R(WnY$9O0HqNHxk+XA`5B8+T*7chhNf z85dy2=qKRjrp3yv3T+>+Q-VOp(9fAWg zfM9HJU~X_~astl4!s7hin43<-F*vs{2a_f@GQZigFf_ZKgnxaZ3s92P42}v5dnSNI z!N9=iA3f;93ZR@F8<<2F_$Gk<*3b=Hc6(+M&^E^p7$stA0{&_?+ea8^JF_E%|buoKQPL`Qm7Wq4UTxXSIVE6;`Z}|j<{bRWu zyyu_Uky?|RU9<8toT0Uv_1OZ{|92=)aB6wr4>%>+JA;2fPcCo&;_Ny!)b0D=z_)NV za4xV-%w&uXtqf2BDA1{$&GfB0d=>t4f8SW&R9GuHpWe^(p0Od!r*q(w6O#)#NDiKE zz&|nhRUT~OFBoG3qZ2L$e(!Z?Hhf`4ZR=C191ndSlC!Z5HK9iZK(8LOPYE|WPjK;*;xHI9HL5T9?7 zU%UPv{Y;eDTA%8kKP06oHI;Y)7y7!NSUdi%AO;R!>7BqS{*~DlE~RH#CDxAs+rFAspD;7oa8RPz`(G;&3Ej#d7rgTz@FfL%z6WC$5xpU37<>9*8yL!S}chcVPs5; zk1?nnMNEd3P&g$)7>`M#nQn4+qvuUPrq~^W+VYwpHFvGX4 zSyC&rbkOYXk}b;qf3Ze9C@p)=!)8KP!pYgN9}RZ+iRpm~Np-QF#Pv^}_bgS>9pyQ@ zg!4H!1F(UD})gs-y5SPC~H-|C~=mkWio3LF0&AqH{s|dEP zkw!YeTw{fnbsC#T?`NHXkeII2i6!0_P3CUCj!POE1iUDS6AB+rso z4cI>1VyVYNJ<)^rGq!R*q8%*AH>Qli>?C20IvQFzhy;8JHciJVncHy2DcEZHYMAZ? 
z_hC)XQ2bnpzDr^20C?axb#-+^8&KS{4VlTtQ=yC~w&A6RE`PJQDnNSmt3lisP^nP4UdU8%%$%b335mZ3H^*nn69n3n7gmy$|*_5}lU% z#o1wkgv$%2FGE3i`t))sPJu%^2=GJm>1u-t*Mr$+yM8&ko?4EXhXY+j->u`I2snYUBX9O@xjeGmSRl>m`C`1&@JwSE9$SdAh?Le5&2yDM56Ua=Aesl$sI0JUX=&U;Nqi@c+;)}p>r;cFT(U#i;)(s zw+0Q~Kk1ZE>YPlk7gsz~-DzKxF9)f)*dxYaaI1)IdV**~f>2(3#6q2k$k9owV%NAq zNsrzpPFf;SgFpoJ{w}>gFc}HthKR2jRAAHj{@|r!ctZ%=DDMN!ARAT5gLt|Mf{27D zpP;x5-Cal3C(Az7-9Yd7$*);un+_rhZ}ECO(p-CW7xLCf--U-TK#skQ_vpy)7={yg zdwz1+p9)_Z&)qCpk-YE8pI7`$_L&n95@*^1C$*VZIpP)%j5c^P^|UF>_-KG`A9IPr zJBpBP3#ZCA^zn*4dx@{Sf_8Dx;SQ1QxIZ9U6UKGoBUU8|o$s<7S{9eV$J7hz^!V3% z4Muo6fab?yV6@u)$i2<$tvx6uACrmSxH(2>f~P>hBM$C22YhI#c#%w;zIK6;4sHkL>vzpZq{`Dmh_3SPJ1f^$-FEbm^reeN`& zi)ovUz(pMDxL%>Xh<5KFFw(#o?DZYYWn)iC^ZbS&= z(E+hx!H7a5X%dJXm6(wvEpf<2J=gfD7lV?)JL%S(Ao;>Z+!p8)omzdT#UA0W_yF*2 z^?uZ-zf5>snYg;qts^bh1T-);XV)RE~Re^h7NB z$+(l=Is*>GnK1`q9gakwGxXmuRXohT*C>l2i&wsL?!_LnH?+`4Vd2r(yaw|rIe$#| zK`bG!(cCUc+o+DJpfAgit%heTOsDf5Sb~X{<2jk^tx^`vWdu$&K(acj7uI=Rjjy2J z3M+7|btjkG8*r8)OeLkNO?`H@St~nC;(;!ieG^CT_c;=KA^GM3qiS&t95Z#O3`$sV zZ}_9qUrUJ+&(IC7!-vful{^hDxRz6E6=O&iFl82h?7ng*Sz z;421Ir6+LvW4zPLMhc|;MG$PRIjE>aBf<)#FW{;>yTu}T*zu5=?o zjwQ`a*4sR-)mJp9LnZ>5{Edm0IP)ufxTRd(%+yj|>Eb?+P$d=(#lQk^CO@ z`R(5Bp%*By{S;v!>Q++TJz4ad=RjXkM&eD@kQIAN>#KmsNYT#py)Q8li-<2PtHp2~ za&6<8s!D|YFQ`nXGs^tRkCqzGIrExZ>QDWssS*;}sk7Tut9p zm_~{pZ{2qB=g@V)ctK!lG(l^gla^qMd)-S+gu73YDhgF`>PSv(VAnrW8ZPn%zM;Z5`s{O%Ot5IBKoO6p21V#>l#!!T@*qm z`6B!}gJvfd941!Gqg)peFi*#x;U6eBg*4k{o4*ohM-L(Q0*JI=uJ?%6%RWk#628GYno1H9qY#-LpZ$<$;!B3FJQ=_aZe(M(;aGwBb?X6`;c?rM{Hg#GbF z)}Uc)?cBdLhaF+ZTJFMdc42SCRJeTwE>tI)?=B^?Cx1=LoyuWHsttu%nvETT+*&8V z$}25vZv#_b#N%?ESg2vBp9^o+6Y)|VU}+m?5%#NTD!{O>{R>ipFv)g2K`@S=|30xY zUb}_DR6(BeirFc{uJd$T~@&`VyJ zX(Z-huQ`nHequ^|m2q#vtJ*sfGY0FHzInm{_V)>bZ|)YD;p6zzjekM84l?W)xJ4WUBTv=p&B? 
z!g#jy?cIff<*XMKs~I z<)9^#t|qawTBn-tl*;`eAkBG0)X-?qxJOe;M%OFQNFX+^h1js zTqmD|bm+qa+H+kr!lQ-S_q9s!Z~c&mxzrFgp!V^tmD5o{DbdE3hYEab(MQqRIBun% zK%!=8qPO-u4YH{T(0&P12rENv_5DitF;>Au5uve~3aOWm3kQa=;ZqV_(bMS1F!gQ8Jhv~^r< z;yJR|*gN{NQ~|ltN7qel_joa-Z6bR-3*&^C^-lK&ejSfr5E^0X0kNUR`J-vQN8ZCR z!J-(2LrAL;+^U?YbNnPpyrP!Mx0?rMO%q zWgHvg#CPJCoi5nN^@!Aj8Li?4HTumiVUxv9vAm)x(KO`Z1@EK)p{3o`)PGE?=!)pG zl7X%KMY61U&KQ2^v&mM40*oU0h7k@*85hkj9QE=F_LPWS!1g|2D!V3I*mj|qjK z;?Em_zcXc=uYB=p!{C&mGBqz_NHF)~;jPkLHzS%})uo|^jOV`X350EVwvAjx zm;k!h==|}}~ zprV^2ec(03DrSiie~QngXFG8$0cNJ{qdC5`x_DNPzO&clvx!DZFydMu;iwLz0(pg80S$-YCmXRsyc9 zmI^Lb%M#7`=7A#72ts_~-USVr^e}K}hG~>R@nO$gGg;uTn1K=oPbp7?+!z~1uPh#k zHEE(#OUsy4k@#wsxJs_l`Rolr-C(c?|EV@Pbs~0;bV7h!nEtjEWzyfXmY~?1_Ui={ z-@-<#z*m4470>x|HfJCn8v0@5HpjOfzVJM{YBbWG(0K)Xo@l-x`*(Yu`d%fL08?pD zwOO^tV@EM<2Mc53tUvQc@2PakG06E{b#+ z={s^3qq59C#CtwwDYON4Dzy_$Q=PTrH4g#689Q($k)XCP8hcr%Hd}xvFNJywM&C|lIvUC7T znzPbi0-FxAuL_0gr&|{?dVz|rn`j*nD7ShwlpVfR9B6a`?`=hj7q8Y35xp9(*j{Fx z012GIFC%j2&JsNhe!I4cb=*>CV*FC&o|$t-) z*ArnzBD7`o7CdgR2EIp(B`W!}sH2Nfdt8i;M+F;k8E;gu9lU8E9`f$u9eLjVKL#&X zndNGl9Fhkxraep%aj%c%-Jhz7Whqn?qZA(sf;Y1QMEd;7?$msRZBM6)=8C&^P-k;X zXNt4sn0IW#0xyz11J#@^g0tw z!J`^-v1ewmt}l<}Apupvul8XsauY1VgHx_IPmJN$#O|Ewi)U;^b}fW_0}a0&W-euzEKNW*?6o_&z0&xL!!YAC1dLo1dVj29E!~kH% z&Nqjhh!!RRmynH@w;wMeUxeuhvaYhMf2g6X?pGH2>b>N4lo~6#p~=lK&osEz@?;z^9dmvvIKpvQE;LpT@4Mk^Tw8)n0v$Fg0j#ytEbEZ@Fgevftxt9RLkms}ybY7UHX zs?Di0z8N8LKnuy-$RRUA96hT?EKhXQ=_EvxWb38elh}4Dio4rbR@NdlKJP(SaVIPinsN51n*!3k*BSn{^~ZZ&ia-U zpD~MaIm%=x{ z-Pd|S8q%a&sY5I-ZXf4RbAO_?Rj{Bv0nWb>j%-6mQ`7B%Mi~9CYTXW^VEIn`1`?wF z!E=P0$&B7OmARZdv+L*F91(fso&?I0s|&aUzuoF(9n!dZg_<0DRNK7bZ7dRk41o{a zhct(}^WM7@dzp4Z%irl|>qA-O(yn~5z#0{i?D2cOV?GyaAG%@@9C~7iocX_b)bXHf=^o*OE5ea%h3VQ->T(3hAhPUnFqHf6 zTy>X~d_Jgz>yGR6FM*|BEfDPC!$v-rNvm#+N6v~%ips4jM33xW46HgPJKJgjW+}WP znJT-DrhLQPZp*Yy?S5(C;(8)lU77PYbv|9I4@nbQ*|)5Cf{cQ{c|y`8x>ARmjjKmM zp69}$yS>D#NG^juI=SC@E_?4}vOtf7ws9md|frDB*Pfdrpv|O-eLOmB{ 
z+y4lNx>+rTfX`(;N_K%Hh0bL_KPtUfmo!8&w>-IMYWVMep`-AWGUIC0Yo2mrA54aqH}9je zk@P)#{1pDqEc3DD=p^}CQAqA9-BZ#w_J>!A+Kzf12$(D^3j88}AI>O%ftT8?n+TL| zA@kS@ONOk_E&k7)qV)=5U|RjWR5^RU`RaT=q>(*sy=RJqAeu49i)v+QDVqe<;`Idm zwa+G`vVBdc0#=jdpx6-5RVdXn8W2K2qq*7dE9t+ z!)#h%2z&yaijhcp9l{DIbR<8oSF6L@L20Y#r*#JBt;$Gulzc?G-213FEyYRiXl+c<%1(jWs>{Z7VbY|zB2~D*_&d-z3 z+qaMDinX>j{iuOqg<(9FTLZbn@go%k7VnL@>vTc`GI-}__qiOJAG`mmZBVuUj%yP2 zj<;!e6r-f?x6pzddn)}4dAG6MaOhS_|0%nzxH|Lp-jzocIJ=ZGF8BtIY{e~7)QAO) z5Lp)GNwcyALzpevS(8Dh4wg8-55?`kBn(Tk-u2~9V`{W{9T-1-OZ~M4|{GgI{ z2RE(2kj(6|`Rl{G=Y!|ug1A&iyYg9~iZvM4%2Re{yP3jxS*%S(q~z9}t3#E81ANMU zV4M=q`q4p}Z0D80`biW0s5iQ@ww|_}w9(_Y0=-ojS21Xc$QUv!vj6^Dc^9z)le=GV z=RkV1#6Zv|E%w&fYXpmR-?9tNs|IJBDF%9@OW@x^22(ii#C;jgfsU;A%EN&dEHLq@ zXqVE;lx=D3;`iWf>sYvHDKT`Xrq;6Ttr6g2M*Co7sG6=`hb! zT>qA+g(6bjk@zojW#)!c{#_TY4r&tA2eyp2UKv6ktm}{?D+rqQ8NrU)ZbY;LLlaj2 ziv)c%j4XspLia=B*NEbB*hqnC2RPnz7x@vu$(yyVcW{gH7?Z`a*EI3J?5}5NRLF&Q zqz1se9_piiEFge}!B}x-eCrA@P0u}H9g4>=!aiTIh|y-^S2H5_jZzT6N_xOoGG58pFtrE3kVZ;(fLb!* z*lR7D6(JJPg%JY%M0S`inkLcPoN$e|RU2i{K@PJAYOO0VPtoPmM4Wja#zoV2n$#1x zp1(?oaSG-R6+oDqoj|~Q7`eWgw9|88@llA3zaf|na|{-zGgmjlP4@BxcOB|8OJ*dl zJQyi4huk&-qlv9ddnxYbl$CVlr(oT~bOJuv|Sgf`x`XMO(Jn_%Fh8PV!-S&)5an7H`>gBQMV)aK&wp>ZaS^xe&lK*(}o+V`m0)ug(EjXjFJRCTFP9xzpd*a4v!)lIS26f*K;6!s^D%u_)e^fE{+8<7@45Y+kN5*QdDBpn zo=i5$svhu8>Uig`%!LluGqU1FYe5VGlG$YA=KR3h+*Gg#q*jLE6Na!S*u5C7Lb~jj zd0a6%3U_JVtJ!3^^4*9^eFMyc@Uc6ySLxE<1NQHv;_pIKG;F^(usA1|6#15MJNZ!BCIiX#QJH?}Byl`C<_le%AXRzl` z_lmzmYsv>P5m$6W^+ibEjOspyR=~@pPmu@sg)c1YnY-GmDs&oqd$G0leP~`;zBOS{gnp)w5pM5N+7nwgF z7@dEJ=wu)Au1PfkX6Tl%s9|@G{(S73ygsu}X?JqYBQkiREopwfQi1{Q23L!)_pgr( zzYQOk+P<01Qel{^mywAF)=B9OnM^}rERxQ4$|+G)Xg6!To4c6I_^~@x@He`Ocx*;9 z7K%fF{Isw!YFV4@a7&lBCKrMgcjk@7B{V8vE>>82@S5!7-D4gA2>3~hMP1ukuP5!X zOn`RYK`5l`#uC;PA+#^8S^{i%yhf4dEeuhh8d9A{5YJlM8%-~-MGMDq9uuDh4%?*< zR=gH_Mk~w?pU^NE$z-Ib3{cqW(P(I48NiP`vU_yobK8F>W<)O3)*|UO}5@D{b^V0B* zsc84dt!1ju@7z1Y!L0a5+HB3L9&WZf1KJdXCv)Pw$8BJF^X#WAZgUL{=#03u6(Z$_ 
z#RjZ=Lq0aE&V`z9$C7~HhNdFawb!{Q!fA|RD!AyUu{?m2R!}#`P<~EJ%~Zq7{IRkw z88X!jE+q+ch(e0J+!H8wR^3IYoncnV>vY0?QtzEu9)oGl`h{v@3eUu9+(CW@gUz}R zfs2`7nb>rC}rYP9rU@Oj|Hjv7J8FxzSQudP7(AiTnm5 zlz5EvWnoRKPB&TA9RW;*gk8n##~);gZ)d4@S&xLX135Wx^f_c6dzT*>$sKgW?sK8D z7E}Z$fkGamFQWM%=By1Qku8 z{KR|bRP@6PFC~?;eD)>4o;qiB9W!~LE;NtGscoC~gW@C#|Hgggc}R1@czNoHAd3d- zp1ax0fX>m2RW0SzW(>;-x%hT{PxGIj;j^oRc85vG~_EkUC< zEF~9T$u=ZCrEg@tS|sF!m~LXKMK_d;t2R-6$oF0Xs-G@9NIIDAOI92>*96xrrzvDO zb|0~I92>srX{jkkx5dS`*xzrEjxd-L3ta`+n@Of&AxJk6m_1@;Sp|B{Gs`}45H>yE z<%E=H@;8JZ%UN;*M~U}Ya4#)wb|VOA{|h%j$iMrtWCQ2A7(@~bo;7(-=p`uk5>%H& zNhy~rnxR{DrLyj#`KgXPSp#~qlCw``Jl8pbn!r&zBhR!VSV3v4!&JE8 zkpLAgCMTqtb_WS8CTU!B#>KO4Ub{+1e_h+*2Q4tNt9adb4*!OU$;*rSg*k5$y1Xtde;%qPm2->M~e%fVYD zZS?-bqk({#>i4(^e&$L7iTnuJm4i;pC(sj|NT{O04*%EsXkQL1XFqIj9Bx8fI(1u} zQ%HTY1nfHprdi9fgaBviPxB6iaOUD|n)Zylt56iT{Ghpl`Lt>2WlY4oR(o0V9d^_Y zBb^1=y#&Fn%;I71NrQE(@E$M0xk3hlpmAk!6;CS|SVI+Lw67&F7kONbO2jE67DD*w zV~7o#HNG)pjs3UIzb21Of5@%-?1}d>BFVuDru%0G>sh)HVnI93#Yr|ftDyLV+Abzz z+rirfGwMFXxfHB;o=vegvVTdiW%Jrfd5#md$;v6e_!TR0RAXt{H$D(D>#qGt339Rm z(WOCbt$=hAK6LQ?M(K-=+AN_|`M~>@q9T#JtAR|_zQ7~B%X_}#f@VX9^DcmqMAt#n z@x0ivdKRM@?058_QINpIp3hufKm)3N!jI2}iPC1yVxT)bcUm5ztS!cNkVGev( zsT;68^r(+n{xBcAlAh4)qYBOjdK}IgmEy z^b^1$UZvq4!|`^TP-rF3@e9*rnkH3WhH|2$+1U_f1J?W{gXm0E>DOfxU$mNcNXjso z$aOkFkwZYg9Z!;B;qBex2z@HQ-4uQ5#wtL)B+$HNY>*!&d$kJ(U7}k63f}1SwZ%#=UNXk1jqSQWm+#3jMs8_ttYx4ZUsXYr>v_GKmQy%pbCJ4=dh? 
zClMs;qbvAqjERXVN6N9QnWxA7;he$MF{h3HvbD2gCWi}mzO&!Dtn@vvW`oRBy2=72 zb|pu!u-8+<>eRbnt_oF`-rl-U$kM{_b74=r5az6A5s~(5fZ50wyTQ8xWMcl>Z{m@` zo$M0fYAepU5h-uoPLKb4hA4Se41eFa6epVKO>bR=|k)%aSB$L(q%KfwxL={^mIW|Wm$ zsY){e>)h5-x$8ad(-cDP!nFIWn4XD+mpDwSo^l8^taT8ErU3rv-)NFV3h^81bAmyl zF31+c(cGspee|S5uOMGOzR%6i!GtSm&P!ikp~2*REWcBoPY)b9YuL;)F*;??`eryY=}}at_j&Q z`N;GfJ9|RwW4noa*hks)@5hZtgk{MxllHCQY|D3X*jsA`n&1GUMCo3IZZ-3#Caz)N zlb!&lxajr+26;sBv1uqoY7mxnzlL!|U7Y%~UTeUQ0SmSS^s^O05#snRD2gbLJS`-r z^Q)8;?=^zo;qfAwJ3x z@H`7diP=G!3DH$e`&c^7H5PT6wAw>_X!wHFKk(H1{jH4OY18PH^!z=g9n zp~4?3=?PjmFcIWTg1&QIXVY@I3Eqjro43!{oTp=yTk)3Ba^lZKITLFzbT;1m`b)8X z7F|nYq})e%cpRoOSrg)6<0E9c=6P_nxE9LG^C5t;;<){~6>iL8FMonORGcioK- zqC5|OFFi0pH1xDf&2&^2oT24cj!5Bc^tKh$wry16*pDM9P*)%rxgYwf2Ns~m{^rh~ z>>3ro(68CG=-UipDQzO)Ky1#aKr4E%HlwWB7;J zN+dkd*@ThVHQL_H44pW0+|x2x<5Z#AsuY1bjX4#?Os`=OSaqaFY-S)LAXL6w2)7Ht zNFwi070Y+Da>(&iV{`=hC$;SQ9-WlJ9F7sbMzAT~mH?KRsO^Pgx6XsM$Eq^2iTzi> zB=T8CzT5MmVvaO!wba-Sd>JE=iq@)dgc@>*5?y zrF>1SX6N+$Ql3nlKjhIusVL|O*wCl9TM?Z7LCgMG+?3sl=ztB~1R%t4BQ4klY zc=l&fK({axh7+VW4LoWNr?R{G-HzXWnArKuz0HC1vxSbcPVdlrzvk1EPtLK+!};Ju z?G3>+IAA@q3#KN?LHl-+861_IfhdNsU}EW|lntm-{{$7a-mdtWL&t`0ar&8G?;+GQ zDo})6TNd3ozx#7vA$3P+VtD{hWofJ-C#A7z_)t%5%+Jmm^dZc^Z4Oom_r5m?;C0Q) zdH<$mV7F2wk$F>s+7^$^J5h3+UOG=vKoWp2QCb(Bv5efn1QujS(KMrw)UB2sG4qQv_;c|#L-`N`R;*fnpr|DRAE^<)TPUF+*+v^pnagWf=BwA$&l@><$i5`#PuF^S ze^)aWV6v07n?f(ujD zHYydn@_tN|b2EPD>EB?#HfX&5=AoX%%0iYu>jBZ^)7af0(H!~(GRUkpi1c(ltWqTg zJnaW>o7(&Locj2g*?~+A+NvzVr_Hhh9gyUvun1DJ5nh;8EqTQ9?}{7w6v`p%yAEAO z{AeRHN#0GCP_(GdK(XFS zXae<-wEJuZl#Zp1q1R{ldK40>0`PUhVCr~CFc%S{LO@8u{9u6!d{w06-J`&xJ8tT(X`0)-f^iU;7M#QYw-K=$2&!VS_Bdk$dUy zD?AQIn|jK9;Bf3;rV6OdFsE}tuR^&w8q%t_AqVNTLxO8SRh)Zx(wuxQWBmA3vz(NL z%Xcuz?Dl20MH2F3_*o$d)297EXxWRQ?+cef9G{jPh|ZO#3vG2@E^(DPG#+cIDEO~= zBL{vDwo{0oR>MQkMAK6sILnY47+~jH(2Nd4bu_AJldE|Lg~Zz&A(nDJb$nClUM}of zM+5(ARPVe=+5 z=|*BpuXXpuntK@{m8ZQh?J-BDN_V7|MN4t8G7tPP&M|>Oexi}EPNpE0hA!(md;vwb zjQFl`19z8^!EEBYserbD%X}1>T41n4%EK1S0HbByqD9U#`iRj1<ND(nh7 
z=*M5bXd}vppB|>n3w+y9?nNHLQWe2FblvHe-O-H)zY)bLn~xQSAB;mOwJVN`G#a3v z$I|U!7Sp;~esg+OL&qiY`L)C6)L~cLgY%^OKpp@#0h_FO2S7Ho?gwqV+Oa;hKLHHh zZ#4}LV_ltvj#1YWelZ$#WcW7Zr5T5nNyX7ZCeyLxBE6%_NOC3RA~ft9_l(>#oe)*j zL3kYHL~>N+3WeW&t!5e2vKe572>P6=uO<$bo^J3x^(^&6$@WUdT>b_2fGn1`3P}Qc zcCbG9(3u~8-MQf-+0QtTj_W(4arg4H^jKa_d#aMdC-FVz`KtRZxZ)&4I2)P!z%GWPc9KcB3{LrL5W)AsVWnLR+ZwUOkk2g4S47W61U05#9KMcOG+B**CVr~d{ zj_8;$xUCKWIMXKfx9~n|xg6AkkiU;6f8BKF?(xPy>e)6;ZTInwsDC(ES?`LdBQo)i zQsjgP`h3+nJF+-c+=nTz3b8PF(u_4ue8=(#V}~@p@^`v{5JwA!x>#oblC2E5pnc`g zpkD=vW*TFJ?xZf}fdus~{e`i{qF4SrOZG$UCJ1tD*U;Kbwj>qlyByrsWC!1z^w5t{ zNjr9LjET0~u7lbhoT(aPU-aj?23XhKk7g4YnbU$)V~X`>()`bpo)!bWZ{W}AXb}hkr`731F=Rv;($8^D%RGs1>jTH56s*UbQ z%(x&N;faTa!C?dJDjO-^Q%kHwwW6|kzf!}lJQcy|gIl={9sCc*CjC z4Vh&^NiLH`cU(^r0~{SlAfd(}E^X&}wQb1Hr1&9P)G^Lz*-Sf4!b+1b%7H#*4>C?ahuLZY>2&`IH38K0Xk_;hf5HUsh$rPvRxjmtSQUX4p9XxU8GRCP%=GCShTT%b=y(8f^Sg5KTqqE);DS$dtY%z{PX zp03rhDCWSTbQkx*+LL=G6ii^J&Q4k?dMhGm4KNQ6v|tPop&YwtDkdlio}nXZ{XnB& zYbdpml5NDDj>yP~-dFH3Ir07#qs-v`9n}{>Rk|QFeFkq_>_&^SfEKo@#k^gRS9H6s zf&lAn_{2g!yhltC*#zBHOa)R}?Y-Y-91Gp6fd#)fVh4?{>`UcHPIUrQG1g<~76s7S z$}H!d*$t>BGYBrWlFJ5hB~DVmA#K4Q6#mlj9dzksJ@{1xE}EawmI#VdocGsZS_`~! zQ(F+Ol1K-WpRw208j5Tm>m0pvu4K?b-UG`o7WCQyY5+;yyx|a_Sq%=qSxZ@yb2f9C zjrIwi5)r;dJ>$6Xp~@Es5s?sUzNL(PzYl?R+~M}!PcnKYMaQXi8x{+i4-QW>)xeKA z&_=o<0@yZBom+-p0vkjZOc9xVqFzd7;d^}``edrBk3NAj_DsQ;Kvriq;iAcXyusDv zudY9eV{d`qhhq_n$iY@i(eLlJBOFPV^@~Q zNA90!`AKTI70NY=ARW1 z(YQb&dB^8nx%J5X8+0MOPl%mVAit@_8QkbJ|lo}hs?&#;PAofmsVs1{ENqD$dES(2w`fPP^j?KZi3@*f;M z<9~1S_DoYa71Zs~XE3=nZx`sqdwyVxGH>bav>70f2GAG=YPsw8d^;7tSdQS|TfDQH z>Bu77hZ%m76^mLY6{>iM4E>S`d%tzM9fT(EI-j!B)k(BVp28&s7HQ>?eO`!C&NqKq zw#JG8jp>s=cM8O>I9MN9eO}w zKnx~r#MtKA#A2R((5stNe~^?Qw2pyH;P6fC(wNWjwgk&gv?K79OwUuv+3ts47Jh(t z77a6(!{7hGb9izK*MFTz#FpoX=}SL0S@q!Rrp|a%U{3yWvet=DbyvccMzCF<7%(hC zXLxrj;>Fw9Koj7=sqaETWQ0)fk4}RrIdgwrr!^-jD8o>P8H zWj0ol7NY9hcye5^kb-27*ALdU$YYDtvgTm6=n^#BXGSV}+1!0v32GZf#XcH4=Uv3(pa z7g(jX$ZkZ`S#n^i7YMRg;dQ>u|gh72ZQ0xG02? 
zVI~E9w*zaz5VH`GfoGe5DPFKLa*Sam(0+~8h-o^px(zwP{0Uw(7aCo zL(@pND?%G(;2uJ&4RjTTAj&Sr=+uEU^6PgK2H&_vPm7@`8qChGCVt?k*zABq51)iW z7*nPl|Gg3};-{xg8)s>-$yY@OT85^weP#UXj#948+0LFT@r-)W4^rb)@Va=4!-7eD zt8i(LAX0YRN&#JmLYd<30a>3{N%j2)yq2HBlgrNNPhkP(gbib>H zA>-W_5<4C8#nDvq+MQ0#yaa=bt@#l$lIQDPa!1yAIPZ577Hz*tb|xqiZkI&^0dWW( zT$G}N^24?&?{;Ehrpzx-sPnIPw}RYyG{yqZ_c{sAs~?rM#-{>5n~g7;9WpO~7QgZf z>9NW+LjC>Drf1U}&mBh*d;!-5WwB>nqddmh*16(#X`F#D?ua^}5>@fJAI3{YMgk95 zJ?;(YDAL&5pR7NNo_4oHzNM6k_z*p?mSi3!I0NT<%2ZT7Y&_jDCs;g)y{F{Z^Nopz zb0ho^ilmEVs?jbbNB1J^7h!W>90o4+1Z|pIdJF-29}g>bW(f^gyC5$@S<2ON=_nzP zI&0drH&A(lwDac^vH3Pep$BSv3{(l3LY?;RG*c3#mb-qQx7g;{J(FbyI>HtWXaM?i z*?8bXE5L}8NfXDC38Y12Swl>WlC8Q_0uC^h3dtm-M+|7CoZ)mmI3itN5SZe_Smj(~ zn$EpB`mQGRjQ;A2C2Ay(BQqDuPCoJCqQ%l>^6_KM`rR}ox>^L#t=su!lCz|bQ*(6C+Hj*e5wZYU*jR-3cw z_>(tkYAlkBR}uNS^SiR^@pMW$xU?&Gcz4wq}+G9YQSS z*5;QoALckqBlkwWrynJU4$)r6OTez1az!AlO=yV$7RhqH5s^gL%G<_WH6b+V4!uHqDu3x8HAFicJ{V8 zfSyP1w4~Q8Z~FM4FE*$m-$}^e7$jB5geHwrpn`)QK~n6Sd|H4}j^Ol(*Et$j0f_|A z)Pd2V)biT7r1ray1HN)4m=|zGc_grlusAigh_|`n*OzK7fXvWfS;_^CU0W$CClD#) zC|FcUk8@`@%_1B9eBon{n2_rwMaylbFl>p^iJ)8t={t4C>N&z)?A1v#(*2gM^v4tB z+Su6Ui0=hGQ!;YIfJpX5f9WjJkmy;}#)J6u{1mW06SatAtH-QI@LrKL$U__$r-k zpDAU`p4o~ z+ZVzgvfi29EFA%}n6nVrf(pivp9vYy>ge_f<8}>3DC-!70m5~l&6u}BvFJk8peb=l zOm@5RL~(8^TkvWA%dlG57ia3EAlTWmW_-r#Fp{`~l5PxoJD__E1Fh-*rhG&gkHe^a zNCEuy3Eeq+_YeFsBui;v_gIc&lKdGY=T{)2zJBoU*bFX&uSK@a_T<@VU|-D_U1nZs z5Z~9(r#l_ZT?pn)o_yy-8Tq0S^xU#QY)yrpbZ~dD%w;Y* z9TMgdp~xU3C6V?wji?f9MMKbA^vVu_!b!3QQ9$QrRSel!fw{b0XAX!<6Gf(7a2ki> zw|hFxfX0rLl^fG5h%GcR@w2+E1Z#GGD4K9Xuf_}{i7nB~1tCN8wG*rNljEh`w< z6-^Qs&A9rk>`~b`G(dU==NL`!4lOv{fb-GPWZ;g&n$>r1PHN0T#KN zP;JZktPLVY{n-$_2 zqqC+G*3IBO+u+Jfij0;bfV@@zM2v4xX`qjESZ~d0Y2+XG@j!Z=_a|A9>*d!2{9$D- zgNr!by?DT=EjieX5k^XQrwNVFk5TT{!0N7D$d)rttzupw<>e$B}!3^SU+;jzo1wicZob5 z1pvX!%Aqzc3!*%0A${P)Xm>?5S&{Pekdv%H_LaJ}kjewbu%Ug3U}fT;y#C_CgNcGL zGZ?!O&I6m2u%2P%_V6T#P#P8 z(FUQE(wRbjjUB$M(x&0gPn++2TYW%vG-~^{Ky{czmkKXf|B-IK<~uxhmIN!`C@0zP 
zXG_QpXvZy3oxzBOX(Z!}b+~9rD;rxSgkzVg$&Oq1Y8>BNN;v6=6mHNMJ-y76DKsK{ z+pQaBjO$rT|NOr}1}r?ylkNbX9jF2GdgG$=zSpm`-@LO|gI_bf>#QBVX(@2!eC`r{ zk=RN7XrN6sJI6W$aW{DS6`+U1O8!5qkT0b@o{P^bUtz>3?^FwH6J}RAPszRtDJ+Z) z1NOi3n`HMXD0J!WaeW=gH7-liHXdffrdb@9o~qiZ@EsTfb5l!zf}K*jd}AZPmhfXW z%#@f_vmO4ry;?2usJ(D$Kn=cTnuX&f6200)g*WApi+4Ndpn>~n zf1W-?maJnLFDx49=f&3KWDbJ!tVYU|u#rl4UuNkhMl~9C5b4#LN~8G@ z7&BBhlVzmjD|Tl|B4vszSP=X`?3gvzOK6S;I$s%R!$@!QGj1gzpjB__K8GQ0psCFT z$tLoY$$!Q?$ijFy_#=NQ`E6leLmciKNfkLTy&(M|^0*vr>PZ54@RZ4%wVz2zj(lbK zlCL*=73da&+Bm^jQ&BKtjiPn^Pfv3D&-#?;BOM@bwuurvHt48YF83JmgMg@&2u(3Xu5Xv}jd5YYNJBc>rX-9rPttG}1JGy8ZB&HxzR` zVdMJW-1EGU%cvNNg**3Dd63BZ-GMIqaPDeBx{D!((^4E$`Js#+Mj|kPD_=G(qSB>us{)+H6vd5t$1K#-~ZcW@?RUNg6#T zNZ_VExPJrfNhR17HSo@gYk;6nv^MrdIi^7a8Qh`xB(`iM`-k_QH-EJX!Yedzv+WE z+1E&ag3ghEEJrLN>%r`?5q2>b?$b|) z93Q|#%1s?HEwyuvf319|#9Nnm8H@RkH!LgX4hPZrvhi|SDDY$-TSKS(zs+;$Vk|q7R;UJr$HM$LWjB(m zyi2H}Vi~8E6*qGGP*v+YZ>cSdomysrT(jPk$tB=2=j%#VL$@m}mbZ>!aLM zFl?Nb7vcfM9)#IfJbbZ=p0d)mdJkNKC24bAKlAoSh_}-1zG1foK2%8f$>eV#J9wI2 zf|vljTjGzvb==d`+t^Y`2ik#QzAZdamGS+#E$&@>(1M2%|;o#ns$XB!+xA|WQSxM*{B zqsQ)~SOTZa@fXCOp{eS|+54r9jWy*w@4?S!uJG-x`f0T7N@bet3lKEB_TVT$>gX@V z=zteSzbN3YLLCcOJmP3aQP0Xjn04=xIx+P_nIWraTo(eoQ2wdW3ZXk zBe3wXOy+^l!OW~eVHe$})1nnE_ws@W_<7;$XqP|9KTZcHChgbG0bah|!0QMRKwu3O z{br}c)2l4{WJTQ2W>o}h1s&Nc2E3C*9{e++gsx3L+6ese>gjaOo#Z&s_^2rQd@DQf z5kmMrrT)q8t(4s59mq7X+}=Y2WM@sxZd zVpD|jWz<8I4*=Z&2P8j0mofj^Ry;unb70Q(0LBkwf!yDOT*R~Y1uz_#yA5^l;rY?_FkSIRNTM}9ZB2f5QZsHX(yjo%IG(*7no1xk zeu?U_a%&<)1Flh&C#y{jsMzWvR?iQI^vGByAJ$y~XElg0@Dwl}Ns6^PlFBdHl>@m0 zw)(gz4D7C_{+A9?7JwoHMUbyod3TZd>UF7bKV}w1Bxqn8-A)9lQ@%H|&yE&8%$g<@ zbxHPq_)Y*~j7HDh=0q#`V}zf%XXQUtf(2*tK(u?;F*l4-b(Y3v+F3AYVPh~oE}=s1 zgVK^ZDXx>Bnnc<%-dpnCj6l?_+TF*(z_6^Ec-Eq zWy&a9?&gYA(L>D|p4`!M?l-tJ+4`oYXmy=$IW`d(@ks9+comSZ4VxT?sCXi;qQbpk z6rz7Qn)Ogu@us}WXpt9#qP!MxW+@OD!v$>^axp+ zg)D$?AE(T4s@DPK{0q~7N zHcX| zBX>=onXld=u-RXT3(+{dy4t2(3DNjEMw2FpA5qwPL6pftmfNyt9;-=>c@f+I+(nyj 
zE8}x*g^`C<PIB#f?o|>i5(Fvw}d0+C`Z`;A=A3(V39e(Mil~^7RJe=lPU1whg+OL`k9f`(e+_*Z#TYXxJx;w5kVg z`1D{D@!`X#zK{g7YonGXN=AhAkiV*3K-a0-xH0~rS)>i+YPBnJ6Dz#Rp1e9eN*hO8 z!E6)_*33{ZN%45ty)918WoN(`5pxdzd?npl$QoI>-8F(reGyii_u)6gR6-|XfKUUK z?^@eTPtz^fp8RdtTh7Fa zPJIU6seZrw0kp8>wgnk@(3xKX3a^t2DveyGER;Yge0z4+KS7uu>Xx)sp%7Q)tauW>bX%QmSO|XeC#G2hZayC9;=Skc^0G!`q za~`eU+t_?;=Uc#74|TJ)w{TMXa={sF8^xJq}$2*x}-o;#$*1x8TsNl#zN20YQicx%%b+WJ{i;}Cc&gVL50AP+ z4BkoAjqmemROyl;h6a9wy6~_%k*VgMa%(sPy=9eLWpK)xGE?%VKzp2HM3KGLoN zvciPr{4hBaDqeuDGZPf83G5B{r+;1`d>Y7OVOX`U{|Z=7Ys?6m`@q23oh9(P zbo5@w2b-ebxewDTQgCqhDOYiQuJf2}Ldg3qFqNXr6ke!=0c2Ddq z<$@jJ!Cr<5sVPFW&nzkTo$+Gv-)Ov_`oHEHy`wvjNI+WGNO7BlLnDKkeWnl7*n=KK zxf}a(ot!J9b!ZPjvO!ySDgCoTA^S`QGic3KqT$--#i_4i2e$<@WDYG=D`FT3c_BUy zi3HbB`xi?a8liu6({FK<#2OTJZHVuf&yRgP+(+sW(MUoRKL|ULyoIke9L(3KpPeCr zaQA;jCwqI=9dl-}46%m9>mySe$+&R3u;{2-K+X|Cc}PSt^5Q5kR*f+Fbf2qHsp*9* zqV>nSV95BpnQLx`WL>NH8Ut`ajzA!;sTh&=y_1QL6oSf8M(wg20?QJ3y8JI@cQvLBKWLW!n`Tu z z$FR1t_KtaS!SoDqk*>7ik(|x+Jz`VXUH$&{g96Z5dE7@Vz!5$t(y2IS48?FEZ_P@f z;$Xdfj(72CYUvfk+q&U6`+Hitv4%~Bs_mT*C?F-cCfZUf1$ro>PG$kh!vR~E5w8Zb zQn(grEoqZTWgi4RPY;QLgV&9C+bUi<-tkq%Z)B5TkXH(U2-$s-4&mQNdYfYI;uGwK zaUmU#vjnOYPy9KxKt+Sw^hfUZj*DIuQAjSmnPB4CpTxH6JSe?@amjg^_Hp}yZKFb?iXsk`pD|Fr>vNy85iyxzNqr0B)oD1LI=pJM3{sdbZtJDC#}z#m|Xu z2YmyjW*xhxs}Xw^>ScP~Gj?4u*`o@;H(>9ZmPQnSgw8@#1w0r{%5S#tm@5^4lenk?fkj1FcW>v^)+BaUQSRu2kbraQbot! 
z7s}@h(#1$zE__5i$lh6+u3d(%m(wAI=5}VZUFe+ih z9y8=(o`&-Lg6QF2PXFi%oUHj_HBW7eh@h4p77V_A59vGizU`FPz z%qcDk;H-jougG`@!4AyqSEYus&ab_vyZb8q*FKk3*9ZCi-7PD_&(RYyC9=C6#XR=9 z3qQUh`q-bd-%fHXAd4Bfc!KEhIKwKWBYsfq_dLwMm`P$eA27+13OWv8rX}kb#qz@Nal)lcwf|8YB@Gl1M zYKY_y)W`;?0F?o`lU)}aKUA)ip5z3wC z;=L|P7TbKw-&9ALnkZV8DSAdu?z+-85{dZb_NA6Axvq<**Ut?527)@q^T| zh^i@QXh%|?Xx^s(v2ZMGQ$B9%a|cGqC0-5NNj>A;`K@?caJ-=x-XXIx&Pjr%;Ar#| zA@2KiwH7}o6UI!@lwkALp!X{N?Tm?Zk`FUL3rU_;q?vZ)2A9P)nOm@)ie`zdnS7Aw z0JsfY*5-AQpT|xFJb^u+&L23K(Xxmh%p1W3ycLA3UqcP*FTV8&wM7RQp&6YW+2h>G z;`0rbtGus&=atmRpIAA6;eKQ2_aUI!QCxzZHs00}C}r>V;75H#UE&7Br`rU?U)I7_ zHNPTzrLxM#mh;husd$jXPus->4PqsqOv<7E?8S@vjwY%%dsKStu*fGo-K=llvT?;EcuIx$MyvCY+BgvyY&a+gRE zcioaVyLYE~EFLy2ILOne?@PrBtAE7*zB<(x=*Ln7B14Po*Je8ZEz!?G6dD~i|BY^< zcR})snnf#w>#s~aB4%9gb&n56dQay4@8!QT5p}Tha`4ef<&z zUbo7bA8uzLwP&dq7ZGXLW|$ZNt3+maUtnyN3BX6M139G+l%52tAXFF}l>321!K9(J zZOouR$@o4?KAa~NYOsnur%0Fx9d2+=0bFHb4gw(14*CJ#4DORiJpOBa;GNXyB%-GB z_Sgo@Eb^RKeg`1V6#DgatQrN8-l^5+k@Vk$sq z7TmE#YArAbQz97{(b7tUj{s?r8!&R+8}@;MXnzJz2xU6FPbTgY9!!kN=B>k1LFGtn zjYY?{L;_Of_Iwbdi+LlJdKJ+ayH9hkKzhrMs)+vaKQBQ0FG!5o(-gg+8F`EVaax_* zf%MXWI{BA3T)bw{!VP13Rtj2M1`r`n3T(L0l{c@2Cb*atvhszs#vD{K^z-f6!0FGsZDpHXYafDo*bM>YB2PaS*hU=W>}+Xf7JDj*S3RF1aNWwQTfYoBdO=Tf>ab^ zTdZY~P0>zQVfhn!b6%E6UTrnF<%&E?e}C;8wr_HtGnj>Uj&}Cx#u)>n0NawHsa18C_m<*v4*Pl)QnVq}feI?2q1(dCBDOO^rbG9R1qJeo+sEek{4Ne7!qznuNEl>hG@;_c2( zx;B9|*^a$$J#B4VqRZ3DqwCqxtq4M*9;)6=FKr5{_W| zbS<)+*eH40mtmymf+?4lUte0rpFVNrqeX!|+J%>wp*4+zhm~_`I2%9}-&Mcje;z?l zl@l#6j|>XyGk1~NECoi+gs)Xt2uV4ZC+J;=e&I?R!*Wem&Ko*rd9$)I&6RN8)Gerg z&CtYw;&|?8#h#TkUE|OF+cA=JJY-%p;y|&ov&Bbmu=aM+hghi5L+Iue(+Yqfndev) zi4*PlPxwDjwS#GpNcZsg<+O+G$J&@LP5KnxLlkPX77~u;h{{Skmhqu0xbjnZC zxeW19$2G6_WEOP`4WSx|NkVr0q>@vb_Nwk>dIZ>A@cLp-Xo91Ot*utq9=5G!d^nEm z1vl+TJv8IEwEm2uG3*>t%ge~2>fmshv1iP>qMyGoXR{{?jgLZ%Ep-~o7PVtdhVMle z`;*Mv<4`m0R!QuA=iUoAWP-RsNHvOvqm;53m;HJU;{v;a6d6S`y9bk6H6D5}F0G|P z^?mE*BM%ci$mGb-3-j$kC*eur_T5pEWptSL5u 
zmDc&JN90f(4mz@?3V>rQ2?MWf$eNDd3nZLWZ&YSWKcg&v$xpgEcP7(eR7KjA{GlAUNWj5J1FSHr zc>LQsqv+V6{x%|lZpb{uEzQ!neZu%j4 zRsQ$VcEdNlQW>2vJaPW5e4fln^2guD;-fju{NeN19R1HQk??j=4wts5rUUx(Gr@VJ z7ckDlt6x!q(;R{2pek>%B*0+YW3F7L*^8{9JI%cj`#+?Eq5XZ#4{k>Gy@M04bKU1J zBB6z-YZ);9xvu8CsF>|=@;CF*ER3;qfbKE;7;ng*Fka9Gs#2mc6#IkqATaD^>EG3q z+q?n(XeOIAs>oHTONh5FiWsIBHmBs)v_8<;$wm2?ArpBV1sdhJGi@H=;~$w)&S_GL zsS3K4{s@rNaExQ_EC(SW#->YtQ;QHRpk=7f{PaL1;~KMA`rQf=t9@tjMW0_rl1U4# zK&6sd8xGaE+#<6SUV}!V#r14aMsbS;9wUoWQ`@TiaF%!u15x+RuZs4XXP_G){&uk4 zdNlxArI0YFNv*R^)?1{u*9(yU3g#YErvM7%S6=(Wm`>9Sil?W!?N{E`chutnU(QM{ ztU1;C+3_W^S({m@uh$A{Q5bDnJmrpO9{||x@41!#5qDln%DFv3|KGutj>u)+-*_EL zlqdi-boQ0czRPw#1LHQJf*2;$48)*26j0->KTN0rIwgjF^&`U8&moBBZ({3oD06#m z#K;7@UsBIB3glfjYh_ruhcPv>0Wd;i@ACGj`t5(hd0M+Zo}yIYj@MIXumkf;M}==2 z=>1L$T6&vqnf}#M?ed)XZI1oHF+}d3QHy!HK8)x|VBFTAHu24L0R6>)N0cZ`AaYFKTwmAP#yxA;=b_D3PEJ&zO{DZRY(dbl3n5H*SF{rB29nXn z%|N@eT2_K9Bc1Pk5B3W351k?0w*4>B20+5dkDM6`5QlEqZpN{Be#h1m5O%sH?Z7Co zsbM3EHgnu5Dy$e!3CaN34e8dE=ou0WGz0XlJVSLdA>jmJpmckhhDo;TM-xJCEC*Y9 zT$T&2ntE?uRmx_(I<}E#SNz(4OT{VzeDKSqP0_@5uCL?O!h0%fG zAls9N3^sCuSd4de8CCU4>zMtCO_{{Lo^Ir|=u;rk+1OpWkb|4M40S18)^&P*7*1gJ z6C%lTIEb>Pemy2YtUy@W0hv7g>^^RcTT7;=+tnQL%{>21CFB43~v21H1-#YP#Gm8ik_2;Wrw!#W ze}(G=RW;;FX8Z+HM=NCU6}WH0%jFjmE7e+|NYXc9VNp^0? 
z=Uc#74|TJ;y2#^F4QbP1!~Y^qoFp*NNN!RGmLq~2Avs~&B(%~~)g5ZEF*q(xGSZOw zzHiBWBf}+W7Q|s8`!K{BIP`G)qb2q}T!q$SCN432;sZ!y>d8To3DkTF&aFn6Lua-D zszW0lMYrkiW79|Zjit_9R#fYF1Kgh^HR6qbM&8SC)w}&tM)>CLuw?Gf3~2*ttj!o- zP+27o39de#JdcRf7J#$fX?nHe!MuuZ#V?)i;VnEwy5Yq8ZGv~guR!$V5#66jtTnWu z%cV|3-;h(IHhZ}6Ke0+@l=gJ(A3elt;K`Ek`|`PGIiif=zs?(Zfg0E4J@niF0a z#?21TwZ<$$xUjdJv_r0dTrK;59eG^_B{M^)nH<`|KE2FF-zvr>pxEI7SxwOIu8w$~XAQpWvg zET5C;8xv`n!K)u`#Q+7bU6-vc7yT7cWDJA^$4nSUxTvWer*>?R z9vU|R4gfTgIDfiPK1c|5MT@cCNHgyUL+Fg%Hf}Z;oW<2%SIlThqhOglT|3(o8(}?2OX+EMHUPtDbM0sb_691?%mP~&C6POH{ zkyXw4C1kh>qw>|3!Y2~YqlcM5(7H~ z2NPomp%V^e1Wz;-4hj+YGw>C%30UR&%jBFDiW{jmV+?Z~2QsTZp=hQSaTNWzM7&X8 zPUpD5qIfBmMFY1Pwq_2wO?R9?<7%h3TxE+H7jJqy?wEyCiwqp3iGSG4gF&x~|LW)0 z`<_ZwX&l~FIIQ!wQE?4!K;QjF0oF$V9WaP|ZZ2v@1YzN&As;exdx_;Wdu3h*aB|>I zJb2o<82MIACzq#tW^07J6S-@bLb$+@o)K675{d+iY@n}-Q4Cq;B^jr0&5u3jP`dhK!cwaq87Dahi=toAqmVt-g@orR8cRtd(07b&_=vTE8HDP*f;&UYN5m?|8@2xcJXYjhCgjzN{agGkvieWS77`Lv09Wr zKym>DJll|tLwC0=*&mau*1$d)nX*TprEpfIAdc~v%?ajyk43N)3fC2vgaE;Mm3+J~ zk~Lr5en+Qplx(-}+0DfL8oVM#-Y5E)(i{(Nss+}>hc=3i2rkRcivNpe3sp0|h}{W` z{GhN~TQ2X7fI;}Zq%kk8CkgvJGPDK0(e#V7es|@O^jb=EZ3K}~|x+y$A;C$qW7KuyVcETYB%0p_x^#?NMDpjQz z3}qDS9b!3{PX(a-y#wD#r;s)G@sx+?!m)Z*WXH742lH4r?HtcSupoCSzRQLSuzBj+ z4^GZKKAMe2Zr=$(j5%E=+gn;-eHn8;JQ;qaqx<+nWASp{vl;Y(f$RW?=YaZ^hu-mG zZc)h2L$N%DZ7OWVsAGyUz)G6=g!g8325YqQ3_uWJLGHfs0?ko{!gE7Mk?|(97MT)q z+jz&ReyTe0)ky@Ge2hxs04W1Zh-qhVs9p78zr8^{OnQ_3O-cHCFrr7%FYE?=PIU|9 zphr&}(-RuSs^rMRDe|GIQ1z$0`r-gAjP`p>P&I71JM*aIS-=^WS~;Oo8K})6+eE4X z+gm_j;WW)M@I2dR@pQ~A#?P`!n(d}4`k+gqwuc5Q4mOwGu3pJSOP?M6^8GTva=7e^ zjDegFL>%LI|*8($^v);j}{VORMs+WcKVEYT!w{XK+ztv~@34XzMZ zh1Fi#sBwOldqn^?Bpt$1+)FQpVljI4w1iQ_?Snzmi8L}$D4Y@wfrcSyrN8fIH!W6C z9!n}#)T(6!-;62{FVx2aQr*2k$~2wI8mThg;$XG*YP-~1oY;R)k5SrrVp&mQX&(2e zTmC8v;7^`Yy~Rgrqmclfr`FES2Uyo$#)bsp@#E;3=#WIutw1WjA_L4%OIN93d8v#| zv{SyU2*!cGaTg)6KO9;EeuLE3k|q`+$##$XQTHAoxGgFZe8SS01k%nA@L-%yCO&Wn zXs$iXmcP^%jNTwrDpTG68@M6GSATlGl0)blJEGvDeHDmGSd}R38;Z|AAAZMaOPp5u3nPTPJEdj}D?C|bYYF(< 
zSJ5r2Gvq15?5JZ~dImk>v-GL`ek&yraNenVC7X!{yP+OV-$8!H!OTue1xvKgLQ#t$B8IcKanZZo-%I_pFxN}mx}z46GBUmQ(i zk~)$!JN~!K9O`Sjo>n`#6t~c^AOe#8tvTtsnkI;F5ygsMJ?*qH%5Z?Ro+q1WDEu26 zK*o30U7F@;5509Gie+r6Z@t{c7Jt%_HlVXo%U`Vb<}(04$(?q00~0v#v*@o zG^j2DaY}E`XZ9!ScO5DE>LZva6yJdwhsz>Jxa_%T2Y+KfsubTi2vlp&>{!KPd*0?L zOQ~FzL^veo0|9pTyu|4>VYK3ndH;#uVKnogx9!jRgY7T>9i&@-PAOn?^Gyy| zcx3DbJK-)B0`rf}3OqLT*#isQ*YgPMa*39ko&3ZR$K?VVwfF2`W0mOY1u^Hcf}Cr7 z=g^$%H$z=%`}FA@JyY9FmA4aBG=znJ&~G4wA?d1gg_R)BRy%uhG$-Zd{rz^sV^g9E zc1H4=Y0?tLtN&a3%_Ri^{gba*<<-lsf#CM@?t2@{6uKYc8fC1}1uwUDQ~`-578_Nw zsrdkDrKP^pidYPV<7rw4a>l|n_(CtJ2AIchCDz)T50gOl$A?GaFjuv-I9Hs(*@l4h z5jiPn)onG}8%mYdakwkE3{`F6oNy)=(2NlD+?z=nvgW+w&I{o$=vKHkj(sS_V2<^x z%Ow6Eol$88ZuU)Is0;aS;y}^^-*#{$NgHd$h+XbWb=yn?pA(bwPC`>>Qh7NHPdLZ| zd$R@w-8?v`h&<_c%UR~Udx6tyMfN@ACk^)EZDxEboQt(G=EGW%^@=EMpi=u-zPitt z*P^}m?d$L7Z?N~0>yQ)ZItK;-rlxkvt_}T{7QH2uHTd1Gf!vUrdtT{YouEd(!QOM) znt0@L6#QpF@A-EgaEy%DS|T-xH>35=2b=#d@vl;o|HrDx!Q zbW9@lpAxo#a_NU-8>x_6cYl%>K?HpGn2ALK$Ur8aGmRC7Q~&;F4oz|EoS4@K&1ZES z@EL5#BpuxE-`A`<0L{0ovO-BJ5DFmmQ`-w}9{ z@RDc{geU=z(~N;jS_~xxUn3ou&c{L9#5J=wdtuJRISy-u%fQU|%-|6l01Wb+4lROtu3^I0h4hd2*d0dU01oxTnT+}YLatcQ*1|<>pNXd#vq}8pcbh?gSIMHD+23j0+GOW^x zqVI>>ZCw|tTKXEThOd}%LA-tj7)w#)my0bu2muN2iK`kwg*o^tx6#xrvMr}cL)}%a zMQYWZv!{^NA*XHuJ5Wub?P_>C)s^}?TW4d>op*S-PJY4^&hOqhf8nU9(4LiI%v>H< zTk9vlAeD6gUr;d-M0>9M6H)KX=7uOvh6x9MDcFSX>C@bAI-#C~!{t0%xxO&BuU`WL z;L&+zXT!_*7Rnq5-n_~-p@?z>ZQ~S5O|OoLBb4KFB=(Of6ySE|;rE;!F4~oWhjWX= zq&(L|{v9`a&U-qb?axU0XtFV*>eBCXdAou3?urkReyXjxDSy7MLuxPQVdFnlt>aCj zPZjFRFa$h+)Dh_d%Hpd=Pq^i}%pY%CXA?pCl;jt1OU3K!;_h-SxCZ zJZZ$PM@JG>4@1A}_CXt=Uy~z2^o*eubf=IPIS$nvOP;_Z6|c}P z!#0x6Ydox1_Qe4iT{K1uOto}s8Jf08#!o+Ru3cjjSL0eJ^Vd(=@OR5BR#@Rw`htNT zUGL0Iu0N(bM_p1o&;@~zQ7XKsJe4hymIb5sYIR6gc>nifz0Do#rv)Sxoa%ScliHYXNSxBI604x<{eW|1DL$V{(7>;MH@{Z1>=>e_OXdYg&`6T=11o5ws`?NL zefRKH0HzGT>j`vkHPm(MAi9tqUnahW3Ihxws~aULPH{`^C0QuYD-1?14#bxVtoZ0% znnP0T&ngVCJA&k7JVx~o<##j2ZAlFoKOrOU_Zf>)F%I%?Yi95hKTxzffCQF=ykd!# 
zu(Nh4SNd2wV}1o*>eo%Ye=}T2&W6gOI4Bxf@;bSm_>CD+yZSY>o*n=3nt5su3iu$_ z=t(WMtovRu?B$p;*l#Lzfzr2Lp#`mRc0D*C>PvJ&E!MEF!~S7>i?&<-;~ZfCLSiH7 zfl$DKO+G|;`eF93`EG7tKMx=0P9R{5kiX`(4UYj9PlHJ0H-oU%i_sLg^nd;0SkgpP z+l#D~d!p8(q6rT|{r1lz-}R>`fuy^3Awm(THwnu0%pvnk29!pGxqO(fbMvpMrV$5Y z=41o4S*df0-56c^y2Qd49oo2+2vBp*DtPWNrGIo$7GPH?T9!9wa6xtEQYHDa2%NZ| z2t`%RLUFm7JxbV_rf%k5mBR|68qfi6CUacPf%FK7n2lOVx>GP_bjxNtv8-$ZfoWsa zD&I=FAXZ7rK;#OU*}EUxw_C!kZcLv5ml4bt0OCM&UdH$m=li=^q81VX_p;1gg@sl@ z%P~~2n2lNR;)N!XeCTXD?EL>hGz2t{(K`9CNdC<(%H*4yx!m1>-xtT$v`Jmw54_20 zcBub|9r=TmS81I_*K2Xu7|-5--tdu!v7FmS0atDAoG$&SmsvoS6l|;1gBR~(IcGI< zL9Xw~jX@FdGqluTa$HjJlz{W?mr9}EiX7MjW5`e9jZc=^2fjU)Hm)KdO7C|R zby79_)Z_~QThrH9Cr1lORjxPubjdI_xkhBJQ7WUu15|srYv$l-)s)Sa^TKPwA_p2( zknLFF=^61S%0hDQN$Hy5p< z&lf#QJ%k0NGI(}Z{=b5(bF|Ap4Ta=b0$!hkupXImTnOu*U9u8yK+CTvGQ=0 z5#Q(_KR&Y?pr6ILD*^GL0|oGir7a+b%4sg^DhWOK{3DS}&crm4o~W70taZ1OU{4N0 zt=w#z#0GF4xEJ0yHJoGHA_3ZW5(Wn@*o9x6cX&(F1Fs0U$BRb_m}HIh{Sb0k37I87 z9<5@}<@6#u&q#B%uAEr)NJU-dC&NX#5g7TQ$GgNNV5v*JqyT!LVq2}VR&BM?V3!g> zpjs)*rX@_c2Q?*-nL_j2(pzrGDAzIq9;TzzBP*E;+(M*Oc%@uSeggOqU0us#DRE$s zr*x+Mk*=>ZcE^|$_km{uFq{g%29g*k!PYVL{+W{=yF+@x8?tEcumhFb(2`6cX(As3 zo`^CmTx1vKK0us%rLo?|gax)UXFq@OG($#1zJ^=_W_9iec7V4}o5I3RqqQzkSL%8% zsD}A_Iy8~ya%(*kE;l2u#c$@KMVAa(u;0kapne^a_dLq(=HhF=(o!?NT+3;c+k7RL%eW|AYsSltkkvecvF#Wq%sM`CYy z3*%(9Il2Tvv)G?Z;HG)3JXm41dVRL~`6|aB;_W&{wmaO&;A|YNImU!{;jZ`eFeo%o zZATA&;>ibgxN&{3I%(pRWSq09tlRx0ovrmi8g|1>tveM3#HSmXF;BhtB0$DAT`kSd z>G<4^ZFjAn&mX`vaI30rY#Xno0|2%wTtv*yc~%E;ROD|)OaV^W%A^?t3vfYOg@C%3 zxr|IMbl{cSpk$Q|#i!Sa?@Ct+^mX-xT)av5J2h z)1P8Y<0bN{$!b2!eX;cL%tMsrpx`vinnsP%&@x^wKtAz*6ah(0;ZgU-r3DL)wAV>Q z+(^ow?UmOyX&Q90C_m}jy0N>LG{A=dVHAr!b)thx)1 zEXy8uk3*GQGD$HmVT{2oU*cs;7ex)sT^dAeD`|7OBS?|=mz3+^Zm~mVh;N>8N?Sj0 zn}Z2RGGQ0{wok&2cKr3HJ2J5mf4~|PCDo=phs#CI+p?$ftwEsl= zc`Gq1mg|;F4&xb0t3`Fid6_;N2+H6z0Nn<{Dh)XwW%!G10>%oU5M~rApq3X>X+b3x zXta*=1cV{+a@1>n{4sA6g3X%BX|IwWIo#DFeHTvL8Ho|v8PE<*-ubA@Sx^y0A|3C5=tBE#+QOD;Zcm4^PZD} 
zEh)p&XIL-o*NVMuT11bVQVykC{)rNr-m*svS_1{uwntMmLq&s@QyRHmgk1(#3hB~{ zq7TBB*>kl2bq1vyGqwZ@GvB_z6@cH<8tL3R^b*#~AHAPW@EfT<}xCaLw;OGgd$gM!`Rr zn{Hqda3&rY8NdsIo^$EJm{Sj4@QPP*Cp*L?%!FQv4-7&l|6~WNp7^VF7#iRyvxg^# zX}f54Ou?dK;9>d<9;#B6X&psDL5K#sC|`?Qesor7kL@ z)u?Lgz%EB-N9bHZzEbMC`I?qKpBm&Z+z#j<#B}i0QxouYOpS&78>!8)jlXtP-ve)4 zKN9Ujdm~8|T?N~%kX@DfK}R-{HKz`5p3?#N*}yB&YRHCt*|3)2ZS~Fp=^}x5vHy!9 z;vT1n(k0N|xUWv;7+79B^!o9NwBnl??Xt5pr|S@PF>XqQ`cz^O0kMh8bCRk31$>|C z1>6VOF@E0|6FBWXR$O5s4<%c!M3PGW3A}fzVQzkd#HT_^~U&>cf&J1s*_*ZOw6xEK{{dGdOz7WKbmk=&F3vT zWW;dcF>uN)p2;o!bRh3(3)O6=SIrp``7#jsh3NM?I?dmB5`=3-G7hMqEY4gukAv!-0|&zthTER45_kGKVXSR71|WAI%X?6q$9uE|u8| z@?qS=EpyKQqu`~A?$tH0A`ap0*7p@X(pg*x{bCxiGgEV+Y`N0R4jOBGrT@5d?i7myeimlKVn- z2Z3{JkDe@%^4fCHZp!Ro#(msdkyn_WG=sgjS-JbV-by2mnD^}6AHP4pYkYUr1C&|^ z*SzZtc{s=qYLATn&jDp6j`D~cLVvbCc_2!iMWwd|6CuKR4oQSrW#_d9fR8SpMw(AZ zc#LVSv=$(a9RO#VL1?M!;};|>DSg}oIRQT@l8!-FT&2Q<-&k60Rb$$fGLe83 zFOoc7o?124L#cK=)|c$aN_W%4YHE}US23F>2EgT#s>y-e7mOJ-YII*N zSv^Gw8|QZ)3vcE#>CKtPv%KoYWWESL#-v208U1cAr`J*)eM>*Y0?ww{fxHeoCh8_P z*vWurRDcf`I}6rgp9Gv^jrcPq@sac*uAqG82ez#86Wdw=#QnLVc|K!|D|afNTW8W>Q+5_RiL@k;Tv=s z?dYncR*Mdl?qYn+g9eh0mk~e~%l*b`4Huu3AH7Ooe7^2PfA#kxoMmVnQj$hnxlJxH zBQT+Gp!j;L&D1Vxd8ubO@+k7x+5&!j)x0%LXdle*>#sNW2k7(}9eGFHI+S4L&t+pB z4uBm{*MGM{ovAO_P+v(HMe#!enJs z^ShhA05LFk{rYXVJia)FWO}CbeooI;lZ%abzZG>$Ga8#?D5e}xF%7;%RaHtJn#g-- z=*iVSIjcefTd$B26A=eTh5|#QTV5-?NY!$*P%7*d=(9yhG4-;>yb-0)g@3lrDmCDu z1UvZLQ3-2?y2j9l+6fUX$Y9X@@$M0RNSPJ1>+uo$!&j0_iQl#gpRkDC%Mb_(>F7qw zqz56&e`2+@F%~B{z8j9mC?FCWqYV2kV0S-8ud%9|%r=UE@Q zJovM*H0~;U07lW~-^&vYJc%~aJ#)e@Yxku3n;PCZ18aR{s)%~8WeXbLm7X=6-{Ma( zOt$kZ*=JRXk)&*7e-NZeUIB2b-VP>q%EV%=i`UkopJ-nnCHTFdrIq73kc0AE{>|C* zr9XcQ(LWCqjEu9P^CN$CBJ>9RlL93R=Y<=;a@@hE2xcyQS(*wJA)y1%7WMu zPbZ?Ny2vNS5Qij1n1KasV-__gpngY822i?uWu)?`GKvk41W;>bjO{ux)fJ1`5+710 zn&V+rc7_eKS)b*v%t7_o90ntGvWa0UcRR)JRbi9$3fTp^$f~@dr5|<(C!q{t*~nPY z-h%KPNS&<=pYIDKhz5vWMyy~~Oy6gRpA>H|GoMa#3ML(+ESe6OgtW{#bt|xC-$*=Ue`!M?=HC#u z)*Ih=4j1a;HdH99KbkijU6nL(22DMGD-$`U&zt9e9VMgw?T49S;UhH5=Heh 
z^w`DA%9&MAG6Xw!Z!azNFE2?nnebzTjtPYTrTKq|%yiXNL;I7uStzSm)ZjDmd8ACV zjfGs8H68Pp1J+h>F8nzFMR$bKo(&k|?g`6hl>}`3YBD?NOgnc+>mgAgtp=S05_R1G z4hrcAf@)Zlm1eet6U~N(2OqGdJ-;0O-goaL-`Am~Izf__W5LPy8@%hDMR7pj(!n0z z?%(WigD<1w@?HR$wgDHy9hKO@s$}}Cc~Z}ieXW5qIMs@L133DAb3>*M;M&P9iqiMl zPB>yDQs~OB8BXXeisb64hI~t4b4B%DoDRs=MWCt@XYJSN>iE61p6@lDEec`LXTFZ6TzJbnv+?UK0bnNdv&9^G&J5uabNb5uJ3Kio zD6GZ?U6lieqAFw)AmX5j|OJYS2yVMBmD0UqdG9IhY z`=ye-qgb)1Y>~92?;Q$CW4I3T!t>Mh+xJo2b%l!ec~ajVzC$_uIz&Yi;9wtqBg$E0 z<{s}Vum3V%p9856PR7ET*Qb5sU@fH#FOI-*^VrDP%d-H&-(fLqScAEb`vF(}%x6S} zOi%4d8%!Cuy)m`4M?JoA|5beSHm1vK>&MV@iUZzXQpGqr zKI(DlngRVj{4FpGJQm|@6AnUarS#osKD-(ju_$7fsW#bCIaC{uzQ&fvT^>!l%TC7f z8+)QbzYmZ9v&p%Y8P!l}-LB)%9(pyO}Nnp&4AMw{maEr281a8JCsL+xRcem5~CX}F(@9R??k zfYn{)0uVmcCozACBb!0tQvN=27@;`M5?<8{4;Y6tmM0}Z=rW;&n16#Z)S9W3i%3DR zW5Gp-uBguJ3-7`6yjI*UVh>>tZI|K;HpQR1*<;q;$uPac=p-RQgV#NIOEMQN@)bUv zJtC(_je;*9S?{U3apgxi!~aZQ7(H7qYrkPK6#hDt5L6f`sw8 z`AOGVUDpj(f`gwGFtAgMZrUpmUa_mq8hLTGI8oH`f+LAolpdI1aTX@|MYn4b>Izb` zf{uT_8nw>`rmb-CHVXprcjrcTRjc^}?#qZLID#64i9>4HQDrkNk|>q#RQ49Ktg>A@ zoiTz{a$kFF8mhf0&VwwB-wy=vOZ9~3vW7F_LE5wjyN=;sTz8+T)o zkN}k{N>1Seh(*xfIzabyTPKN&0A1# za&olAIb6%lq&J71xE?_;e6Am#dRszClLXw^rMdd5;=;ndg8IN88WG0y;Kn%wk9a(+ z)$>^XM1l8P!}n)xnhP$eC5ruh#xF=R!dEDQuG-7`!TcJ;qEI-Yf2EJS#`t)Y1#OVm zOgjeQU~WbU+|>)2NU62&E*||VfCKL?0r4{kV`5`<0jMfoBMAkwY-X~g1KKvWk3Ylm z$H9TfjTq>U|eIg%OsT__jwW~XomQ%}-dO$PtJ^Zq*x`0M3scoZMe#w17GRjqw~WZCTQ-e!q7v4PI3;mWxO!5`2108XRFC7zfeR+1jaS zJ1SKCtvGjpaBB=j&j)irlZh=He@oo^#>0`wd<;_cC3;LrxezMBqwI7&p2c4)c>7B~ zVP2$zS@I)o5W-k%SGy-jpQ4ccy%S9@oA!Isdf>stbi-R}P~2a+kiTrfvL!>PU=ji0 zXy=(`QY-1?5cIlW9)|70nWnsN&ic*sKSK+2RjMZf1URR{0W)eNWMC-`DrDc z$?n^yfv2@0g>62Ys)dm{`F|nX>k5PXW()6e-zXgbf$Z!3NhYUdSK%OKkj#``)F#*h zphPDlX{S+0G`c3{`M&mS87-6^xKU>NZgM)7n9Ldt<4*4HReQSc@_hf2N?Wqx{boST?lrmX zGpApkZMsVUZt~Fj3<+T zTl|PrIe@TuYYngAuUC&K+p5v>Y`v|L1g~s9?&S=Jec+=E5d!F%&+U&9vaXOMbXW$2 zuS0NF-Ib;1W`wI#=j}o_#{-f1M6ITX6=Yo@fY`yG*2TiTd6I()#dhUZpwz!um{1tW z$G%->=g_u=39QhG?-JPt+7jRQv~E}xF)M{Cky&6NEKleN*wq!ldNSk2%nm5LDgNF- 
z@XwDs-Y3#yFzlCLGID;iSjz5|_TSx2`c6H3*Evi8?{qkCBJCb#44|SZoJEkH(1kNB zhgf&Q>;GW9%N-LHX{J$PAzwy`dHi4!pgE?Yq#eJ%xn;;IVrP+y2_A`xxu;ym>MO$7 z$oAwjDRl2kK$9jsmy z9XZDV5P7BeXM`(A|H)oy?}gTm7PYPl%4ToN4qaQQJZUvhGX&Z}8_e))F=l%f)_f%0 z>qPthLtmYX4pKQ z94#qBvBu~d!RXzW^LSou#{dM~wEzx~q@)tce2-s4lHk>Fx$N!~f#xG{L==(Ll}oxv zyXQj|QTD6K4~=Vf>MO1Uau@{${jU``^hjDN>Q!MDcmB_A-r4t4Q@`kNy{wU&kGe8F zSdVdp)vw0_Kze-)=3#riI#*RA@WnxHCuIQ5h7imrX+*v1C0B{>Sw9SC0LSp+QC%``R>?p)Hy7Ze&GN#ogf|9nh9Da&5)S@04pUe0@QpcFNagsW zEcTjhiR{~dj&41DUoHwSiolvDzQ7>*gDiCLtB3oYRPH-$7{-GO3C*1=D?VFAN_HB0 zm3O^CBF#=|sUiUUdn;@x?jxxzd7x#5Csk9yw=0p^2be1qUPo&ZG3k(^mH(-5kd65& zCfjr*AT3~xZh+7M(GlBimnz>u;q@~v&WT|lMqw# z)_~P9f+{FHI^#;>GXgqk;3Dua@SeZaXL@aBM)|li;@k++MKCEfE&tR*Rs|8yf|$O@ zDt1aH1O;e^P-jDikZ6hsusw^lcIkKzmq9LHxy{Jc1Yirm0Kba{y0J{dvXkgsxW35} zedXN>EjA(s5YrkQfzmHM|4&*n*9Vp0Brhdc$^x%7nHkrlgZctFhiqNo!sTn;B2ZT# zTlTqgO$Q0?!$&$ZWudA@Gn`A2<$Qa*HY$;+^EiXFPBi-%PXn@D4AZj z%V{9mnVibL(4-vJ(du5WkyHnlaWe%9bFF~pIWPS`mY%?q&iDbtTeyuxF`v4SLh9O@ z>5+uQ^u!T*04}=|WPs;X+J2mj8(K|-=p!EB4+i|&fvPeVgB~%BGSoQ)nXWyu*eKiq zI&$>5)e7|HSA8-X1P`UQS$YX4cV$U@dc5Mo+-{xjrOq*H#71M1YwpPPoL-?hb7?x0 zSPW$MxfCFPh1>?oaM43E<&W*OeYdjjvrBG-uipcd5srL?bxnly1s&m_LWqm}$JGSM z8A439Au5;+^WvSE-u3=Q#q;Z_rdcrn{fsIx^z|Wrc?(S!mNFGiniE)P+fYi4n*G)f zD?u>^ccY}_Q;H70(EYC2ZOveCsG$gYIsE<7ckg@{q{v5RmRTT&KWnQTFQ&6GXpXE! 
z)LeXV6L4`6cJ>xBVd`>%z&J8i%QkQg*Igl-cFl%#aegzwg@{=%`g8V(ih395VJ87j zzn_FqAfk-lw3kU9{DJY5ClY{LZD@IprMM;M0IoD1q3<_$l0I?DeTkKXCYKE+r!f9< zw2hY%Gm?m7`yPXerU9nyY4su>2`sApr50;-MK6C0RmIL^5R0GW!gX z?27FBmWi1$WoDQeJ7voj$`(S1M6}o?TUmN(NLjLEOK+jbPS)=X_5IcR{(jf>d#>xb z&T~HJzVCDH&;2>)zvp>q@8oY9mq;jHV+yy$=D5E|QXb+|NHmKjohE<1iCB-H&zenkxG_G;AN^%KU%$o|ffwWSemYzXft9Mn-k50F=6e zXG8LB9?IB@K^Lj6=Chv@UloPT7uOct>Zg=dUT0)9chXNWiahhyz+Xa-mDlrY;a4%i zg02s&TORijdZjL!_#1^A^~pS?Zz2kx4Ga`-2bCRk7wNKY4Q;kP5n%nrNUe^bM#8Yy z7~lCC>}fd@h%0|~{^fUOUAEeuxJe~-7>FBG@3Y+_Yn)dRGF3sU2-hqWP(R$f~H8Bhyf-FpF3uO?S18i z#l?w%m$So{I4YAri5t80bQHdOCBOV;(|{TxnCA1W)?m7#Y#_)F6yLA(^8V8zr=mEy zBJhz#pte7Pkbxl}*L~p15 z-G>946DgVUx6YO4)*xmvaV0^@S61!kGJ(>57WN4zfCQuW%WB&;b!O6My=KclNceR= zgd*2l=Mu<3MRA#}(D+BiA*?_C{^IMd97c>89e1%S7V9-(WZ>gUtbn>3>d5MhtG$XX zPW2TVyFb-Z#mZqaZ1n7^Ull#;g-3{G zriL%HmsX0PucHC z!`y9et$T~t3wl{M$ibpUA*IoIWm#dCTWJRaOZgQ@R3J{HZ_MHhhX!HCl^|xA&VJz& z(J!?xyrED#4B7v^W7YuoWP2^1RayH^%W}l}9YS5hH7 zL^ttvk0eV@q&%BRAkrhfXunq;doXa$$l5lc=sSfl7C$!;$Y<2jvuE9DpQ8tRU#`X9X5PrJW0 zo1jL-gfH3gcvTtc95TUO%P-E<$HY9IGY0p=`H$m$rJpu@*q+LXZuimdj@GvQOs($Z zUe1Zco@{0^m%A2Y7r!iOtCwel`YK{0kPfOERb}(5p0f{-bxLt5+-~416Mbcf6nMl% z@jDkKd9kax@pZRzZPvaENkOUd7S-1uUfT8|@(Lcw6j{2_z_k{j+2+vRBVQ4)@#r>g zsE1TDi2c~cGrcgre55nfCQ9+0w1&~-$NVjxgW@BfFp273#m>ihqEyHf*IsAoG|o9t zC$RQXlVPf9!E?^Uan2wH0hYtnqNw#LyW4Tk%?GDi%!W0@tHds!IL{khSJ7>kIT?|z z0w}0{Nv(TSW6we5J;xauYrwjcPtw&@F-`7~mB_o7?P%|=GARDBzX zpZn&f@4p>P$w+#}TolRVe_(k(YhU{5qe=aKtf6geoN0%=bU)qTIVEpqk(JHX^}f7c zyoxD8*<+L1+oocZkVDGQn_W51An9uq_P)s)?)vHJ&B(TIn;EGC+__5$18@=QLA!bm zg0Lr&9lEryq3D9eBdxQ7vX)#=!7U3wmQ?47m~qKNn}Z_LB17Kt3xt#3R?ny3`8b$H zJ#D6L7NZNBwe8e3kW4!*pe35U`PC95Bl@;~DN3QlB_CV02K@FxP@i?l_jUP6)7kBu z!J9`5YjICAZwq(Y<%>U&nQv55(BZO-l^Sq|bv@;BJn+ooblXgnVBc7b8l{9v@(1do_6P)n<+REc_YDd2Rr=UsJrFTa|vUpAV4gF3E2w0sRx5p<; zC8sC*CUnT?=Y~wz7&2WpkqfqM_ln%;-L__N_GUJ$HY&h_q)-=rYM$3p}Y=DOUy$N$8 
zUzt=Fsbk+;tF0Uctpo&!9Yu*~&2@<#4|+yaIk>4~EGb6vY=XBc*aoA})C~P>AGmSYR2dudz}3t9lC7|2x&I3vA85xfXKkRN^rqLkl5%79YH%F)HvcF4+_FsnYo8T36O86tca>3Woah z8yrh`%K299J>1k5UpI!X-b!_4M-yDg6cQSRr#}%dU1moDfBgHxvb}g?1*opxMuyVgDK0C&MA(_2niYkM`*-@uT%$Bj4gxYiJ;8o zJDrNV_dS?D!KZQ4>T5zR1HQL*WX)XjOCpr3>NRwt3ki|5V41?o8IrA?7K>k<$U4lR zHyYaNG<*a;nqrx^GP}jiHHrBvp=N)qQrC+NP!*yr7I`w%8KQ_{>8F5f3}v_;m~(&HC~Za z?(piQyTbD(smqDL!&%pTu~Pmk&2^L1^OTWX?f1>n8N5PMO0@sRkH`2d8;vaVUQ@4K z#~$60coV&|@Yi^;jIqJe-J6@es$l}EYRXtcoL$7totHQ7=C5tAT-C`s_3!fRa{jYE z5D@fNbwHrs%46>9j;57@NT6t;$<8F4JB3Kvs}c<|MB!SSj$ zfkHRg?b2tb_vy~NfF{aa4~@gRQUEaEo`)F)jkg3K|EHt(t3zKtO;uGv+j07Y}61p!Cj zP5}A_G^KBIW}n!HPdKF)={HaOkoXSFnmo-`IfzXt&S#-t|g44}ybAnlC8 zk%$100#X49kand|+?5p-cQGspG2L=Nxf&S$SL&9k15C8xG diff --git a/scripts/proto/lbann/proto.py b/scripts/proto/lbann/proto.py index 1671b3b2502..c820e6307f4 100644 --- a/scripts/proto/lbann/proto.py +++ b/scripts/proto/lbann/proto.py @@ -38,8 +38,6 @@ else: raise # Give up. -from lbann.viz import getGraphFromModel - def _add_to_module_namespace(stuff): """Add stuff to the module namespace. @@ -558,15 +556,6 @@ def save_proto(self, filename): """Export model to prototext file.""" save_prototext(filename, model=self.export_proto()) - def render(self, filename, format="pdf", **kwargs): - """ - Save a vizualized graph of the network to `filename`.`format`. - This function passes `kwargs` to `lbann.viz.getGraphFromModel`. 
- """ - g = getGraphFromModel(self, format=format, - **kwargs) - g.render(filename) - # ============================================== # Export models # ============================================== diff --git a/scripts/proto/lbann/viz.py b/scripts/proto/lbann/viz.py new file mode 100644 index 00000000000..6ac5e9f938d --- /dev/null +++ b/scripts/proto/lbann/viz.py @@ -0,0 +1,85 @@ +"""Visualize LBANN models.""" +from re import sub +from enum import Enum +from graphviz import Digraph +from lbann.proto import lbann_pb2, Model + +def visualize_layer_graph(model, filename, + file_format='pdf', + label_format='name only', + graphviz_engine='dot'): + """Visualize a model's layer graph and save to file. + + Args: + model (`lbann_pb2.Model` or `lbann.proto.Model`): Neural network + model. + filename (`str`): Output file. + file_format (`str`): Output file format. + label_format (`str`): Displayed layer information (options: + type-only, name-only, type-and-name, full). + graphviz_engine (`str`): Graphviz visualization scheme. + + """ + + # Get protobuf message + if isinstance(model, lbann_pb2.Model): + proto = model + elif isinstance(model, Model): + proto = model.export_proto() + else: + raise TypeError('expected `model` to be an ' + '`lbann_pb2.Model` or a `lbann.proto.Model`') + + # Strip extension from filename + if filename.endswith('.' 
+ file_format): + filename = filename[:-len(file_format)-1] + + # Convert label format to lowercase with no spaces + label_format = sub(r' |-|_', '', label_format.lower()) + + # Construct graphviz graph + graph = Digraph(filename=filename, format=file_format, engine=graphviz_engine) + graph.attr('node', shape='rect') + + # Construct nodes in layer graph + layer_types = (set(lbann_pb2.Layer.DESCRIPTOR.fields_by_name.keys()) + - set(['name', 'parents', 'children', + 'data_layout', 'device_allocation', 'weights', + 'num_neurons_from_data_reader', 'freeze', + 'hint_layer', 'weights_data', + 'top', 'bottom', 'type', 'motif_layer'])) + for l in proto.layer: + + # Determine layer type + for type in layer_types: + if l.HasField(type): + break + + # Construct node label + label = '' + if label_format == 'nameonly': + label = l.name + elif label_format == 'typeonly': + label = type + elif label_format == 'typeandname': + label = '<{0}
{1}>'.format(type, l.name) + elif label_format == 'full': + label = '<' + for (index, line) in enumerate(str(l).strip().split('\n')): + if index > 0: + label += '
' + label += line + label += '>' + + # Add layer as layer graph node + graph.node(l.name, label=label) + + # Add parent/child relationships as layer graph edges + edges = set() + for l in proto.layer: + edges.update([(p, l.name) for p in l.parents.split()]) + edges.update([(l.name, c) for c in l.children.split()]) + graph.edges(edges) + + # Save to file + graph.render(filename=filename, cleanup=True, format=file_format) diff --git a/scripts/proto/lbann/viz/__init__.py b/scripts/proto/lbann/viz/__init__.py deleted file mode 100644 index 5151e313160..00000000000 --- a/scripts/proto/lbann/viz/__init__.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env python3 - -from sys import * -import os -import os.path -import pprint -from graphviz import Digraph - -from lbann.viz.layer import Layer -from lbann.viz.properties import Properties - -def fixSequentialParents(layers) : - '''a hack for models that don't contain parent and children fields''' - num_layers_with_parents = 0 - num_layers_with_children = 0 - for layer in layers : - if len(layer.parents()) != 0 : num_layers_with_parents += 1 - if len(layer.children()) != 0 : num_layers_with_children += 1 - if num_layers_with_parents == 0 : - print() - print('NOTE: this model does not appear to have any parent fields;') - print(' dealing with that ...') - print() - assert(num_layers_with_children == 0) - for j in range(1, len(layers)) : - layers[j].setParents(layers[j-1]) - -#WARNING: this works for tim's rnn prototext, but may not generalize -def getLinkedLayers(layers) : - r = [] - w = {} - for layer in layers : - my_name = layer.name() - links = layer.linkedLayers() - for x in links : - if my_name == x : - w[my_name] = set([my_name]) - for layer in layers : - links = layer.linkedLayers() - my_name = layer.name() - for x in links : - if my_name != x : - if my_name in w : - w[my_name].add(x) - elif x in w : - w[x].add(my_name) - else : - print('error') - exit(9) - - for x in list(w.keys()) : - if len(w[x]) > 1 : - 
r.append(w[x]) - return r - -def getGraphFromModel(model, **kwargs): - """ - Create a `graphviz.Digraph` object that represents `model`. - This function passes `kwargs` to `lbann.viz.getGraphFromPrototext`. - """ - - return getGraphFromPrototext(model.export_proto()) - -def getGraphFromPrototext(proto, format="pdf", - props=None, full=False, brief=False, - ranksep=0): - """ - Create a `graphviz.Digraph` object from `proto`. - The `format` argument is used as an extension when the resulting - graph is rendered. - """ - - if props is None: - props = Properties( - os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "properties", - "properties.txt")) - - layers = [Layer(str(l).strip().split("\n")) for l in proto.layer] - - fixSequentialParents(layers) - - #get list of linked layer sets - linked = getLinkedLayers(layers) - - #build a couple of maps - edges = {} - name_to_type = {} - attributes = {} - for layer in layers : - name = layer.name() - parents = layer.parents() - - #children = layer.children() - attributes[name] = layer.attributes() - type = layer.type() - name_to_type[name] = type - for p in parents : - if p not in edges : - edges[p] = set() - edges[p].add(name) - - #write the dot file - g = Digraph(format=format) - g.attr("graph", ranksep=str(ranksep)) - - for parent in edges.keys(): - type = name_to_type[parent] - label = '' - if brief: - label = '<' + type + '' - else : - label = '<' + type + '
name: ' + parent - if full : - attr = attributes[parent] - if len(attr) : - label += '
' - for x in attr : - label += x + '
' - - label += ' >' - - g.node( - parent, - label=label, - shape=props.shape(type), - style="filled", - fillcolor=props.color(type)) - - #write edges - for parent in list(edges.keys()) : - type = name_to_type[parent] - for child in edges[parent] : - child_type = name_to_type[child] - if type == 'slice' : - g.edge(parent, child, - color="red", penwidth="2.0") - elif type == 'split' : - g.edge(parent, child, - color="darkorange", penwidth="2.0") - elif child_type == 'sum' : - g.edge(parent, child, - color="deepskyblue", penwidth="2.0") - else : - g.edge(parent, child) - - #alternatove to above: use subgraphs - #write linked layer subgraphs - for n, x in enumerate(linked): - with g.subgraph(name="cluster_"+str(n), style="dashed") as sg: - for node in x: - sg.node(node) - - return g diff --git a/scripts/proto/lbann/viz/layer.py b/scripts/proto/lbann/viz/layer.py deleted file mode 100644 index 17f50809991..00000000000 --- a/scripts/proto/lbann/viz/layer.py +++ /dev/null @@ -1,96 +0,0 @@ -import pprint - -class Layer : - def __init__(self, a) : - # Since the protobuf parser is somehow hard-corded in this script, - # __getLayer does not match to prototexts generated from lbann.proto. 
- # self._layer = self.__getLayer(a) - self._layer = a - - self._parents = [] - self._children = [] - self._linked_layers = [] - self._attr = [] - - for line in self._layer : - if line.find('name:') != -1 : - t = line.split() - self._name = t[1][1:-1] - for line in self._layer : - if line.find('{') != -1 : - t = line.split() - self._type = t[0] - j = self._type.find('{') - if j != -1 : - self._type = self._type[:j] - for line in self._layer : - if line.find('parents:') != -1 : - t = line.replace('"', '') - t = t.split() - self._parents = t[1:] - for line in self._layer : - if line.find('children:') != -1 : - t = line.replace('"', '') - t = t.split() - self._children = t[1:] - for line in self._layer : - if line.find('linked_layers:') != -1 : - t = line.replace('"', '') - t = t.split() - self._linked_layers = t[1:] - start = 0 - end = 0 - for j in range(1, len(self._layer)) : - if self._layer[j].find('{') != -1 : - start = j+1 - if self._layer[j].find('}') != -1 : - end = j - break - self._attr = [] - for a in self._layer[start:end] : - b = a.strip() - if b.find('weight_initialization') != -1 : - b = b.replace('weight_initialization', 'weight_init') - self._attr.append(b) - ''' - if self.name().find('sum') != -1 : - self.printme() - print 'parents:', self.parents() - #exit(0) - ''' - - def __getLayer(self, a) : - '''for internal use''' - r = [] - n = 0 - for j in range(len(a)) : - r.append(a[j][:-1]) - if a[j].find('}') != -1 : - n += 1 - if n == 2 : - break - return r - - def name(self) : - return self._name - - def setParents(self, layer) : - self._parents = [layer.name()] - - def parents(self) : - return self._parents - - def children(self) : - return self._children - - def type(self) : - return self._type - - def linkedLayers(self) : - return self._linked_layers - - def attributes(self) : - return self._attr - - def printme(self) : - pprint.pprint(self._layer) diff --git a/scripts/proto/lbann/viz/properties/__init__.py 
b/scripts/proto/lbann/viz/properties/__init__.py deleted file mode 100644 index d9d65691391..00000000000 --- a/scripts/proto/lbann/viz/properties/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -import pprint - -class Properties : - def __init__(self, fn) : - a = open(fn).readlines() - shapes = {} - colors = {} - arrows = {} - self._layers = {} - for j in range(len(a)) : - if a[j].find('shapes_and_colors') != -1 : - k = j+1 - while len(a[k]) > 3 : - t = a[k].split() - shapes[t[0]] = t[1] - colors[t[0]] = t[2] - arrows[t[0]] = t[3] - k += 1 - for j in range(len(a)) : - if a[j].find('layer_names_and_overrides') != -1 : - k = j+1 - while k < len(a) and len(a[k]) > 3 : - t = a[k].split() - layer_type = t[0] - layer_name = t[1] - self._layers[layer_name] = [shapes[layer_type], colors[layer_type], arrows[layer_type]] - if len(t) > 2 : - for i in t[2:] : - i = i.strip() - t2 = i.split('=') - if t2[0] == 'shape' : self._layers[layer_name][0] = t2[1] - if t2[0] == 'color' : self._layers[layer_name][1] = t2[1] - if t2[0] == 'arrow' : self._layers[layer_name][2] = t2[1] - k += 1 - - def shape(self, name) : - if name not in self._layers : - return 'rect' - ''' - print 'shape(): Nothing known about this layer:', name - print 'Please check your properties file' - print - exit(0) - ''' - return self._layers[name][0] - - def color(self, name) : - if name not in self._layers : - return 'grey' - ''' - print 'color(): Nothing known about this layer:', name - print 'Please check your properties file' - print - exit(0) - ''' - return self._layers[name][1] - - def arrow(self, name) : - if name not in self._layers : - return 'grey' - ''' - print 'arrow(): Nothing known about this layer:', name - print 'Please check your properties file' - print - exit(0) - ''' - return self._layers[name][2] diff --git a/scripts/proto/lbann/viz/properties/properties.txt b/scripts/proto/lbann/viz/properties/properties.txt deleted file mode 100644 index 809e986997c..00000000000 --- 
a/scripts/proto/lbann/viz/properties/properties.txt +++ /dev/null @@ -1,62 +0,0 @@ -# -# shapes: https://graphviz.gitlab.io/_pages/doc/info/shapes.html -# color names: http://www.graphviz.org/doc/info/colors.html -# -# I found this tutorial useful: http://tonyballantyne.com/graphs.html - -# default shapes, colors, and arrows -# -shapes_and_colors black -learning doubleoctagon chartreuse black -activations house orchid1 black -io house sienna2 black -regularizers rect cornflowerblue black -transform rect antiquewhite1 red - -#leave at least one blank line after each section! -#white space separating fields on each line is arbitrary - -layer_names_and_overrides -learning convolution shape=tripleoctagon color=chartreuse3 -learning fully_connected -learning deconvolution shape=octogon -io input -activations elu -activations identity -activations leaky_relu -activations log_sigmoid -activations log_softmax -activations relu -activations selu -activations sigmoid -activations softmax -activations softplus -activations softsign -regularizers batch_normalization -regularizers local_response_normalization -regularizers dropout -regularizers selu_dropout -transform reshape -transform pooling -transform concatenation -transform slice -transform split arrow=red -transform sum -transform unpooling -transform hadamard -transform zero -transform transform -transform constant -transform reduction -transform evaluation -transform gaussian -transform bernoulli -transform uniform -transform zero -transform noise -transform crop -transform categorical_random -transform discrete_random -transform stop_gradient -transform max -transform min diff --git a/scripts/proto/lbann/viz/properties/properties_rect.txt b/scripts/proto/lbann/viz/properties/properties_rect.txt deleted file mode 100644 index 3f16a27322a..00000000000 --- a/scripts/proto/lbann/viz/properties/properties_rect.txt +++ /dev/null @@ -1,49 +0,0 @@ -# -# shapes: http://www.graphviz.org/content/node-shapes -# color names: 
http://www.graphviz.org/doc/info/colors.html -# -# I found this tutorial useful: http://tonyballantyne.com/graphs.html - -# default shapes, colors, and arrows -# -shapes_and_colors -learning rectangle chartreuse black -activations rectangle orchid1 black -io rectangle sienna2 black -regularizers rectangle cornflowerblue black -transform rectangle antiquewhite1 black - -#leave at least one blank line after each section! -#white space separating fields on each line is arbitrary - -layer_names_and_overrides -learning convolution shape=tripleoctagon color=chartreuse3 -learning fully_connected -learning deconvolution shape=octogon -learning fully_connected_cuda -io input -activations softmax -activations elu -activations identity -activations leaky_relu -activations relu -activations sigmoid -activations smooth_relu -activations softplus -activations selu -activations tanh -regularizers batch_normalization -regularizers local_response_normalization -regularizers dropout -regularizers selu_dropout -transform reshape -transform pooling -transform concatenation -transform slice -transform split -transform sum -transform unpooling -transform hadamard -transform gaussian -transform bernoulli -transform uniform diff --git a/scripts/proto/scripts/viz.py b/scripts/proto/scripts/viz.py new file mode 100755 index 00000000000..06f4fa4ab72 --- /dev/null +++ b/scripts/proto/scripts/viz.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +import argparse +import google.protobuf.text_format as txtf +from lbann.proto import lbann_pb2 +import lbann.viz as lz + +# Parse command-line arguments +parser = argparse.ArgumentParser( + description='Visualize layer graph for LBANN model.') +parser.add_argument( + 'input', action='store', type=str, + help='model prototext file') +parser.add_argument( + 'output', action='store', nargs='?', + default='graph.pdf', type=str, + help='output file (default: graph.pdf)') +parser.add_argument( + '--file-format', action='store', default='pdf', type=str, + 
help='output file format (default: pdf)', metavar='FORMAT') +parser.add_argument( + '--label-format', + action='store', default='type-only', type=str, + choices=('type-only', 'name-only', 'type-and-name', 'full'), + help='displayed layer info (default: type-only)') +parser.add_argument( + '--graphviz-engine', action='store', default='dot', type=str, + help='Graphviz visualization scheme (default: dot)', metavar='ENGINE') +args = parser.parse_args() + +# Parse prototext file +proto = lbann_pb2.LbannPB() +with open(args.input, 'r') as f: + txtf.Merge(f.read(), proto) + +# Visualize +lz.visualize_layer_graph(proto.model, args.output, + file_format=args.file_format, + label_format=args.label_format, + graphviz_engine=args.graphviz_engine) diff --git a/scripts/proto/scripts/viz/lbviz b/scripts/proto/scripts/viz/lbviz deleted file mode 100755 index db987591263..00000000000 --- a/scripts/proto/scripts/viz/lbviz +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 - -from sys import * -import os -import pprint -from graphviz import Digraph -import google.protobuf.text_format as txtf - -from lbann.proto import lbann_pb2 -from lbann.viz import getGraphFromPrototext -from lbann.viz.properties import Properties - -usage = ''' -usage: %s model_fn.prototext [output=] [format=] [prop=] [full=1] [brief=1] [ranksep=] - -where: "output" is the output file basename; default is "graph" - - "format" refers to the output file; default is pdf, so the default - output file is "graph.pdf" You can find a list of other formats at: - http://www.graphviz.org/content/output-formats or just try your - favorite (gif, png, jpg, etc) -- it's probably supported! - Note: some formats may take a while to render, so be patient. - - "prop" is the name of the properties file; default is "properties.txt" - in the same directory of lbviz. 
- The properties file is a simple text file that lists colors and - shapes for the various layer types - - if "full=1" is present, all layer attributes are printed (e.g, - num_neurons, has_bias, etc). The default is to print only the - layer type and layer name - - if "brief=1", only the nodes' layer types are printed - - use "ranksep= to increase of decrease the verticle distance - between nodes. Hint: start with "ranksep=.75" and adjust up or down - from there - -note: the ordering of the optional params doesn't matter - -note: in addition to the output file, an intermediate file called - 'graph.dot' will be written -''' % argv[0] - -#===================================================================== - -if len(argv) < 2 : - print(usage) - exit(9) - -#parse cmd line -output_fn = "graph" -output_format = "pdf" -prop_fn = None -full = False -brief = False -ranksep=0 -for j in range(2, len(argv)) : - t = argv[j].split('=') - if t[0] == 'output' : - output_fn = t[1] - elif t[0] == 'format' : - output_format = t[1] - elif t[0] == 'prop' : - prop_fn = t[1] - elif t[0] == 'full' : - full = True - elif t[0] == 'brief' : - brief = True - elif t[0] == 'ranksep' : - ranksep = float(t[1]) - else : - print('badly formed or unknown cmd line option:', argv[j]) - print('================================================================') - print() - print(usage) - exit(9) - -#load properties database -props = Properties(prop_fn) if prop_fn is not None else None - -with open(argv[1], "r") as f: - s = f.read().strip() - -pb = lbann_pb2.LbannPB() -txtf.Merge(s, pb) - -print(pb.model) - -g = getGraphFromPrototext( - pb.model, - output_format, - props, - full, - brief, - ranksep) - -g.render("out") From 16f010faf6dadff8552eaef2675d0fc21b6e71b4 Mon Sep 17 00:00:00 2001 From: "Brian C. Van Essen" Date: Tue, 5 Mar 2019 18:37:16 -0800 Subject: [PATCH 151/443] Fixed a bug in the copy function of the partitioned_io_buffer. Also added a proper destructor. 
--- .../io/data_buffers/partitioned_io_buffer.hpp | 4 ++-- src/io/data_buffers/partitioned_io_buffer.cpp | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/include/lbann/io/data_buffers/partitioned_io_buffer.hpp b/include/lbann/io/data_buffers/partitioned_io_buffer.hpp index e3fcdad1bf2..95b992673e3 100644 --- a/include/lbann/io/data_buffers/partitioned_io_buffer.hpp +++ b/include/lbann/io/data_buffers/partitioned_io_buffer.hpp @@ -85,8 +85,8 @@ class partitioned_io_buffer : public generic_io_buffer { partitioned_io_buffer(lbann_comm *comm, int num_parallel_readers, std::map data_readers, int num_child_layers); partitioned_io_buffer(const partitioned_io_buffer& other); partitioned_io_buffer& operator=(const partitioned_io_buffer& other); - ~partitioned_io_buffer() = default; - partitioned_io_buffer* copy() const override { return new partitioned_io_buffer(*this); } + ~partitioned_io_buffer(); + partitioned_io_buffer* copy() const override; std::string get_type() const override { return "partitioned"; } diff --git a/src/io/data_buffers/partitioned_io_buffer.cpp b/src/io/data_buffers/partitioned_io_buffer.cpp index ba362df402b..fb8556b7ea0 100644 --- a/src/io/data_buffers/partitioned_io_buffer.cpp +++ b/src/io/data_buffers/partitioned_io_buffer.cpp @@ -34,11 +34,25 @@ lbann::partitioned_io_buffer::partitioned_io_buffer(lbann_comm *comm, int num_pa m_data_buffers[execution_mode::testing] = new data_buffer(comm, num_child_layers); } +lbann::partitioned_io_buffer::~partitioned_io_buffer() { + for (auto& buf : m_data_buffers) { + delete buf.second; + } +} + lbann::partitioned_io_buffer::partitioned_io_buffer(const lbann::partitioned_io_buffer& other) : generic_io_buffer(other) { - for (auto& buf : m_data_buffers) { - buf.second = buf.second->copy(); + for (const auto& buf : other.m_data_buffers) { + m_data_buffers[buf.first] = buf.second->copy(); + } +} + +lbann::partitioned_io_buffer* lbann::partitioned_io_buffer::copy() const { + 
auto new_io_buffer = new partitioned_io_buffer(*this); + for (const auto& buf : m_data_buffers) { + new_io_buffer->m_data_buffers[buf.first] = buf.second->copy(); } + return new_io_buffer; } lbann::partitioned_io_buffer& lbann::partitioned_io_buffer::operator=(const lbann::partitioned_io_buffer& other) { From 5f7c59bee9a9f2a1068bdef5e3903c8c9668d706 Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Wed, 6 Mar 2019 08:53:59 -0800 Subject: [PATCH 152/443] modified to be more complete. Compiles, but not yet tested. --- model_zoo/jag_utils/detect_corruption.cpp | 163 +++++++++++++--------- 1 file changed, 96 insertions(+), 67 deletions(-) diff --git a/model_zoo/jag_utils/detect_corruption.cpp b/model_zoo/jag_utils/detect_corruption.cpp index 9751bf5fc71..752c6e38e71 100644 --- a/model_zoo/jag_utils/detect_corruption.cpp +++ b/model_zoo/jag_utils/detect_corruption.cpp @@ -38,12 +38,16 @@ #include #include #include "lbann/lbann.hpp" +#include "lbann/utils/jag_utils.hpp" #include using namespace lbann; void get_input_names(std::unordered_set &s); void get_scalar_names(std::unordered_set &s); +void get_image_names(std::unordered_set &s); +void print_errs(world_comm_ptr &comm, int np, int rank, std::ostringstream &s, const char *msg); + //========================================================================== int main(int argc, char *argv[]) { int random_seed = lbann_default_random_seed; @@ -59,66 +63,63 @@ int main(int argc, char *argv[]) { // sanity check invocation if (!opts->has_string("filelist")) { if (master) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: usage: " + argv[0] + " --filelist="); + throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: usage: " + argv[0] + " --filelist= \nwhere: 'filelist' is a file that contains the fully qualified filenames of the conduit *'bundle' files that are to be inspected.\nfunction: attemptsto detect and report currupt files and/or samples within 
those files."); } } - // master reads the filelist and bcasts to others - std::vector files; - std::string f; - int size; - if (master) { - std::stringstream s; - std::ifstream in(opts->get_string("filelist").c_str()); - if (!in) { - throw lbann_exception(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: failed to open " + opts->get_string("filelist") + " for reading"); - } - std::string line; - while (getline(in, line)) { - if (line.size()) { - s << line << " "; - //files.push_back(line); - } - } - in.close(); - f = s.str(); - size = s.str().size(); - std::cout << "size: " << size << "\n"; - } - comm->world_broadcast(0, &size, 1); - f.resize(size); - comm->world_broadcast(0, &f[0], size); - - // unpack the filenames into a vector - std::stringstream s2(f); - std::string filename; - while (s2 >> filename) { - if (filename.size()) { - files.push_back(filename); - } - } - if (rank == 1) std::cerr << "num files: " << files.size() << "\n"; + const std::string fn = opts->get_string("filelist"); + std::vector filenames; + read_filelist(comm.get(), fn, filenames); + if (master) std::cerr << "num files: " << filenames.size() << "\n"; std::unordered_set input_names; std::unordered_set scalar_names; + std::unordered_set image_names; get_input_names(input_names); get_scalar_names(scalar_names); + get_image_names(image_names); + if (master) { + std::cout << "\nchecking the following inputs: "; + for (auto t : input_names) std::cout << t << " "; + std::cout << "\n"; + std::cout << "\nchecking the following scalars: "; + for (auto t : scalar_names) std::cout << t << " "; + std::cout << "\n"; + std::cout << "\nchecking the following images: "; + for (auto t : image_names) std::cout << t << " "; + std::cout << "\n"; + } + + //================================================================ // detect corruption! + + //these error conditions ar liste in the order in which they're + //tested. 
Upon failure, we call continue," i.e, no further tests + //are cunducted + std::ostringstream open_err; //failed to open file + std::ostringstream children_err; //failed to read child names + std::ostringstream success_flag_err; //failed to read success flag + + std::ostringstream sample_err; //catch all for errors in reading inputs, + //scalars, and images hid_t hdf5_file_hnd; std::string key; conduit::Node n_ok; conduit::Node tmp; size_t h = 0; - for (size_t j=rank; j(tmp.value()); } } catch (...) { - std::cerr << rank << " :: " << "exception reading an input for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; + //std::cerr << rank << " :: " << "exception reading an input for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; + success_flag_err << filenames[j] << "\n"; + sample_err << filenames[j] << " " << cnames[i] << "\n"; continue; } @@ -157,39 +161,46 @@ int main(int argc, char *argv[]) { for (auto t : scalar_names) { key = cnames[i] + "/outputs/scalars/" + t; conduit::relay::io::hdf5_read(hdf5_file_hnd, key, tmp); + total += static_cast(tmp.value()); } } catch (...) { - std::cerr << rank << " :: " << "exception reading an scalar for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; + //std::cerr << rank << " :: " << "exception reading an scalar for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; + sample_err << filenames[j] << " " << cnames[i] << "\n"; continue; } try { - key = cnames[i] + "/outputs/images/(0.0, 0.0)//0.0/emi"; - conduit::relay::io::hdf5_read(hdf5_file_hnd, key, tmp); - } catch (...) 
{ - std::cerr << rank << " :: " << "exception reading image: (0.0, 0.0) for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; - continue; - } - - try { - key = cnames[i] + "/outputs/images/(90.0, 0.0)//0.0/emi"; - conduit::relay::io::hdf5_read(hdf5_file_hnd, key, tmp); - } catch (...) { - std::cerr << rank << " :: " << "exception reading image: (90.0, 0.0) for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; - continue; - } - - - try { - key = cnames[i] + "/outputs/images/(90.0, 78.0)//0.0/emi"; - conduit::relay::io::hdf5_read(hdf5_file_hnd, key, tmp); + for (auto t : image_names) { + key = cnames[i] + "/outputs/images/" + t + "/0.0/emi"; + conduit::relay::io::hdf5_read(hdf5_file_hnd, key, tmp); + conduit::float32_array emi = tmp.value(); + const size_t image_size = emi.number_of_elements(); + for (size_t k=0; kglobal_barrier(); + if (master) { + std::cout << "totals; this is a sanity check; please IGNORE!!\n"; + } + std::cout << rank << " :: " << total << "\n"; + + // print erros, if any + print_errs(comm, np, rank, open_err, "failed to open these files:"); + print_errs(comm, np, rank, children_err, "failed to read children from these files:"); + print_errs(comm, np, rank, success_flag_err, "failed to read success flag for these samples:"); + print_errs(comm, np, rank, sample_err, "failed to read input or scalars or images for these samples:"); + } catch (exception const &e) { El::ReportException(e); return EXIT_FAILURE; @@ -234,4 +245,22 @@ void get_scalar_names(std::unordered_set &s) { s.insert("tMINradius"); s.insert("MINradius"); } + +void get_image_names(std::unordered_set &s) { + s.insert("(0.0, 0.0)"); + s.insert("(90.0, 0.0)"); + s.insert("90.0, 78.0)"); +} + +void print_errs(world_comm_ptr &comm, int np, int rank, std::ostringstream &s, const char *msg) { + comm->global_barrier(); + if (rank == 0) { std::cout << "\n" << msg << "\n"; } + for (int i=0; 
iglobal_barrier(); + if (rank == i) { + std::cout << s.str(); + } + } + comm->global_barrier(); +} #endif //#ifdef LBANN_HAS_CONDUIT From 6dbec3aa8cc4d7562600cf94c6bd1244277dd683 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Wed, 6 Mar 2019 11:21:11 -0800 Subject: [PATCH 153/443] fix ternary operator --- include/lbann/io/data_buffers/partitioned_io_buffer.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/lbann/io/data_buffers/partitioned_io_buffer.hpp b/include/lbann/io/data_buffers/partitioned_io_buffer.hpp index 95b992673e3..a4a6619d769 100644 --- a/include/lbann/io/data_buffers/partitioned_io_buffer.hpp +++ b/include/lbann/io/data_buffers/partitioned_io_buffer.hpp @@ -59,7 +59,7 @@ class data_buffer { m_input_buffers.clear(); m_input_buffers.reserve(other.m_input_buffers.size()); for (const auto& ptr : other.m_input_buffers) { - m_input_buffers.emplace_back(ptr ? nullptr : ptr->Copy()); + m_input_buffers.emplace_back(ptr ? ptr->Copy() : nullptr); } } data_buffer& operator=(const data_buffer& other) { @@ -68,7 +68,7 @@ class data_buffer { m_input_buffers.clear(); m_input_buffers.reserve(other.m_input_buffers.size()); for (const auto& ptr : other.m_input_buffers) { - m_input_buffers.emplace_back(ptr ? nullptr : ptr->Copy()); + m_input_buffers.emplace_back(ptr ? ptr->Copy() : nullptr); } return *this; } From 98a3d2d0c21dae52350ba3a6dada9a2cf68ef512 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Wed, 6 Mar 2019 11:25:38 -0800 Subject: [PATCH 154/443] remove redundant loop --- src/io/data_buffers/partitioned_io_buffer.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/io/data_buffers/partitioned_io_buffer.cpp b/src/io/data_buffers/partitioned_io_buffer.cpp index fb8556b7ea0..6e248a1ac1d 100644 --- a/src/io/data_buffers/partitioned_io_buffer.cpp +++ b/src/io/data_buffers/partitioned_io_buffer.cpp @@ -48,11 +48,7 @@ lbann::partitioned_io_buffer::partitioned_io_buffer(const lbann::partitioned_io_ } lbann::partitioned_io_buffer* lbann::partitioned_io_buffer::copy() const { - auto new_io_buffer = new partitioned_io_buffer(*this); - for (const auto& buf : m_data_buffers) { - new_io_buffer->m_data_buffers[buf.first] = buf.second->copy(); - } - return new_io_buffer; + return new partitioned_io_buffer(*this); } lbann::partitioned_io_buffer& lbann::partitioned_io_buffer::operator=(const lbann::partitioned_io_buffer& other) { From f1830981201129d37f98dfc7ad01a855a3c4b6b3 Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Wed, 6 Mar 2019 11:27:22 -0800 Subject: [PATCH 155/443] clean up memory before overwriting in operator= --- src/io/data_buffers/partitioned_io_buffer.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/io/data_buffers/partitioned_io_buffer.cpp b/src/io/data_buffers/partitioned_io_buffer.cpp index 6e248a1ac1d..c7b4538a090 100644 --- a/src/io/data_buffers/partitioned_io_buffer.cpp +++ b/src/io/data_buffers/partitioned_io_buffer.cpp @@ -54,6 +54,7 @@ lbann::partitioned_io_buffer* lbann::partitioned_io_buffer::copy() const { lbann::partitioned_io_buffer& lbann::partitioned_io_buffer::operator=(const lbann::partitioned_io_buffer& other) { generic_io_buffer::operator=(other); for (auto& buf : m_data_buffers) { + if (buf.second) delete buf.second; buf.second = buf.second->copy(); } return *this; From 06ae009d81479c80d96a47b32056171a38571c68 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Wed, 6 Mar 2019 12:08:17 -0800 Subject: [PATCH 156/443] Updating Python package setup script with changes to lbviz. Review suggestions from @oyamay. 
--- scripts/proto/scripts/{viz.py => lbviz} | 0 scripts/proto/setup.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename scripts/proto/scripts/{viz.py => lbviz} (100%) diff --git a/scripts/proto/scripts/viz.py b/scripts/proto/scripts/lbviz similarity index 100% rename from scripts/proto/scripts/viz.py rename to scripts/proto/scripts/lbviz diff --git a/scripts/proto/setup.py b/scripts/proto/setup.py index e9f638539dc..ed2c39d03cd 100755 --- a/scripts/proto/setup.py +++ b/scripts/proto/setup.py @@ -25,7 +25,7 @@ def getLBANNVersion(): license="Apache 2.0", packages=["lbann"], scripts=["scripts/plot/lbplot", - "scripts/viz/lbviz"], + "scripts/lbviz"], install_requires=["protobuf>=3.6.1", "onnx>=1.3.0", "numpy>=1.16.0", From 54cd017e2a4bf40652aeef71cbdfdc65cf2cf3c2 Mon Sep 17 00:00:00 2001 From: Sam Ade Jacobs Date: Wed, 6 Mar 2019 14:31:46 -0800 Subject: [PATCH 157/443] add batch interval at test time callback, default is still zero. --- src/models/model.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/models/model.cpp b/src/models/model.cpp index 3214d847874..cf9fd892edc 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -1256,7 +1256,7 @@ void model::do_batch_begin_cbs(execution_mode mode) { break; case execution_mode::validation: case execution_mode::testing: - if (get_cur_step() % cb->get_batch_interval() == 0) { + if (get_step() % cb->get_batch_interval() == 0) { cb->on_batch_evaluate_begin(this); } break; From 3893330ac39371f4c650a72b51a605d21c257b44 Mon Sep 17 00:00:00 2001 From: Sam Ade Jacobs Date: Thu, 7 Mar 2019 01:34:07 -0800 Subject: [PATCH 158/443] Add minmax to metric reported for multiple trainers --- src/callbacks/callback_print.cpp | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/callbacks/callback_print.cpp b/src/callbacks/callback_print.cpp index 0295eb12b81..7c39d0f3b29 100644 --- a/src/callbacks/callback_print.cpp +++ b/src/callbacks/callback_print.cpp 
@@ -151,7 +151,7 @@ void lbann_callback_print::report_results(model *m) { } if (comm->am_trainer_master()) { - const int num_models = comm->get_num_trainers(); + const int num_trainers = comm->get_num_trainers(); // Report objective function value const EvalType obj_fn = m->get_objective_function()->get_mean_value(mode); @@ -161,12 +161,12 @@ void lbann_callback_print::report_results(model *m) { std::vector num_samples_list(comm->get_num_trainers()); comm->intertrainer_gather(obj_fn, obj_fn_list); comm->intertrainer_gather(obj_fn_samples, num_samples_list); - for (int i = 0; i < num_models; ++i) { + for (int i = 0; i < num_trainers; ++i) { std::cout << m->get_name() << " (instance " << i << ") " << mode_string << " " << "objective function : " << obj_fn_list[i] << std::endl; } - if (num_models > 1) { + if (num_trainers > 1) { const EvalType avg_obj_fn = (std::inner_product(num_samples_list.begin(), num_samples_list.end(), obj_fn_list.begin(), @@ -192,13 +192,14 @@ void lbann_callback_print::report_results(model *m) { std::vector num_samples_list(comm->get_num_trainers()); comm->intertrainer_gather(score, score_list); comm->intertrainer_gather(score_samples, num_samples_list); - for (int i = 0; i < num_models; ++i) { + for (int i = 0; i < num_trainers; ++i) { std::cout << m->get_name() << " (instance " << i << ") " << mode_string << " " << met->name() << " : " << score_list[i] << met->get_unit() << std::endl; } - if (num_models > 1) { + if (num_trainers > 1) { + const EvalType min_score = *std::min_element(begin(score_list), end(score_list)); const EvalType avg_score = (std::inner_product(num_samples_list.begin(), num_samples_list.end(), score_list.begin(), @@ -206,10 +207,19 @@ void lbann_callback_print::report_results(model *m) { / std::accumulate(num_samples_list.begin(), num_samples_list.end(), 0)); - std::cout << m->get_name() << " (global) " << mode_string << " " + const EvalType max_score = *std::max_element(begin(score_list), end(score_list)); + std::cout << 
m->get_name() << " (global min) " << mode_string << " " + << met->name() << " : " + << min_score << met->get_unit() + << std::endl; + std::cout << m->get_name() << " (global mean) " << mode_string << " " << met->name() << " : " << avg_score << met->get_unit() << std::endl; + std::cout << m->get_name() << " (global max) " << mode_string << " " + << met->name() << " : " + << max_score << met->get_unit() + << std::endl; } } else { comm->intertrainer_gather(score, comm->get_intertrainer_master()); From ccb15ddaedcf70fae04bd045a49a786b624db4e9 Mon Sep 17 00:00:00 2001 From: "David A. Hysom" Date: Thu, 7 Mar 2019 09:14:34 -0800 Subject: [PATCH 159/443] a few tweaks for better formatting, etc, of results --- model_zoo/jag_utils/detect_corruption.cpp | 54 +++++++++++------------ 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/model_zoo/jag_utils/detect_corruption.cpp b/model_zoo/jag_utils/detect_corruption.cpp index 752c6e38e71..f7ff73607fb 100644 --- a/model_zoo/jag_utils/detect_corruption.cpp +++ b/model_zoo/jag_utils/detect_corruption.cpp @@ -70,7 +70,6 @@ int main(int argc, char *argv[]) { const std::string fn = opts->get_string("filelist"); std::vector filenames; read_filelist(comm.get(), fn, filenames); - if (master) std::cerr << "num files: " << filenames.size() << "\n"; std::unordered_set input_names; std::unordered_set scalar_names; @@ -80,15 +79,15 @@ int main(int argc, char *argv[]) { get_image_names(image_names); if (master) { - std::cout << "\nchecking the following inputs: "; - for (auto t : input_names) std::cout << t << " "; - std::cout << "\n"; - std::cout << "\nchecking the following scalars: "; - for (auto t : scalar_names) std::cout << t << " "; - std::cout << "\n"; - std::cout << "\nchecking the following images: "; - for (auto t : image_names) std::cout << t << " "; - std::cout << "\n"; + std::cerr << "\nchecking the following inputs: \n"; + for (auto t : input_names) std::cerr << t << " "; + std::cerr << "\n"; + std::cerr << 
"\nchecking the following scalars: "; + for (auto t : scalar_names) std::cerr << t << " "; + std::cerr << "\n"; + std::cerr << "\nchecking the following images: "; + for (auto t : image_names) std::cerr << t << " "; + std::cerr << "\n\n"; } //================================================================ @@ -107,18 +106,17 @@ int main(int argc, char *argv[]) { std::string key; conduit::Node n_ok; conduit::Node tmp; - size_t h = 0; + int h = 0; // used to ensure all values are used double total = 0; for (size_t j=rank; j(tmp.value()); } } catch (...) { - //std::cerr << rank << " :: " << "exception reading an input for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; success_flag_err << filenames[j] << "\n"; sample_err << filenames[j] << " " << cnames[i] << "\n"; continue; @@ -164,7 +159,6 @@ int main(int argc, char *argv[]) { total += static_cast(tmp.value()); } } catch (...) { - //std::cerr << rank << " :: " << "exception reading an scalar for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; sample_err << filenames[j] << " " << cnames[i] << "\n"; continue; } @@ -180,7 +174,6 @@ int main(int argc, char *argv[]) { } } } catch (...) 
{ - //std::cerr << rank << " :: " << "exception reading image: (0.0, 0.0) for sample: " << cnames[i] << " which is " << i << " of " << cnames[i] << "; "<< files[j] << "\n"; sample_err << filenames[j] << " " << cnames[i] << "\n"; continue; } @@ -188,18 +181,21 @@ int main(int argc, char *argv[]) { } } - // print out totals, to ensure a compiler doesn't discard unused values - comm->global_barrier(); if (master) { - std::cout << "totals; this is a sanity check; please IGNORE!!\n"; + int h2 = comm->reduce(h, comm->get_world_comm()); + double total2 = comm->reduce(total, comm->get_world_comm()); + std::cerr << "\nnum files processed: " << h2 << "\n" + << "sanity check - please ignore: " << total2 << "\n\n"; + } else { + comm->reduce(h, 0, comm->get_world_comm()); + comm->reduce(total, 0, comm->get_world_comm()); } - std::cout << rank << " :: " << total << "\n"; // print erros, if any - print_errs(comm, np, rank, open_err, "failed to open these files:"); - print_errs(comm, np, rank, children_err, "failed to read children from these files:"); - print_errs(comm, np, rank, success_flag_err, "failed to read success flag for these samples:"); - print_errs(comm, np, rank, sample_err, "failed to read input or scalars or images for these samples:"); + print_errs(comm, np, rank, open_err, "failed to open these files (if any):"); + print_errs(comm, np, rank, children_err, "failed to read children from these files (if any):"); + print_errs(comm, np, rank, success_flag_err, "failed to read success flag for these samples (if any):"); + print_errs(comm, np, rank, sample_err, "failed to read input or scalars or images for these samples (if any):"); } catch (exception const &e) { El::ReportException(e); @@ -249,16 +245,16 @@ void get_scalar_names(std::unordered_set &s) { void get_image_names(std::unordered_set &s) { s.insert("(0.0, 0.0)"); s.insert("(90.0, 0.0)"); - s.insert("90.0, 78.0)"); + s.insert("(90.0, 78.0)"); } void print_errs(world_comm_ptr &comm, int np, int rank, 
std::ostringstream &s, const char *msg) { comm->global_barrier(); - if (rank == 0) { std::cout << "\n" << msg << "\n"; } + if (rank == 0) { std::cerr << "\n" << msg << "\n"; } for (int i=0; iglobal_barrier(); if (rank == i) { - std::cout << s.str(); + std::cerr << s.str(); } } comm->global_barrier(); From e06b11751c17401533411ffae96d0ba0b6ba3512 Mon Sep 17 00:00:00 2001 From: Sam Ade Jacobs Date: Thu, 7 Mar 2019 13:51:46 -0800 Subject: [PATCH 160/443] Making models for jag topology analysis accessible to all --- .../jag/wae_cycle_gan/cycle_gan.prototext | 1082 +++++++++++++++++ .../wae_cycle_gan/cycle_gan_only.prototext | 974 +++++++++++++++ .../models/jag/wae_cycle_gan/wae.prototext | 734 +++++++++++ .../jag/wae_cycle_gan/wae_fw_inv.prototext | 923 ++++++++++++++ .../jag/wae_cycle_gan/wae_nobn.prototext | 734 +++++++++++ 5 files changed, 4447 insertions(+) create mode 100644 model_zoo/models/jag/wae_cycle_gan/cycle_gan.prototext create mode 100644 model_zoo/models/jag/wae_cycle_gan/cycle_gan_only.prototext create mode 100644 model_zoo/models/jag/wae_cycle_gan/wae.prototext create mode 100644 model_zoo/models/jag/wae_cycle_gan/wae_fw_inv.prototext create mode 100644 model_zoo/models/jag/wae_cycle_gan/wae_nobn.prototext diff --git a/model_zoo/models/jag/wae_cycle_gan/cycle_gan.prototext b/model_zoo/models/jag/wae_cycle_gan/cycle_gan.prototext new file mode 100644 index 00000000000..87969aa711e --- /dev/null +++ b/model_zoo/models/jag/wae_cycle_gan/cycle_gan.prototext @@ -0,0 +1,1082 @@ +model { + name: "cycgan_model" + shareable_training_data_reader:false + serialize_background_io: true + procs_per_trainer:0 + objective_function { + l2_weight_regularization { + scale_factor: 0.0001 + } + layer_term { + scale_factor: 1.0 + layer: "disc1_real_bce" + } + layer_term { + scale_factor: 1.0 + layer: "disc1_fake_bce" + } + layer_term { + #scale_factor: 0.05 + scale_factor: 0.01 + layer: "g_adv1_bce" + } + layer_term { + #scale_factor: 0.025 + scale_factor: 1.0 + layer: 
"l_l2_y" + } + layer_term { + scale_factor: 1.0 + layer: "disc1_inv_real_bce" + } + layer_term { + scale_factor: 1.0 + layer: "disc1_inv_fake_bce" + } + layer_term { + #scale_factor: 0.05 + scale_factor: 0.01 + layer: "g_inv_adv1_bce" + } + layer_term { + #scale_factor: 0.025 + scale_factor: 1.0 + layer: "l_l2_x" + } + layer_term { + scale_factor: 0.1 + layer: "L_cyc_x" + } + } + num_epochs: 40 + super_steps: 10 + metric { + layer_metric { + name: "fw_latent_loss" + layer: "l_l2_y" + } + } + metric { + layer_metric { + name: "inv_l1_loss" + layer: "l_l2_x" + } + } + metric { + layer_metric { + name: "X_cyclic_loss" + layer: "L_cyc_x" + } + } + data_layout: "data_parallel" + layer { + input { + io_buffer: "partitioned" + data_set_per_model: true + target_mode: "N/A" + } + name: "data" + data_layout: "data_parallel" + parents: " " + } + layer { + name: "zero" + data_layout: "data_parallel" + constant { + value: 0.0 + num_neurons: "1" + } + } + layer { + name: "one" + data_layout: "data_parallel" + constant { + value: 1.0 + num_neurons: "1" + } + } + layer { + name: "slice_data" + data_layout: "data_parallel" + parents: "data" + #children: "image_data_dummy param_data_id" + children: "image_data_id param_data_id" + slice { + #slice_points: "0 2500 2511" + get_slice_points_from_reader: "independent" + } + } + layer { + identity { + } + #name: "image_data_dummy" + name: "image_data_id" + data_layout: "data_parallel" + parents: "slice_data" + } + layer { + identity { + } + name: "param_data_id" + data_layout: "data_parallel" + parents: "slice_data" + } +########Data space end here + ###Encoder from WAE + ######################### + layer { + fully_connected { + #num_neurons: 32 + num_neurons: 1024 + has_bias: true + } + name: "encodefc1" + data_layout: "data_parallel" + freeze: true + #weights: "encodefc1linearity" + parents: "image_data_id" + } + layer { + elu { + } + name: "encodeleaky_relu1" + data_layout: "data_parallel" + parents: "encodefc1" + } + layer { + 
parents: "encodeleaky_relu1" + name: "encodefc1_bn" + data_layout: "data_parallel" + freeze: true + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "encodefc2" + data_layout: "data_parallel" + freeze: true + #weights: "encodefc2linearity" + parents: "encodefc1_bn" + } + layer { + tanh { + } + name: "encodeleaky_relu2" + data_layout: "data_parallel" + parents: "encodefc2" + } + layer { + parents: "encodeleaky_relu2" + name: "encodefc2_bn" + data_layout: "data_parallel" + freeze: true + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 32 + has_bias: true + } + name: "encodefc3" + data_layout: "data_parallel" + freeze: true + #weights: "encodefc3linearity" + parents: "encodefc2_bn" + } + layer { + tanh { + } + name: "encodeleaky_relu3" + data_layout: "data_parallel" + parents: "encodefc3" + } + layer { + parents: "encodeleaky_relu3" + name: "encodefc3_bn" + data_layout: "data_parallel" + freeze: true + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + #gen output is latent dim + num_neurons: 20 + has_bias: true + } + #z_sample + name: "encodefc4" + data_layout: "data_parallel" + #weights: "encodefc4linearity" + freeze: true + parents: "encodefc3_bn" + } + ##################### + + layer { + parents: "encodefc4" + #name: "sample" + ###This is actually sample in latent space, call image_data_dummy for legacy + name: "image_data_dummy" + data_layout: "data_parallel" + identity {} + } + #####WAE Encoder ends here, sample feeds or replaces image data dummy + ###Generator starts here + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "gen1fc1" + data_layout: "data_parallel" + weights: "gen1fc1linearity gen1fc1bias" + parents: "param_data_id" + } + layer { + leaky_relu { + } + name: "gen1leaky_relu1" + data_layout: "data_parallel" + parents: "gen1fc1" + } + layer { + fully_connected { + num_neurons: 512 + 
has_bias: true + } + name: "gen1fc2" + data_layout: "data_parallel" + weights: "gen1fc2linearity gen1fc2bias" + parents: "gen1leaky_relu1" + } + layer { + leaky_relu { + } + name: "gen1leaky_relu2" + data_layout: "data_parallel" + parents: "gen1fc2" + } + #layer { + # dropout { + # keep_prob: 0.8 + # } + # name: "gen1dropout1" + # data_layout: "data_parallel" + # parents: "gen1leaky_relu2" + #} + layer { + fully_connected { + num_neurons: 2048 + has_bias: true + } + name: "gen1fc3" + data_layout: "data_parallel" + weights: "gen1fc3linearity gen1fc3bias" + #parents: "gen1dropout1" + parents: "gen1leaky_relu2" + } + layer { + leaky_relu { + } + name: "gen1leaky_relu3" + data_layout: "data_parallel" + parents: "gen1fc3" + } + layer { + fully_connected { + #num_neurons: 2500 + #get_slice_points_from_reader: "independent" + #get_num_neurons_of_slice_from_reader: [ 1 ] + #replace image_dim with latent_dim + num_neurons: 20 + has_bias: true + } + name: "gen1fc4" + data_layout: "data_parallel" + weights: "gen1fc4linearity gen1fc4bias" + parents: "gen1leaky_relu3" + } + #concat latenty sample (image_data_dummy) and param + layer { + name: "concat_latent_sample_n_param" + data_layout: "data_parallel" + parents: "image_data_dummy param_data_id" + concatenation { + } + } + #####Discriminator + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d1fc1_real" + data_layout: "data_parallel" + weights: "d1fc1linearity d1fc1bias" + #parents: "data" + parents: "concat_latent_sample_n_param" + } + layer { + leaky_relu { + } + name: "d1leaky_relu1_real" + data_layout: "data_parallel" + parents: "d1fc1_real" + } + layer { + parents: "d1leaky_relu1_real" + name: "d1fc1_real_bn" + weights: "d1fc1_w0 d1fc1_w1 d1fc1_w2 d1fc1_w3" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d1fc2_real" + data_layout: "data_parallel" + weights: "d1fc2linearity d1fc2bias" + 
#parents: "d1leaky_relu1_real" + parents: "d1fc1_real_bn" + } + layer { + leaky_relu { + } + name: "d1leaky_relu2_real" + data_layout: "data_parallel" + parents: "d1fc2_real" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d1fc3_real" + data_layout: "data_parallel" + weights: "d1fc3linearity d1fc3bias" + parents: "d1leaky_relu2_real" + } + layer { + name: "concat_gsample_n_param" + data_layout: "data_parallel" + parents: "gen1fc4 param_data_id" + children: "d1_stop_gradient d2_dummy" + concatenation { + } + } + layer { + name: "d1_stop_gradient" + data_layout: "data_parallel" + parents: "concat_gsample_n_param" + stop_gradient { + } + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d1fc1_fake" + data_layout: "data_parallel" + weights: "d1fc1linearity d1fc1bias" + parents: "d1_stop_gradient" + } + layer { + leaky_relu { + } + name: "d1leaky_relu1_fake" + data_layout: "data_parallel" + parents: "d1fc1_fake" + } + layer { + parents: "d1leaky_relu1_fake" + name: "d1fc1_fake_bn" + weights: "d1fc1_w0 d1fc1_w1 d1fc1_w2 d1fc1_w3" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d1fc2_fake" + data_layout: "data_parallel" + weights: "d1fc2linearity d1fc2bias" + #parents: "d1leaky_relu1_fake" + parents: "d1fc1_fake_bn" + } + layer { + leaky_relu { + } + name: "d1leaky_relu2_fake" + data_layout: "data_parallel" + parents: "d1fc2_fake" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d1fc3_fake" + data_layout: "data_parallel" + weights: "d1fc3linearity d1fc3bias" + parents: "d1leaky_relu2_fake" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_real_bce" + data_layout: "data_parallel" + parents: "d1fc3_real one" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_fake_bce" + data_layout: "data_parallel" + parents: "d1fc3_fake zero" + } + layer { + 
identity { + } + name: "d2_dummy" + data_layout: "data_parallel" + parents: "concat_gsample_n_param" + } + layer { + freeze: true + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d2fc1" + data_layout: "data_parallel" + parents: "d2_dummy" + } + layer { + leaky_relu { + } + name: "d2leaky_relu1" + data_layout: "data_parallel" + parents: "d2fc1" + } + layer { + parents: "d2leaky_relu1" + name: "d2fc1_bn" + freeze: true + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + freeze: true + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d2fc2" + data_layout: "data_parallel" + #parents: "d2leaky_relu1" + parents: "d2fc1_bn" + } + layer { + leaky_relu { + } + name: "d2leaky_relu2" + data_layout: "data_parallel" + parents: "d2fc2" + } + layer { + freeze: true + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d2fc3" + data_layout: "data_parallel" + parents: "d2leaky_relu2" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "g_adv1_bce" + data_layout: "data_parallel" + parents: "d2fc3 one" + } + layer { + name: "gsample_minus_y" + data_layout: "data_parallel" + parents: "gen1fc4 image_data_dummy" + weighted_sum { + scaling_factors: "1 -1" + } + } + layer { + name: "l_l2_y" + data_layout: "data_parallel" + l2_norm2 { + } + parents: "gsample_minus_y" + } + layer { + fully_connected { + #num_neurons: 64 + num_neurons: 16 + has_bias: true + } + name: "gen2fc1" + data_layout: "data_parallel" + weights: "gen2fc1linearity gen2fc1bias" + parents: "image_data_dummy" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu1" + data_layout: "data_parallel" + parents: "gen2fc1" + } + layer { + fully_connected { + #num_neurons: 512 + num_neurons: 128 + has_bias: true + } + name: "gen2fc2" + data_layout: "data_parallel" + weights: "gen2fc2linearity gen2fc2bias" + parents: "gen2leaky_relu1" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu2" + data_layout: "data_parallel" + parents: 
"gen2fc2" + } + layer { + fully_connected { + #num_neurons: 2048 + num_neurons: 512 + has_bias: true + } + name: "gen2fc3" + data_layout: "data_parallel" + weights: "gen2fc3linearity gen2fc3bias" + parents: "gen2leaky_relu2" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu3" + data_layout: "data_parallel" + parents: "gen2fc3" + } + layer { + fully_connected { + #num_neurons: 11 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 2 ] + has_bias: true + } + name: "gen2fc4" + data_layout: "data_parallel" + weights: "gen2fc4linearity gen2fc4bias" + parents: "gen2leaky_relu3" + } + layer { + name: "concat_param_n_img" + data_layout: "data_parallel" + parents: "param_data_id image_data_dummy" + concatenation { + } + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d1_invfc1_real" + data_layout: "data_parallel" + weights: "d1_invfc1linearity d1_invfc1bias" + parents: "concat_param_n_img" + } + layer { + leaky_relu { + } + name: "d1_invleaky_relu1_real" + data_layout: "data_parallel" + parents: "d1_invfc1_real" + } + layer { + parents: "d1_invleaky_relu1_real" + name: "d1invfc1_real_bn" + weights: "d1invfc1_w0 d1invfc1_w1 d1invfc1_w2 d1invfc1_w3" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d1_invfc2_real" + data_layout: "data_parallel" + weights: "d1_invfc2linearity d1_invfc2bias" + #parents: "d1_invleaky_relu1_real" + parents: "d1invfc1_real_bn" + } + layer { + leaky_relu { + } + name: "d1_invleaky_relu2_real" + data_layout: "data_parallel" + parents: "d1_invfc2_real" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d1_invfc3_real" + data_layout: "data_parallel" + weights: "d1_invfc3linearity d1_invfc3bias" + parents: "d1_invleaky_relu2_real" + } + layer { + name: "concat_gsample2_n_img" + data_layout: "data_parallel" + parents: "gen2fc4 image_data_dummy" 
+ children: "d1_inv_stop_gradient d2_inv_dummy" + concatenation { + } + } + layer { + name: "d1_inv_stop_gradient" + data_layout: "data_parallel" + parents: "concat_gsample2_n_img" + stop_gradient { + } + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d1_invfc1_fake" + data_layout: "data_parallel" + weights: "d1_invfc1linearity d1_invfc1bias" + parents: "d1_inv_stop_gradient" + } + layer { + leaky_relu { + } + name: "d1_invleaky_relu1_fake" + data_layout: "data_parallel" + parents: "d1_invfc1_fake" + } + layer { + parents: "d1_invleaky_relu1_fake" + name: "d1invfc1_fake_bn" + weights: "d1invfc1_w0 d1invfc1_w1 d1invfc1_w2 d1invfc1_w3" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d1_invfc2_fake" + data_layout: "data_parallel" + weights: "d1_invfc2linearity d1_invfc2bias" + #parents: "d1_invleaky_relu1_fake" + parents: "d1invfc1_fake_bn" + } + layer { + leaky_relu { + } + name: "d1_invleaky_relu2_fake" + data_layout: "data_parallel" + parents: "d1_invfc2_fake" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d1_invfc3_fake" + data_layout: "data_parallel" + weights: "d1_invfc3linearity d1_invfc3bias" + parents: "d1_invleaky_relu2_fake" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_inv_real_bce" + data_layout: "data_parallel" + parents: "d1_invfc3_real one" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_inv_fake_bce" + data_layout: "data_parallel" + parents: "d1_invfc3_fake zero" + } + layer { + identity { + } + name: "d2_inv_dummy" + data_layout: "data_parallel" + parents: "concat_gsample2_n_img" + } + layer { + freeze: true + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d2_invfc1" + data_layout: "data_parallel" + parents: "d2_inv_dummy" + } + layer { + leaky_relu { + } + name: "d2_invleaky_relu1" + data_layout: "data_parallel" + 
parents: "d2_invfc1" + } + layer { + freeze: true + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d2_invfc2" + data_layout: "data_parallel" + parents: "d2_invleaky_relu1" + } + layer { + leaky_relu { + } + name: "d2_invleaky_relu2" + data_layout: "data_parallel" + parents: "d2_invfc2" + } + layer { + freeze: true + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d2_invfc3" + data_layout: "data_parallel" + parents: "d2_invleaky_relu2" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "g_inv_adv1_bce" + data_layout: "data_parallel" + parents: "d2_invfc3 one" + } + #layer { + # name: "gsample2_minus_x" + # data_layout: "data_parallel" + # parents: "gen2fc4 param_data_id" + # weighted_sum { + # scaling_factors: "1 -1" + # } + #} + #@todo: replace with mean abs layer + layer { + name: "l_l2_x" + data_layout: "data_parallel" + #l2_norm2 { + #} + mean_absolute_error{ + } + #parents: "gsample2_minus_x" + parents: "gen2fc4 param_data_id" + } + weights { + name: "gen1fc1linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc1bias" + } + weights { + name: "gen1fc2linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc2bias" + } + weights { + name: "gen1fc3linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc3bias" + } + weights { + name: "gen1fc4linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc4bias" + } + weights { + name: "d1fc1linearity" + he_normal_initializer { + } + } + weights { + name: "d1fc1bias" + } + weights { + name: "d1fc2linearity" + he_normal_initializer { + } + } + weights { + name: "d1fc2bias" + } + weights { + name: "d1fc3linearity" + he_normal_initializer { + } + } + weights { + name: "d1fc3bias" + } + weights { + name: "gen2fc1linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc2linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc3linearity" + he_normal_initializer { + } + } + weights { + name: 
"gen2fc4linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc1bias" + } + weights { + name: "gen2fc2bias" + } + weights { + name: "gen2fc3bias" + } + weights { + name: "gen2fc4bias" + } + weights { + name: "d1_invfc1linearity" + he_normal_initializer { + } + } + weights { + name: "d1_invfc1bias" + } + weights { + name: "d1_invfc2linearity" + he_normal_initializer { + } + } + weights { + name: "d1_invfc2bias" + } + weights { + name: "d1_invfc3linearity" + he_normal_initializer { + } + } + weights { + name: "d1_invfc3bias" + } + ###Weights for batch norm + weights { + name: "d1fc1_w0" + } + weights { + name: "d1fc1_w1" + } + weights { + name: "d1fc1_w2" + } + weights { + name: "d1fc1_w3" + } + weights { + name: "d1invfc1_w0" + } + weights { + name: "d1invfc1_w1" + } + weights { + name: "d1invfc1_w2" + } + weights { + name: "d1invfc1_w3" + } + mini_batch_size: 128 + callback { + print { + interval: 10 + } + } + callback { + timer { + } + } + callback { gpu_memory_usage {} } + #callback { debug {} } + #callback { + # summary { + # dir: "." 
+ # mat_interval: 25 + # } + #} + callback { + replace_weights { + source_layers: "d1fc1_real d1fc2_real d1fc3_real d1_invfc1_real d1_invfc2_real d1_invfc3_real d1fc1_real_bn" + destination_layers: "d2fc1 d2fc2 d2fc3 d2_invfc1 d2_invfc2 d2_invfc3 d2fc1_bn" + batch_interval: 1 + } + } + #callback { + # ltfb { + # round_size: 100 + # eval_metrics: "l_l2_y_eval" + # increasing_metric_mode: false + # weights_tosend: "gen1fc1linearity gen1fc1bias gen1fc2linearity gen1fc2bias gen1fc3linearity gen1fc3bias gen1fc4linearity gen1fc4bias gen2fc1_linearity_weights gen2fc1_bias_weights gen2fc2_linearity_weights gen2fc2_bias_weights gen2fc3_linearity_weights gen2fc3_bias_weights gen2fc4_linearity_weights gen2fc4_bias_weights" + + # } + # } + block_size: 256 + ####For metric, loss per individual sample + layer { + name: "fw_latent_loss" + data_layout: "data_parallel" + parents: "param_data_id encodefc4 l_l2_x l_l2_y L_cyc_x" + concatenation { + } + } + + callback { save_model { dir: "model" } } + ##########X cyclic loss, input to this path is Y_fake (gen1fc4) from fw model + #### Shares weight with path that takes real/encoder (latent) image + layer { + fully_connected { + #num_neurons: 64 + num_neurons: 16 + has_bias: true + } + name: "gen2fc1_cyclic" + data_layout: "data_parallel" + weights: "gen2fc1linearity gen2fc1bias" + parents: "gen1fc4" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu1_cyclic" + data_layout: "data_parallel" + parents: "gen2fc1_cyclic" + } + layer { + fully_connected { + #num_neurons: 512 + num_neurons: 128 + has_bias: true + } + name: "gen2fc2_cyclic" + data_layout: "data_parallel" + weights: "gen2fc2linearity gen2fc2bias" + parents: "gen2leaky_relu1" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu2_cyclic" + data_layout: "data_parallel" + parents: "gen2fc2_cyclic" + } + layer { + fully_connected { + #num_neurons: 2048 + num_neurons: 512 + has_bias: true + } + name: "gen2fc3_cyclic" + data_layout: "data_parallel" + weights: 
"gen2fc3linearity gen2fc3bias" + parents: "gen2leaky_relu2_cyclic" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu3_cyclic" + data_layout: "data_parallel" + parents: "gen2fc3_cyclic" + } + layer { + fully_connected { + #num_neurons: 11 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 2 ] + has_bias: true + } + name: "gen2fc4_cyclic" + data_layout: "data_parallel" + weights: "gen2fc4linearity gen2fc4bias" + parents: "gen2leaky_relu3_cyclic" + } + layer { + name: "L_cyc_x" + data_layout: "data_parallel" + mean_absolute_error{ + } + parents: "gen2fc4_cyclic param_data_id" + } +} diff --git a/model_zoo/models/jag/wae_cycle_gan/cycle_gan_only.prototext b/model_zoo/models/jag/wae_cycle_gan/cycle_gan_only.prototext new file mode 100644 index 00000000000..9809f5b352c --- /dev/null +++ b/model_zoo/models/jag/wae_cycle_gan/cycle_gan_only.prototext @@ -0,0 +1,974 @@ +model { + name: "cycgan_model" + shareable_training_data_reader:false + serialize_background_io: true + procs_per_trainer:0 + objective_function { + l2_weight_regularization { + scale_factor: 0.0001 + } + layer_term { + scale_factor: 1.0 + layer: "disc1_real_bce" + } + layer_term { + scale_factor: 1.0 + layer: "disc1_fake_bce" + } + layer_term { + #scale_factor: 0.05 + scale_factor: 0.01 + layer: "g_adv1_bce" + } + layer_term { + #scale_factor: 0.025 + scale_factor: 1.0 + layer: "l_l2_y" + } + layer_term { + scale_factor: 1.0 + layer: "disc1_inv_real_bce" + } + layer_term { + scale_factor: 1.0 + layer: "disc1_inv_fake_bce" + } + layer_term { + #scale_factor: 0.05 + scale_factor: 0.01 + layer: "g_inv_adv1_bce" + } + layer_term { + #scale_factor: 0.025 + scale_factor: 1.0 + layer: "l_l2_x" + } + layer_term { + scale_factor: 0.1 + layer: "L_cyc_x" + } + } + num_epochs: 40 + super_steps: 10 + metric { + layer_metric { + name: "fw_latent_loss" + layer: "l_l2_y" + } + } + metric { + layer_metric { + name: "inv_l1_loss" + layer: "l_l2_x" + } + } + metric { + layer_metric { 
+ name: "X_cyclic_loss" + layer: "L_cyc_x" + } + } + data_layout: "data_parallel" + layer { + input { + io_buffer: "partitioned" + data_set_per_model: true + target_mode: "N/A" + } + name: "data" + data_layout: "data_parallel" + parents: " " + } + layer { + name: "zero" + data_layout: "data_parallel" + constant { + value: 0.0 + num_neurons: "1" + } + } + layer { + name: "one" + data_layout: "data_parallel" + constant { + value: 1.0 + num_neurons: "1" + } + } + layer { + name: "slice_data" + data_layout: "data_parallel" + parents: "data" + children: "image_data_dummy param_data_id" + #children: "image_data_id param_data_id" + slice { + #slice_points: "0 2500 2511" + get_slice_points_from_reader: "independent" + } + } + layer { + identity { + } + name: "image_data_dummy" + #name: "image_data_id" + data_layout: "data_parallel" + parents: "slice_data" + } + layer { + identity { + } + name: "param_data_id" + data_layout: "data_parallel" + parents: "slice_data" + } +########Data space end here + ###Generator starts here + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "gen1fc1" + data_layout: "data_parallel" + weights: "gen1fc1linearity gen1fc1bias" + parents: "param_data_id" + } + layer { + leaky_relu { + } + name: "gen1leaky_relu1" + data_layout: "data_parallel" + parents: "gen1fc1" + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "gen1fc2" + data_layout: "data_parallel" + weights: "gen1fc2linearity gen1fc2bias" + parents: "gen1leaky_relu1" + } + layer { + leaky_relu { + } + name: "gen1leaky_relu2" + data_layout: "data_parallel" + parents: "gen1fc2" + } + #layer { + # dropout { + # keep_prob: 0.8 + # } + # name: "gen1dropout1" + # data_layout: "data_parallel" + # parents: "gen1leaky_relu2" + #} + layer { + fully_connected { + num_neurons: 2048 + has_bias: true + } + name: "gen1fc3" + data_layout: "data_parallel" + weights: "gen1fc3linearity gen1fc3bias" + #parents: "gen1dropout1" + parents: "gen1leaky_relu2" 
+ } + layer { + leaky_relu { + } + name: "gen1leaky_relu3" + data_layout: "data_parallel" + parents: "gen1fc3" + } + layer { + fully_connected { + #num_neurons: 2500 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 1 ] + #replace image_dim with latent_dim + #num_neurons: 20 + has_bias: true + } + name: "gen1fc4" + data_layout: "data_parallel" + weights: "gen1fc4linearity gen1fc4bias" + parents: "gen1leaky_relu3" + } + #concat latenty sample (image_data_dummy) and param + layer { + name: "concat_latent_sample_n_param" + data_layout: "data_parallel" + parents: "image_data_dummy param_data_id" + concatenation { + } + } + #####Discriminator + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d1fc1_real" + data_layout: "data_parallel" + weights: "d1fc1linearity d1fc1bias" + #parents: "data" + parents: "concat_latent_sample_n_param" + } + layer { + leaky_relu { + } + name: "d1leaky_relu1_real" + data_layout: "data_parallel" + parents: "d1fc1_real" + } + layer { + parents: "d1leaky_relu1_real" + name: "d1fc1_real_bn" + weights: "d1fc1_w0 d1fc1_w1 d1fc1_w2 d1fc1_w3" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d1fc2_real" + data_layout: "data_parallel" + weights: "d1fc2linearity d1fc2bias" + #parents: "d1leaky_relu1_real" + parents: "d1fc1_real_bn" + } + layer { + leaky_relu { + } + name: "d1leaky_relu2_real" + data_layout: "data_parallel" + parents: "d1fc2_real" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d1fc3_real" + data_layout: "data_parallel" + weights: "d1fc3linearity d1fc3bias" + parents: "d1leaky_relu2_real" + } + layer { + name: "concat_gsample_n_param" + data_layout: "data_parallel" + parents: "gen1fc4 param_data_id" + children: "d1_stop_gradient d2_dummy" + concatenation { + } + } + layer { + name: "d1_stop_gradient" + data_layout: "data_parallel" + 
parents: "concat_gsample_n_param" + stop_gradient { + } + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d1fc1_fake" + data_layout: "data_parallel" + weights: "d1fc1linearity d1fc1bias" + parents: "d1_stop_gradient" + } + layer { + leaky_relu { + } + name: "d1leaky_relu1_fake" + data_layout: "data_parallel" + parents: "d1fc1_fake" + } + layer { + parents: "d1leaky_relu1_fake" + name: "d1fc1_fake_bn" + weights: "d1fc1_w0 d1fc1_w1 d1fc1_w2 d1fc1_w3" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d1fc2_fake" + data_layout: "data_parallel" + weights: "d1fc2linearity d1fc2bias" + #parents: "d1leaky_relu1_fake" + parents: "d1fc1_fake_bn" + } + layer { + leaky_relu { + } + name: "d1leaky_relu2_fake" + data_layout: "data_parallel" + parents: "d1fc2_fake" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d1fc3_fake" + data_layout: "data_parallel" + weights: "d1fc3linearity d1fc3bias" + parents: "d1leaky_relu2_fake" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_real_bce" + data_layout: "data_parallel" + parents: "d1fc3_real one" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_fake_bce" + data_layout: "data_parallel" + parents: "d1fc3_fake zero" + } + layer { + identity { + } + name: "d2_dummy" + data_layout: "data_parallel" + parents: "concat_gsample_n_param" + } + layer { + freeze: true + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d2fc1" + data_layout: "data_parallel" + parents: "d2_dummy" + } + layer { + leaky_relu { + } + name: "d2leaky_relu1" + data_layout: "data_parallel" + parents: "d2fc1" + } + layer { + parents: "d2leaky_relu1" + name: "d2fc1_bn" + freeze: true + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + freeze: true + fully_connected { + num_neurons: 64 + has_bias: true + } + name: 
"d2fc2" + data_layout: "data_parallel" + #parents: "d2leaky_relu1" + parents: "d2fc1_bn" + } + layer { + leaky_relu { + } + name: "d2leaky_relu2" + data_layout: "data_parallel" + parents: "d2fc2" + } + layer { + freeze: true + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d2fc3" + data_layout: "data_parallel" + parents: "d2leaky_relu2" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "g_adv1_bce" + data_layout: "data_parallel" + parents: "d2fc3 one" + } + layer { + name: "gsample_minus_y" + data_layout: "data_parallel" + parents: "gen1fc4 image_data_dummy" + weighted_sum { + scaling_factors: "1 -1" + } + } + layer { + name: "l_l2_y" + data_layout: "data_parallel" + l2_norm2 { + } + parents: "gsample_minus_y" + } + layer { + fully_connected { + #num_neurons: 64 + num_neurons: 16 + has_bias: true + } + name: "gen2fc1" + data_layout: "data_parallel" + weights: "gen2fc1linearity gen2fc1bias" + parents: "image_data_dummy" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu1" + data_layout: "data_parallel" + parents: "gen2fc1" + } + layer { + fully_connected { + #num_neurons: 512 + num_neurons: 128 + has_bias: true + } + name: "gen2fc2" + data_layout: "data_parallel" + weights: "gen2fc2linearity gen2fc2bias" + parents: "gen2leaky_relu1" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu2" + data_layout: "data_parallel" + parents: "gen2fc2" + } + layer { + fully_connected { + #num_neurons: 2048 + num_neurons: 512 + has_bias: true + } + name: "gen2fc3" + data_layout: "data_parallel" + weights: "gen2fc3linearity gen2fc3bias" + parents: "gen2leaky_relu2" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu3" + data_layout: "data_parallel" + parents: "gen2fc3" + } + layer { + fully_connected { + #num_neurons: 11 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 2 ] + has_bias: true + } + name: "gen2fc4" + data_layout: "data_parallel" + weights: "gen2fc4linearity gen2fc4bias" + parents: 
"gen2leaky_relu3" + } + layer { + name: "concat_param_n_img" + data_layout: "data_parallel" + parents: "param_data_id image_data_dummy" + concatenation { + } + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d1_invfc1_real" + data_layout: "data_parallel" + weights: "d1_invfc1linearity d1_invfc1bias" + parents: "concat_param_n_img" + } + layer { + leaky_relu { + } + name: "d1_invleaky_relu1_real" + data_layout: "data_parallel" + parents: "d1_invfc1_real" + } + layer { + parents: "d1_invleaky_relu1_real" + name: "d1invfc1_real_bn" + weights: "d1invfc1_w0 d1invfc1_w1 d1invfc1_w2 d1invfc1_w3" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d1_invfc2_real" + data_layout: "data_parallel" + weights: "d1_invfc2linearity d1_invfc2bias" + #parents: "d1_invleaky_relu1_real" + parents: "d1invfc1_real_bn" + } + layer { + leaky_relu { + } + name: "d1_invleaky_relu2_real" + data_layout: "data_parallel" + parents: "d1_invfc2_real" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d1_invfc3_real" + data_layout: "data_parallel" + weights: "d1_invfc3linearity d1_invfc3bias" + parents: "d1_invleaky_relu2_real" + } + layer { + name: "concat_gsample2_n_img" + data_layout: "data_parallel" + parents: "gen2fc4 image_data_dummy" + children: "d1_inv_stop_gradient d2_inv_dummy" + concatenation { + } + } + layer { + name: "d1_inv_stop_gradient" + data_layout: "data_parallel" + parents: "concat_gsample2_n_img" + stop_gradient { + } + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d1_invfc1_fake" + data_layout: "data_parallel" + weights: "d1_invfc1linearity d1_invfc1bias" + parents: "d1_inv_stop_gradient" + } + layer { + leaky_relu { + } + name: "d1_invleaky_relu1_fake" + data_layout: "data_parallel" + parents: "d1_invfc1_fake" + } + layer { + parents: "d1_invleaky_relu1_fake" + name: 
"d1invfc1_fake_bn" + weights: "d1invfc1_w0 d1invfc1_w1 d1invfc1_w2 d1invfc1_w3" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d1_invfc2_fake" + data_layout: "data_parallel" + weights: "d1_invfc2linearity d1_invfc2bias" + #parents: "d1_invleaky_relu1_fake" + parents: "d1invfc1_fake_bn" + } + layer { + leaky_relu { + } + name: "d1_invleaky_relu2_fake" + data_layout: "data_parallel" + parents: "d1_invfc2_fake" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d1_invfc3_fake" + data_layout: "data_parallel" + weights: "d1_invfc3linearity d1_invfc3bias" + parents: "d1_invleaky_relu2_fake" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_inv_real_bce" + data_layout: "data_parallel" + parents: "d1_invfc3_real one" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_inv_fake_bce" + data_layout: "data_parallel" + parents: "d1_invfc3_fake zero" + } + layer { + identity { + } + name: "d2_inv_dummy" + data_layout: "data_parallel" + parents: "concat_gsample2_n_img" + } + layer { + freeze: true + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "d2_invfc1" + data_layout: "data_parallel" + parents: "d2_inv_dummy" + } + layer { + leaky_relu { + } + name: "d2_invleaky_relu1" + data_layout: "data_parallel" + parents: "d2_invfc1" + } + layer { + freeze: true + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "d2_invfc2" + data_layout: "data_parallel" + parents: "d2_invleaky_relu1" + } + layer { + leaky_relu { + } + name: "d2_invleaky_relu2" + data_layout: "data_parallel" + parents: "d2_invfc2" + } + layer { + freeze: true + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "d2_invfc3" + data_layout: "data_parallel" + parents: "d2_invleaky_relu2" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "g_inv_adv1_bce" + data_layout: "data_parallel" + parents: 
"d2_invfc3 one" + } + #layer { + # name: "gsample2_minus_x" + # data_layout: "data_parallel" + # parents: "gen2fc4 param_data_id" + # weighted_sum { + # scaling_factors: "1 -1" + # } + #} + #@todo: replace with mean abs layer + layer { + name: "l_l2_x" + data_layout: "data_parallel" + #l2_norm2 { + #} + mean_absolute_error{ + } + #parents: "gsample2_minus_x" + parents: "gen2fc4 param_data_id" + } + weights { + name: "gen1fc1linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc1bias" + } + weights { + name: "gen1fc2linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc2bias" + } + weights { + name: "gen1fc3linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc3bias" + } + weights { + name: "gen1fc4linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc4bias" + } + weights { + name: "d1fc1linearity" + he_normal_initializer { + } + } + weights { + name: "d1fc1bias" + } + weights { + name: "d1fc2linearity" + he_normal_initializer { + } + } + weights { + name: "d1fc2bias" + } + weights { + name: "d1fc3linearity" + he_normal_initializer { + } + } + weights { + name: "d1fc3bias" + } + weights { + name: "gen2fc1linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc2linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc3linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc4linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc1bias" + } + weights { + name: "gen2fc2bias" + } + weights { + name: "gen2fc3bias" + } + weights { + name: "gen2fc4bias" + } + weights { + name: "d1_invfc1linearity" + he_normal_initializer { + } + } + weights { + name: "d1_invfc1bias" + } + weights { + name: "d1_invfc2linearity" + he_normal_initializer { + } + } + weights { + name: "d1_invfc2bias" + } + weights { + name: "d1_invfc3linearity" + he_normal_initializer { + } + } + weights { + name: "d1_invfc3bias" + } + ###Weights for batch norm + weights { + 
name: "d1fc1_w0" + } + weights { + name: "d1fc1_w1" + } + weights { + name: "d1fc1_w2" + } + weights { + name: "d1fc1_w3" + } + weights { + name: "d1invfc1_w0" + } + weights { + name: "d1invfc1_w1" + } + weights { + name: "d1invfc1_w2" + } + weights { + name: "d1invfc1_w3" + } + mini_batch_size: 128 + callback { + print { + interval: 10 + } + } + callback { + timer { + } + } + callback { gpu_memory_usage {} } + #callback { debug {} } + #callback { + # summary { + # dir: "." + # mat_interval: 25 + # } + #} + callback { + replace_weights { + source_layers: "d1fc1_real d1fc2_real d1fc3_real d1_invfc1_real d1_invfc2_real d1_invfc3_real d1fc1_real_bn" + destination_layers: "d2fc1 d2fc2 d2fc3 d2_invfc1 d2_invfc2 d2_invfc3 d2fc1_bn" + batch_interval: 1 + } + } + #callback { + # ltfb { + # round_size: 100 + # eval_metrics: "l_l2_y_eval" + # increasing_metric_mode: false + # weights_tosend: "gen1fc1linearity gen1fc1bias gen1fc2linearity gen1fc2bias gen1fc3linearity gen1fc3bias gen1fc4linearity gen1fc4bias gen2fc1_linearity_weights gen2fc1_bias_weights gen2fc2_linearity_weights gen2fc2_bias_weights gen2fc3_linearity_weights gen2fc3_bias_weights gen2fc4_linearity_weights gen2fc4_bias_weights" + + # } + # } + block_size: 256 + ####For metric, loss per individual sample + layer { + name: "fw_latent_loss" + data_layout: "data_parallel" + parents: "param_data_id l_l2_x l_l2_y L_cyc_x" + concatenation { + } + } + + #callback { save_model { dir: "model" } } + ##########X cyclic loss, input to this path is Y_fake (gen1fc4) from fw model + #### Shares weight with path that takes real/encoder (latent) image + layer { + fully_connected { + #num_neurons: 64 + num_neurons: 16 + has_bias: true + } + name: "gen2fc1_cyclic" + data_layout: "data_parallel" + weights: "gen2fc1linearity gen2fc1bias" + parents: "gen1fc4" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu1_cyclic" + data_layout: "data_parallel" + parents: "gen2fc1_cyclic" + } + layer { + fully_connected { + #num_neurons: 
512 + num_neurons: 128 + has_bias: true + } + name: "gen2fc2_cyclic" + data_layout: "data_parallel" + weights: "gen2fc2linearity gen2fc2bias" + parents: "gen2leaky_relu1_cyclic" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu2_cyclic" + data_layout: "data_parallel" + parents: "gen2fc2_cyclic" + } + layer { + fully_connected { + #num_neurons: 2048 + num_neurons: 512 + has_bias: true + } + name: "gen2fc3_cyclic" + data_layout: "data_parallel" + weights: "gen2fc3linearity gen2fc3bias" + parents: "gen2leaky_relu2_cyclic" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu3_cyclic" + data_layout: "data_parallel" + parents: "gen2fc3_cyclic" + } + layer { + fully_connected { + #num_neurons: 11 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 2 ] + has_bias: true + } + name: "gen2fc4_cyclic" + data_layout: "data_parallel" + weights: "gen2fc4linearity gen2fc4bias" + parents: "gen2leaky_relu3_cyclic" + } + layer { + name: "L_cyc_x" + data_layout: "data_parallel" + mean_absolute_error{ + } + parents: "gen2fc4_cyclic param_data_id" + } +} diff --git a/model_zoo/models/jag/wae_cycle_gan/wae.prototext b/model_zoo/models/jag/wae_cycle_gan/wae.prototext new file mode 100644 index 00000000000..e4c42f92551 --- /dev/null +++ b/model_zoo/models/jag/wae_cycle_gan/wae.prototext @@ -0,0 +1,734 @@ +model { + random_init_models_differently: true + serialize_background_io: true + name: "wae_model" + objective_function { + l2_weight_regularization { + scale_factor: 0.0001 + } + layer_term { + scale_factor: 1.0 + layer: "disc1_real_bce" + } + layer_term { + scale_factor: 1.0 + layer: "disc1_fake_bce" + } + layer_term { + #lam = 0.01 + scale_factor: 0.01 + layer: "g_adv1_bce" + } + layer_term { + scale_factor: 1.0 + layer: "img_loss" + } + layer_term { + scale_factor: 1.0 + layer: "rec_error" + } + } + metric { + layer_metric { + name: "wae_loss" + layer: "img_loss" + } + } + num_epochs: 40 + data_layout: "data_parallel" + layer { + input {
io_buffer: "partitioned" + data_set_per_model: true + target_mode: "N/A" + } + name: "data1" + data_layout: "data_parallel" + } + #z or sample_z + #@todo z = -1+2*np.random.rand(batch_size, zdim=20) + layer { + name: "sample_z" + data_layout: "data_parallel" + gaussian { + mean: 0.0 + stdev: 1.0 + neuron_dims: "20" + } + } + + layer { + name: "zero" + data_layout: "data_parallel" + constant { + value: 0.0 + num_neurons: "1" + } + } + layer { + name: "one" + data_layout: "data_parallel" + constant { + value: 1.0 + num_neurons: "1" + } + } + + layer { + name: "slice_data" + data_layout: "data_parallel" + parents: "data1" + children: "image_data_dummy param_data_id" + slice { + get_slice_points_from_reader: "independent" + } + } + layer { + identity { + } + name: "image_data_dummy" + data_layout: "data_parallel" + parents: "slice_data" + } + layer { + identity { + } + name: "param_data_id" + data_layout: "data_parallel" + parents: "slice_data" + } + + #concate image data with sample_z + layer { + name: "concat_y_n_samplez" + data_layout: "data_parallel" + parents: "image_data_dummy sample_z" + concatenation { + } + } + + ###generator == encoder + layer { + fully_connected { + #num_neurons: 32 + num_neurons: 1024 + has_bias: true + } + name: "encodefc1" + data_layout: "data_parallel" + #weights: "encodefc1linearity" + parents: "image_data_dummy" + } + layer { + elu { + } + name: "encodeleaky_relu1" + data_layout: "data_parallel" + parents: "encodefc1" + } + layer { + parents: "encodeleaky_relu1" + name: "encodefc1_bn" + data_layout: "data_parallel" + batch_normalization { + #decay: 0.99 + #scale_init: 1.0 + #bias_init: 0.0 + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "encodefc2" + data_layout: "data_parallel" + #weights: "encodefc2linearity" + parents: "encodefc1_bn" + } + layer { + tanh { + } + name: "encodeleaky_relu2" + data_layout: "data_parallel" + parents: "encodefc2" + } + layer { + parents: 
"encodeleaky_relu2" + name: "encodefc2_bn" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 32 + has_bias: true + } + name: "encodefc3" + data_layout: "data_parallel" + #weights: "encodefc3linearity" + parents: "encodefc2_bn" + } + layer { + tanh { + } + name: "encodeleaky_relu3" + data_layout: "data_parallel" + parents: "encodefc3" + } + layer { + parents: "encodeleaky_relu3" + name: "encodefc3_bn" + data_layout: "data_parallel" + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + #gen output is latent dim + num_neurons: 20 + has_bias: true + } + #z_sample + name: "encodefc4" + data_layout: "data_parallel" + #weights: "encodefc4linearity" + parents: "encodefc3_bn" + } + + ####Discriminator + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "wae_d1fc1_real" + data_layout: "data_parallel" + weights: "wae_d1fc1linearity wae_d1fc1bias" + parents: "concat_y_n_samplez" + } + layer { + leaky_relu { + } + #@todo: use "acts" for activation instead of actualy type + name: "wae_d1leaky_relu1_real" + data_layout: "data_parallel" + parents: "wae_d1fc1_real" + } + layer { + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "wae_d1fc2_real" + data_layout: "data_parallel" + weights: "wae_d1fc2linearity wae_d1fc2bias" + parents: "wae_d1leaky_relu1_real" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu2_real" + data_layout: "data_parallel" + parents: "wae_d1fc2_real" + } + layer { + fully_connected { + num_neurons: 128 + has_bias: true + } + name: "wae_d1fc3_real" + data_layout: "data_parallel" + weights: "wae_d1fc3linearity wae_d1fc3bias" + parents: "wae_d1leaky_relu2_real" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu3_real" + data_layout: "data_parallel" + parents: "wae_d1fc3_real" + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "wae_d1fc4_real" + data_layout: "data_parallel" + 
weights: "wae_d1fc4linearity wae_d1fc4bias" + parents: "wae_d1leaky_relu3_real" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu4_real" + data_layout: "data_parallel" + parents: "wae_d1fc4_real" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + ## This is D_prior + name: "wae_d1fc5_real" + data_layout: "data_parallel" + weights: "wae_d1fc5linearity wae_d1fc5bias" + parents: "wae_d1leaky_relu4_real" + } + layer { + name: "concat_y_n_zsample" + data_layout: "data_parallel" + parents: "image_data_dummy encodefc4" + children: "wae_d1_stop_gradient wae_d2_dummy" + concatenation { + } + } + layer { + name: "wae_d1_stop_gradient" + data_layout: "data_parallel" + parents: "concat_y_n_zsample" + stop_gradient { + } + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "wae_d1fc1_fake" + data_layout: "data_parallel" + weights: "wae_d1fc1linearity wae_d1fc1bias" + parents: "wae_d1_stop_gradient" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu1_fake" + data_layout: "data_parallel" + parents: "wae_d1fc1_fake" + } + layer { + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "wae_d1fc2_fake" + data_layout: "data_parallel" + weights: "wae_d1fc2linearity wae_d1fc2bias" + parents: "wae_d1leaky_relu1_fake" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu2_fake" + data_layout: "data_parallel" + parents: "wae_d1fc2_fake" + } + layer { + fully_connected { + num_neurons: 128 + has_bias: true + } + name: "wae_d1fc3_fake" + data_layout: "data_parallel" + weights: "wae_d1fc3linearity wae_d1fc3bias" + parents: "wae_d1leaky_relu2_fake" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu3_fake" + data_layout: "data_parallel" + parents: "wae_d1fc3_fake" + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "wae_d1fc4_fake" + data_layout: "data_parallel" + weights: "wae_d1fc4linearity wae_d1fc4bias" + parents: "wae_d1leaky_relu3_fake" + } + layer { + leaky_relu { 
+ } + name: "wae_d1leaky_relu4_fake" + data_layout: "data_parallel" + parents: "wae_d1fc4_fake" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + #This is D_sample + name: "wae_d1fc5_fake" + data_layout: "data_parallel" + weights: "wae_d1fc5linearity wae_d1fc5bias" + parents: "wae_d1leaky_relu4_fake" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_real_bce" + data_layout: "data_parallel" + parents: "wae_d1fc5_real one" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_fake_bce" + data_layout: "data_parallel" + parents: "wae_d1fc5_fake zero" + } + layer { + identity { + } + name: "wae_d2_dummy" + data_layout: "data_parallel" + parents: "concat_y_n_zsample" + } + layer { + freeze: true + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "wae_d2fc1" + data_layout: "data_parallel" + parents: "wae_d2_dummy" + } + layer { + leaky_relu { + } + name: "wae_d2leaky_relu1" + data_layout: "data_parallel" + parents: "wae_d2fc1" + } + layer { + freeze: true + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "wae_d2fc2" + data_layout: "data_parallel" + parents: "wae_d2leaky_relu1" + } + layer { + leaky_relu { + } + name: "wae_d2leaky_relu2" + data_layout: "data_parallel" + parents: "wae_d2fc2" + } + layer { + freeze: true + fully_connected { + num_neurons: 128 + has_bias: true + } + name: "wae_d2fc3" + data_layout: "data_parallel" + parents: "wae_d2leaky_relu2" + } + layer { + leaky_relu { + } + name: "wae_d2leaky_relu3" + data_layout: "data_parallel" + parents: "wae_d2fc3" + } + layer { + freeze: true + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "wae_d2fc4" + data_layout: "data_parallel" + parents: "wae_d2leaky_relu3" + } + layer { + leaky_relu { + } + name: "wae_d2leaky_relu4" + data_layout: "data_parallel" + parents: "wae_d2fc4" + } + layer { + freeze: true + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "wae_d2fc5" + data_layout: "data_parallel" + 
parents: "wae_d2leaky_relu4" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "g_adv1_bce" + data_layout: "data_parallel" + parents: "wae_d2fc5 one" + } + layer { + name: "decode0_minus_y" + data_layout: "data_parallel" + parents: "decode0 image_data_dummy" + weighted_sum { + scaling_factors: "1 -1" + } + } + #L2loss + layer { + l2_norm2 { + } + name: "rec_error" + data_layout: "data_parallel" + parents: "decode0_minus_y" + } + + layer { + parents: "decode0 image_data_dummy" + name: "img_loss" + data_layout: "data_parallel" + mean_squared_error {} + } + + + ###################### + # Decoder + ###################### + + # decode3 + layer { + parents: "encodefc4" + name: "decode3" + weights: "decode3linearity decode3bias" + data_layout: "data_parallel" + fully_connected { + num_neurons: 32 + has_bias: true + } + } + layer { + parents: "decode3" + name: "decode3_tanh" + data_layout: "data_parallel" + elu {} + } + layer { + parents: "decode3_tanh" + name: "decode3_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode2 + layer { + parents: "decode3_dropout" + name: "decode2" + weights: "decode2linearity decode2bias" + data_layout: "data_parallel" + fully_connected { + num_neurons: 256 + has_bias: true + } + } + layer { + parents: "decode2" + name: "decode2_tanh" + data_layout: "data_parallel" + tanh {} + } + layer { + parents: "decode2_tanh" + name: "decode2_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode1 + layer { + parents: "decode2_dropout" + name: "decode1" + weights: "decode1linearity decode1bias" + data_layout: "data_parallel" + fully_connected { + num_neurons: 1024 + has_bias: true + } + } + layer { + parents: "decode1" + name: "decode1_elu" + data_layout: "data_parallel" + tanh { + } + } + layer { + parents: "decode1_elu" + name: "decode1_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode0 + layer { + parents: "decode1_dropout" + name: 
"decode0" + weights: "decode0linearity decode0bias" + data_layout: "data_parallel" + fully_connected { + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 1 ] + has_bias: true + } + } + + ###################### + ###@todo : delete not used, LTFB uses encodefc*linearity_weights instead + weights { + name: "encodefc1linearity" + he_normal_initializer { + } + } + weights { + name: "encodefc2linearity" + he_normal_initializer { + } + } + weights { + name: "encodefc3linearity" + he_normal_initializer { + } + } + weights { + name: "encodefc4linearity" + he_normal_initializer { + } + } + + #Decoder weights here to be used in WAE+cyclic model + weights { + name: "decode0linearity" + he_normal_initializer { + } + } + weights { + name: "decode0bias" + } + weights { + name: "decode1linearity" + he_normal_initializer { + } + } + weights { + name: "decode1bias" + } + weights { + name: "decode2linearity" + he_normal_initializer { + } + } + weights { + name: "decode2bias" + } + weights { + name: "decode3linearity" + he_normal_initializer { + } + } + weights { + name: "decode3bias" + } + + + #Discriminator (shared) + weights { + name: "wae_d1fc1linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc1bias" + } + weights { + name: "wae_d1fc2linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc2bias" + } + weights { + name: "wae_d1fc3linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc3bias" + } + weights { + name: "wae_d1fc4linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc4bias" + } + weights { + name: "wae_d1fc5linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc5bias" + } + mini_batch_size: 128 + callback { + print { + interval: 1 + } + } + callback { + timer { + } + } + callback { + replace_weights { + source_layers: "wae_d1fc1_real wae_d1fc2_real wae_d1fc3_real wae_d1fc4_real wae_d1fc5_real" + destination_layers: "wae_d2fc1 wae_d2fc2 
wae_d2fc3 wae_d2fc4 wae_d2fc5" + batch_interval: 2 + } + } + #callback { + # ltfb { + # batch_interval: 100 + # low_score_wins: true + # metric: "l_l2_y_eval" + # weights: "encodefc1_linearity_weights encodefc1_bias_weights encodefc2_linearity_weights encodefc2_bias_weights encodefc3_linearity_weights encodefc3_bias_weights encodefc4_linearity_weights encodefc4_bias_weights" + # } + + # } + + callback { save_model { dir: "model" } } + block_size: 256 + procs_per_trainer: 0 +} diff --git a/model_zoo/models/jag/wae_cycle_gan/wae_fw_inv.prototext b/model_zoo/models/jag/wae_cycle_gan/wae_fw_inv.prototext new file mode 100644 index 00000000000..7b4a7869cda --- /dev/null +++ b/model_zoo/models/jag/wae_cycle_gan/wae_fw_inv.prototext @@ -0,0 +1,923 @@ +#Augmented version of ae_cyc.prototext so we can see ae_loss, fw_latent_loss and fw_out_loss all in the same file instead of 3 files, a request from the MLSI ML team. This augmentation involves replicating blocks for fw_model from cycle gan and encode from autoencoder.
+model { + name: "wae_fw_inv_model" + shareable_training_data_reader:false + serialize_background_io: true + data_layout: "data_parallel" + mini_batch_size: 256 + block_size: 4096 + num_epochs: 1 + num_parallel_readers: 0 + procs_per_trainer: 0 + + ################################################### + # Objective function + ################################################### + + objective_function { + layer_term { layer: "mean_squared_error" } + #layer_term { layer: "kl_divergence" } + l2_weight_regularization { + scale_factor: 1e-4 + } + } + + ################################################### + # Metrics + ################################################### + + metric { + layer_metric { + name: "wae_loss" + #layer: "mean_squared_error" + layer: "ae_loss" + } + } + metric { + layer_metric { + name: "fw_latent_loss" + layer: "fw_latent_loss" + } + } + metric { + layer_metric { + name: "fw_out_loss" + layer: "fw_out_loss" + } + } + metric { + layer_metric { + name: "inv_loss" + layer: "inv_loss" + } + } + metric { + layer_metric { + name: "L_cyc_x_loss" + layer: "L_cyc_x" + } + } + ################################################### + # Callbacks + ################################################### + callback { + print { + interval: 1 + } + } + callback { timer {} } + + ################################################### + # start of layers + ################################################### + + ###################### + # Data + ###################### + #Layer from cycle GAN + layer { + input { + io_buffer: "partitioned" + target_mode: "N/A" + } + name: "data" + data_layout: "data_parallel" + parents: " " + } + layer { + name: "slice_data" + data_layout: "data_parallel" + parents: "data" + children: "image_data_id param_data_id" + slice { + #slice_points: "0 16384 16389" + get_slice_points_from_reader: "independent" + } + } + layer { + identity { + } + name: "image_data_id" + data_layout: "data_parallel" + parents: "slice_data" + } + layer { + identity { + } + 
name: "param_data_id" + data_layout: "data_parallel" + parents: "slice_data" + } + layer { + fully_connected { + num_neurons: 64 + #num_neurons: 256 + has_bias: true + } + name: "gen1fc1" + data_layout: "data_parallel" + weights: "gen1fc1linearity gen1fc1bias" + parents: "param_data_id" + } + layer { + leaky_relu { + } + name: "gen1leaky_relu1_1" + data_layout: "data_parallel" + parents: "gen1fc1" + } + layer { + fully_connected { + #num_neurons: 2048 + num_neurons: 512 + has_bias: true + } + name: "gen1fc2" + data_layout: "data_parallel" + weights: "gen1fc2linearity gen1fc2bias" + parents: "gen1leaky_relu1_1" + } + layer { + leaky_relu { + } + name: "gen1leaky_relu2_1" + data_layout: "data_parallel" + parents: "gen1fc2" + } + layer { + dropout { + keep_prob: 0.8 + } + name: "gen1dropout1_1" + data_layout: "data_parallel" + parents: "gen1leaky_relu2_1" + } + layer { + fully_connected { + #num_neurons: 8192 + num_neurons: 2048 + has_bias: true + } + name: "gen1fc3" + data_layout: "data_parallel" + weights: "gen1fc3linearity gen1fc3bias" + parents: "gen1dropout1_1" + } + layer { + leaky_relu { + } + name: "gen1leaky_relu3_1" + data_layout: "data_parallel" + parents: "gen1fc3" + } + layer { + fully_connected { + #num_neurons: 16384 + #latent_dim + num_neurons: 20 + has_bias: true + } + name: "gen1fc4" + data_layout: "data_parallel" + weights: "gen1fc4linearity gen1fc4bias" + parents: "gen1leaky_relu3_1" + } + + weights { + name: "gen1fc1linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc1bias" + } + weights { + name: "gen1fc2linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc2bias" + } + weights { + name: "gen1fc3linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc3bias" + } + weights { + name: "gen1fc4linearity" + he_normal_initializer { + } + } + weights { + name: "gen1fc4bias" + } + + ###Encoder from WAE + ######################### + layer { + fully_connected { + #num_neurons: 32 + num_neurons: 1024 + 
has_bias: true + } + name: "encodefc1" + data_layout: "data_parallel" + freeze: true + #weights: "encodefc1linearity" + parents: "image_data_id" + } + layer { + elu { + } + name: "encodeleaky_relu1" + data_layout: "data_parallel" + parents: "encodefc1" + } + layer { + parents: "encodeleaky_relu1" + name: "encodefc1_bn" + data_layout: "data_parallel" + freeze: true + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "encodefc2" + data_layout: "data_parallel" + freeze: true + #weights: "encodefc2linearity" + parents: "encodefc1_bn" + } + layer { + tanh { + } + name: "encodeleaky_relu2" + data_layout: "data_parallel" + parents: "encodefc2" + } + layer { + parents: "encodeleaky_relu2" + name: "encodefc2_bn" + data_layout: "data_parallel" + freeze: true + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + num_neurons: 32 + has_bias: true + } + name: "encodefc3" + data_layout: "data_parallel" + freeze: true + #weights: "encodefc3linearity" + parents: "encodefc2_bn" + } + layer { + tanh { + } + name: "encodeleaky_relu3" + data_layout: "data_parallel" + parents: "encodefc3" + } + layer { + parents: "encodeleaky_relu3" + name: "encodefc3_bn" + data_layout: "data_parallel" + freeze: true + batch_normalization { + epsilon: 1e-3 + } + } + layer { + fully_connected { + #gen output is latent dim + num_neurons: 20 + has_bias: true + } + #z_sample + name: "encodefc4" + data_layout: "data_parallel" + #weights: "encodefc4linearity" + freeze: true + parents: "encodefc3_bn" + } + ##################### + + layer { + parents: "encodefc4" + #name: "sample" + ###This is actually sample in latent space, call image_data_dummy for legacy + name: "image_data_dummy" + data_layout: "data_parallel" + identity {} + } + ####output of encoder goes to decoder and cycGAN duplicates + ###################### + # Decoder for foward output loss + + # decode3 + layer { + parents: "gen1fc4" + name: 
"decode3" + data_layout: "data_parallel" + weights: "decode3linearity decode3bias" + fully_connected { + num_neurons: 32 + has_bias: true + } + } + layer { + parents: "decode3" + name: "decode3_tanh" + data_layout: "data_parallel" + elu {} + } + layer { + parents: "decode3_tanh" + name: "decode3_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode2 + layer { + parents: "decode3_dropout" + name: "decode2" + data_layout: "data_parallel" + weights: "decode2linearity decode2bias" + fully_connected { + num_neurons: 256 + has_bias: true + } + } + layer { + parents: "decode2" + name: "decode2_tanh" + data_layout: "data_parallel" + tanh {} + } + layer { + parents: "decode2_tanh" + name: "decode2_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode1 + layer { + parents: "decode2_dropout" + name: "decode1" + data_layout: "data_parallel" + weights: "decode1linearity decode1bias" + fully_connected { + num_neurons: 1024 + has_bias: true + } + } + layer { + parents: "decode1" + name: "decode1_elu" + data_layout: "data_parallel" + tanh { + } + } + layer { + parents: "decode1_elu" + name: "decode1_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode0 + layer { + parents: "decode1_dropout" + name: "decode0" + data_layout: "data_parallel" + weights: "decode0linearity decode0bias" + fully_connected { + #num_neurons: 16384 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 1 ] + has_bias: true + } + } + + ###################### + #Need this? 
+ #layer { + # parents: "decode0" + # name: "sigmoid" + # data_layout: "data_parallel" + # sigmoid {} + #} + + ###################### + # Reconstruction + ###################### + + layer { + #parents: "sigmoid" + parents: "decode0" + name: "reconstruction" + data_layout: "data_parallel" + split {} + } + layer { + parents: "reconstruction image_data_id" + #name: "binary_cross_entropy" + name: "mean_squared_error" + data_layout: "data_parallel" + #binary_cross_entropy {} + mean_squared_error {} + } + layer { + parents: "reconstruction image_data_id" + name: "fw_out_loss" + data_layout: "data_parallel" + mean_squared_error {} + } + + ####Decoder weights + weights { + name: "decode0linearity" + he_normal_initializer { + } + } + weights { + name: "decode0bias" + } + + weights { + name: "decode1linearity" + he_normal_initializer { + } + } + weights { + name: "decode1bias" + } + weights { + name: "decode2linearity" + he_normal_initializer { + } + } + weights { + name: "decode2bias" + } + weights { + name: "decode3linearity" + he_normal_initializer { + } + } + weights { + name: "decode3bias" + } + +#Decoder duplicated for ae_loss + # decode3 + layer { + parents: "image_data_dummy" + name: "ae_decode3" + data_layout: "data_parallel" + weights: "decode3linearity decode3bias" + fully_connected { + num_neurons: 32 + has_bias: true + } + } + layer { + parents: "ae_decode3" + name: "ae_decode3_tanh" + data_layout: "data_parallel" + elu {} + } + layer { + parents: "ae_decode3_tanh" + name: "ae_decode3_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode2 + layer { + parents: "ae_decode3_dropout" + name: "ae_decode2" + data_layout: "data_parallel" + weights: "decode2linearity decode2bias" + fully_connected { + num_neurons: 256 + has_bias: true + } + } + layer { + parents: "ae_decode2" + name: "ae_decode2_tanh" + data_layout: "data_parallel" + tanh {} + } + layer { + parents: "ae_decode2_tanh" + name: "ae_decode2_dropout" + data_layout: 
"data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode1 + layer { + parents: "ae_decode2_dropout" + name: "ae_decode1" + data_layout: "data_parallel" + weights: "decode1linearity decode1bias" + fully_connected { + num_neurons: 1024 + has_bias: true + } + } + layer { + parents: "ae_decode1" + name: "ae_decode1_elu" + data_layout: "data_parallel" + tanh { + } + } + layer { + parents: "ae_decode1_elu" + name: "ae_decode1_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode0 + layer { + parents: "ae_decode1_dropout" + name: "ae_decode0" + data_layout: "data_parallel" + weights: "decode0linearity decode0bias" + fully_connected { + #num_neurons: 16384 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 1 ] + has_bias: true + } + } + + #layer { + # parents: "ae_decode0" + # name: "ae_sigmoid" + # data_layout: "data_parallel" + # sigmoid {} + #} + + ###################### + # Reconstruction + ###################### + + layer { + parents: "ae_decode0" + #parents: "ae_sigmoid" + name: "ae_reconstruction" + data_layout: "data_parallel" + split {} + } + layer { + parents: "ae_reconstruction image_data_id" + name: "ae_loss" + data_layout: "data_parallel" + mean_squared_error {} + } + + ###Cycle GAN duplicated for latent loss dump + #Takes output of encoder as input + layer { + fully_connected { + num_neurons: 64 + #num_neurons: 256 + has_bias: true + } + name: "latent_gen1fc1" + data_layout: "data_parallel" + weights: "gen1fc1linearity gen1fc1bias" + parents: "param_data_id" + } + layer { + leaky_relu { + } + name: "latent_gen1leaky_relu1_1" + data_layout: "data_parallel" + parents: "latent_gen1fc1" + } + layer { + fully_connected { + #num_neurons: 2048 + num_neurons: 512 + has_bias: true + } + name: "latent_gen1fc2" + data_layout: "data_parallel" + weights: "gen1fc2linearity gen1fc2bias" + parents: "latent_gen1leaky_relu1_1" + } + layer { + leaky_relu { + } + name: "latent_gen1leaky_relu2_1" + 
data_layout: "data_parallel" + parents: "latent_gen1fc2" + } + layer { + dropout { + keep_prob: 0.8 + } + name: "latent_gen1dropout1_1" + data_layout: "data_parallel" + parents: "latent_gen1leaky_relu2_1" + } + layer { + fully_connected { + #num_neurons: 8192 + num_neurons: 2048 + has_bias: true + } + name: "latent_gen1fc3" + data_layout: "data_parallel" + weights: "gen1fc3linearity gen1fc3bias" + parents: "latent_gen1dropout1_1" + } + layer { + leaky_relu { + } + name: "latent_gen1leaky_relu3_1" + data_layout: "data_parallel" + parents: "latent_gen1fc3" + } + layer { + fully_connected { + #num_neurons: 16384 + #latent_dim + num_neurons: 20 + has_bias: true + } + name: "latent_gen1fc4" + data_layout: "data_parallel" + weights: "gen1fc4linearity gen1fc4bias" + parents: "latent_gen1leaky_relu3_1" + } + + layer { + name: "gsample_minus_latentsample" + data_layout: "data_parallel" + parents: "latent_gen1fc4 image_data_dummy" + weighted_sum { + scaling_factors: "1 -1" + } + } + layer { + name: "fw_latent_loss" + data_layout: "data_parallel" + l2_norm2 { + } + parents: "gsample_minus_latentsample" + } + + #####Inverse loss from cycle GAN + #### latent space (image_data_dummy) -> pred X'(gen2fc4) + layer { + fully_connected { + num_neurons: 16 + has_bias: true + } + name: "gen2fc1" + data_layout: "data_parallel" + weights: "gen2fc1linearity gen2fc1bias" + parents: "image_data_dummy" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu1" + data_layout: "data_parallel" + parents: "gen2fc1" + } + layer { + fully_connected { + num_neurons: 128 + has_bias: true + } + name: "gen2fc2" + data_layout: "data_parallel" + weights: "gen2fc2linearity gen2fc2bias" + parents: "gen2leaky_relu1" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu2" + data_layout: "data_parallel" + parents: "gen2fc2" + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "gen2fc3" + data_layout: "data_parallel" + weights: "gen2fc3linearity gen2fc3bias" + parents: 
"gen2leaky_relu2" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu3" + data_layout: "data_parallel" + parents: "gen2fc3" + } + layer { + fully_connected { + #num_neurons: 11 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 2 ] + has_bias: true + } + name: "gen2fc4" + data_layout: "data_parallel" + weights: "gen2fc4linearity gen2fc4bias" + parents: "gen2leaky_relu3" + } + + + #layer { + # name: "gsample2_minus_x" + # data_layout: "data_parallel" + # parents: "gen2fc4 param_data_id" + # weighted_sum { + # scaling_factors: "1 -1" + # } + #} + ### ||X-X'|| + layer { + #name: "l_l2_x" + name: "inv_loss" + data_layout: "data_parallel" + #l2_norm2 { + #} + mean_absolute_error{ } + parents: "gen2fc4 param_data_id" + #parents: "gsample2_minus_x" + } + #layer { + # name: "abs_inv_loss" + # data_layout: "data_parallel" + # abs { + # } + # parents: "gsample2_minus_x" + #} + ##########X cyclic loss, input to this path is Y_fake (gen1fc4) from fw model + #### Shares weight with path that takes real/encoder (latent) image + layer { + fully_connected { + #num_neurons: 64 + num_neurons: 16 + has_bias: true + } + name: "gen2fc1_cyclic" + data_layout: "data_parallel" + weights: "gen2fc1linearity gen2fc1bias" + parents: "gen1fc4" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu1_cyclic" + data_layout: "data_parallel" + parents: "gen2fc1_cyclic" + } + layer { + fully_connected { + #num_neurons: 512 + num_neurons: 128 + has_bias: true + } + name: "gen2fc2_cyclic" + data_layout: "data_parallel" + weights: "gen2fc2linearity gen2fc2bias" + parents: "gen2leaky_relu1_cyclic" + } + layer { + leaky_relu { + } + name: "gen2leaky_relu2_cyclic" + data_layout: "data_parallel" + parents: "gen2fc2_cyclic" + } + layer { + fully_connected { + #num_neurons: 2048 + num_neurons: 512 + has_bias: true + } + name: "gen2fc3_cyclic" + data_layout: "data_parallel" + weights: "gen2fc3linearity gen2fc3bias" + parents: "gen2leaky_relu2_cyclic" + } + layer {
leaky_relu { + } + name: "gen2leaky_relu3_cyclic" + data_layout: "data_parallel" + parents: "gen2fc3_cyclic" + } + layer { + fully_connected { + #num_neurons: 11 + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 2 ] + has_bias: true + } + name: "gen2fc4_cyclic" + data_layout: "data_parallel" + weights: "gen2fc4linearity gen2fc4bias" + parents: "gen2leaky_relu3_cyclic" + } + layer { + name: "L_cyc_x" + data_layout: "data_parallel" + mean_absolute_error{ + } + parents: "gen2fc4_cyclic param_data_id" + } + + ####For metric, loss per individual sample + layer { + name: "ae_latent_out_losses" + data_layout: "data_parallel" + parents: "param_data_id encodefc4 ae_loss fw_latent_loss fw_out_loss inv_loss L_cyc_x" + concatenation { + } + } + callback { + save_model { + dir: "model1" + disable_save_after_training: true + } + } + weights { + name: "gen2fc1linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc2linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc3linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc4linearity" + he_normal_initializer { + } + } + weights { + name: "gen2fc1bias" + } + weights { + name: "gen2fc2bias" + } + weights { + name: "gen2fc3bias" + } + weights { + name: "gen2fc4bias" + } + ################################################### + # end of layers + ################################################### +} diff --git a/model_zoo/models/jag/wae_cycle_gan/wae_nobn.prototext b/model_zoo/models/jag/wae_cycle_gan/wae_nobn.prototext new file mode 100644 index 00000000000..6ad053e25a5 --- /dev/null +++ b/model_zoo/models/jag/wae_cycle_gan/wae_nobn.prototext @@ -0,0 +1,734 @@ +model { + random_init_models_differently: true + name: "wae_model" + serialize_background_io: true + objective_function { + l2_weight_regularization { + scale_factor: 0.0001 + } + layer_term { + scale_factor: 1.0 + layer: "disc1_real_bce" + } + layer_term { + scale_factor: 1.0 + layer: 
"disc1_fake_bce" + } + layer_term { + #lam = 0.01 + scale_factor: 0.01 + layer: "g_adv1_bce" + } + layer_term { + scale_factor: 1.0 + layer: "img_loss" + } + layer_term { + scale_factor: 1.0 + layer: "rec_error" + } + } + metric { + layer_metric { + layer: "img_loss" + } + } + num_epochs: 6 + data_layout: "data_parallel" + layer { + input { + io_buffer: "partitioned" + data_set_per_model: true + target_mode: "N/A" + } + name: "data1" + data_layout: "data_parallel" + } + #z or sample_z + #@todo z = -1+2*np.random.rand(batch_size, zdim=20) + layer { + name: "sample_z" + data_layout: "data_parallel" + gaussian { + mean: 0.0 + stdev: 1.0 + neuron_dims: "20" + } + } + + layer { + name: "zero" + data_layout: "data_parallel" + constant { + value: 0.0 + num_neurons: "1" + } + } + layer { + name: "one" + data_layout: "data_parallel" + constant { + value: 1.0 + num_neurons: "1" + } + } + + layer { + name: "slice_data" + data_layout: "data_parallel" + parents: "data1" + children: "image_data_dummy param_data_id" + slice { + get_slice_points_from_reader: "independent" + } + } + layer { + identity { + } + name: "image_data_dummy" + data_layout: "data_parallel" + parents: "slice_data" + } + layer { + identity { + } + name: "param_data_id" + data_layout: "data_parallel" + parents: "slice_data" + } + + #concate image data with sample_z + layer { + name: "concat_y_n_samplez" + data_layout: "data_parallel" + parents: "image_data_dummy sample_z" + concatenation { + } + } + + ###generator == encoder + layer { + fully_connected { + #num_neurons: 32 + num_neurons: 1024 + has_bias: true + } + name: "encodefc1" + data_layout: "data_parallel" + #weights: "encodefc1linearity" + parents: "image_data_dummy" + } + layer { + elu { + } + name: "encodeleaky_relu1" + data_layout: "data_parallel" + parents: "encodefc1" + } + layer { + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "encodefc2" + data_layout: "data_parallel" + #weights: "encodefc2linearity" + parents: 
"encodeleaky_relu1" + } + layer { + tanh { + } + name: "encodeleaky_relu2" + data_layout: "data_parallel" + parents: "encodefc2" + } + layer { + dropout { + keep_prob: 1.0 + } + name: "encodedropout1" + data_layout: "data_parallel" + parents: "encodeleaky_relu2" + } + layer { + fully_connected { + num_neurons: 32 + has_bias: true + } + name: "encodefc3" + data_layout: "data_parallel" + #weights: "encodefc3linearity" + parents: "encodedropout1" + } + layer { + tanh { + } + name: "encodeleaky_relu3" + data_layout: "data_parallel" + parents: "encodefc3" + } + layer { + fully_connected { + #gen output is latent dim + num_neurons: 20 + has_bias: true + } + #z_sample + name: "encodefc4" + data_layout: "data_parallel" + #weights: "encodefc4linearity" + parents: "encodeleaky_relu3" + } + + ####Discriminator + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "wae_d1fc1_real" + data_layout: "data_parallel" + weights: "wae_d1fc1linearity wae_d1fc1bias" + parents: "concat_y_n_samplez" + } + layer { + leaky_relu { + } + #@todo: use "acts" for activation instead of actualy type + name: "wae_d1leaky_relu1_real" + data_layout: "data_parallel" + parents: "wae_d1fc1_real" + } + layer { + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "wae_d1fc2_real" + data_layout: "data_parallel" + weights: "wae_d1fc2linearity wae_d1fc2bias" + parents: "wae_d1leaky_relu1_real" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu2_real" + data_layout: "data_parallel" + parents: "wae_d1fc2_real" + } + layer { + fully_connected { + num_neurons: 128 + has_bias: true + } + name: "wae_d1fc3_real" + data_layout: "data_parallel" + weights: "wae_d1fc3linearity wae_d1fc3bias" + parents: "wae_d1leaky_relu2_real" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu3_real" + data_layout: "data_parallel" + parents: "wae_d1fc3_real" + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "wae_d1fc4_real" + data_layout: 
"data_parallel" + weights: "wae_d1fc4linearity wae_d1fc4bias" + parents: "wae_d1leaky_relu3_real" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu4_real" + data_layout: "data_parallel" + parents: "wae_d1fc4_real" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + ## This is D_prior + name: "wae_d1fc5_real" + data_layout: "data_parallel" + weights: "wae_d1fc5linearity wae_d1fc5bias" + parents: "wae_d1leaky_relu4_real" + } + layer { + name: "concat_y_n_zsample" + data_layout: "data_parallel" + parents: "image_data_dummy encodefc4" + children: "wae_d1_stop_gradient wae_d2_dummy" + concatenation { + } + } + layer { + name: "wae_d1_stop_gradient" + data_layout: "data_parallel" + parents: "concat_y_n_zsample" + stop_gradient { + } + } + layer { + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "wae_d1fc1_fake" + data_layout: "data_parallel" + weights: "wae_d1fc1linearity wae_d1fc1bias" + parents: "wae_d1_stop_gradient" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu1_fake" + data_layout: "data_parallel" + parents: "wae_d1fc1_fake" + } + layer { + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "wae_d1fc2_fake" + data_layout: "data_parallel" + weights: "wae_d1fc2linearity wae_d1fc2bias" + parents: "wae_d1leaky_relu1_fake" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu2_fake" + data_layout: "data_parallel" + parents: "wae_d1fc2_fake" + } + layer { + fully_connected { + num_neurons: 128 + has_bias: true + } + name: "wae_d1fc3_fake" + data_layout: "data_parallel" + weights: "wae_d1fc3linearity wae_d1fc3bias" + parents: "wae_d1leaky_relu2_fake" + } + layer { + leaky_relu { + } + name: "wae_d1leaky_relu3_fake" + data_layout: "data_parallel" + parents: "wae_d1fc3_fake" + } + layer { + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "wae_d1fc4_fake" + data_layout: "data_parallel" + weights: "wae_d1fc4linearity wae_d1fc4bias" + parents: "wae_d1leaky_relu3_fake" + } + layer 
{ + leaky_relu { + } + name: "wae_d1leaky_relu4_fake" + data_layout: "data_parallel" + parents: "wae_d1fc4_fake" + } + layer { + fully_connected { + num_neurons: 1 + has_bias: true + } + #This is D_sample + name: "wae_d1fc5_fake" + data_layout: "data_parallel" + weights: "wae_d1fc5linearity wae_d1fc5bias" + parents: "wae_d1leaky_relu4_fake" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_real_bce" + data_layout: "data_parallel" + parents: "wae_d1fc5_real one" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "disc1_fake_bce" + data_layout: "data_parallel" + parents: "wae_d1fc5_fake zero" + } + layer { + identity { + } + name: "wae_d2_dummy" + data_layout: "data_parallel" + parents: "concat_y_n_zsample" + } + layer { + freeze: true + fully_connected { + num_neurons: 512 + has_bias: true + } + name: "wae_d2fc1" + data_layout: "data_parallel" + parents: "wae_d2_dummy" + } + layer { + leaky_relu { + } + name: "wae_d2leaky_relu1" + data_layout: "data_parallel" + parents: "wae_d2fc1" + } + layer { + freeze: true + fully_connected { + num_neurons: 256 + has_bias: true + } + name: "wae_d2fc2" + data_layout: "data_parallel" + parents: "wae_d2leaky_relu1" + } + layer { + leaky_relu { + } + name: "wae_d2leaky_relu2" + data_layout: "data_parallel" + parents: "wae_d2fc2" + } + layer { + freeze: true + fully_connected { + num_neurons: 128 + has_bias: true + } + name: "wae_d2fc3" + data_layout: "data_parallel" + parents: "wae_d2leaky_relu2" + } + layer { + leaky_relu { + } + name: "wae_d2leaky_relu3" + data_layout: "data_parallel" + parents: "wae_d2fc3" + } + layer { + freeze: true + fully_connected { + num_neurons: 64 + has_bias: true + } + name: "wae_d2fc4" + data_layout: "data_parallel" + parents: "wae_d2leaky_relu3" + } + layer { + leaky_relu { + } + name: "wae_d2leaky_relu4" + data_layout: "data_parallel" + parents: "wae_d2fc4" + } + layer { + freeze: true + fully_connected { + num_neurons: 1 + has_bias: true + } + name: "wae_d2fc5" + data_layout: 
"data_parallel" + parents: "wae_d2leaky_relu4" + } + layer { + sigmoid_binary_cross_entropy { + } + name: "g_adv1_bce" + data_layout: "data_parallel" + parents: "wae_d2fc5 one" + } + layer { + name: "decode0_minus_y" + data_layout: "data_parallel" + parents: "decode0 image_data_dummy" + weighted_sum { + scaling_factors: "1 -1" + } + } + #L2loss + layer { + l2_norm2 { + } + name: "rec_error" + data_layout: "data_parallel" + parents: "decode0_minus_y" + } + + layer { + parents: "decode0 image_data_dummy" + name: "img_loss" + data_layout: "data_parallel" + mean_squared_error {} + } + + + ###################### + # Decoder + ###################### + + # decode3 + layer { + parents: "encodefc4" + name: "decode3" + weights: "decode3linearity decode3bias" + data_layout: "data_parallel" + fully_connected { + num_neurons: 32 + has_bias: true + } + } + layer { + parents: "decode3" + name: "decode3_tanh" + data_layout: "data_parallel" + elu {} + } + layer { + parents: "decode3_tanh" + name: "decode3_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode2 + layer { + parents: "decode3_dropout" + name: "decode2" + weights: "decode2linearity decode2bias" + data_layout: "data_parallel" + fully_connected { + num_neurons: 256 + has_bias: true + } + } + layer { + parents: "decode2" + name: "decode2_tanh" + data_layout: "data_parallel" + tanh {} + } + layer { + parents: "decode2_tanh" + name: "decode2_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode1 + layer { + parents: "decode2_dropout" + name: "decode1" + weights: "decode1linearity decode1bias" + data_layout: "data_parallel" + fully_connected { + num_neurons: 1024 + has_bias: true + } + } + layer { + parents: "decode1" + name: "decode1_elu" + data_layout: "data_parallel" + tanh { + } + } + layer { + parents: "decode1_elu" + name: "decode1_dropout" + data_layout: "data_parallel" + dropout { + keep_prob: 1.0 + } + } + + # decode0 + layer { + parents: 
"decode1_dropout" + name: "decode0" + weights: "decode0linearity decode0bias" + data_layout: "data_parallel" + fully_connected { + get_slice_points_from_reader: "independent" + get_num_neurons_of_slice_from_reader: [ 1 ] + has_bias: true + } + } + + ###################### + ###@todo : delete not used, LTFB uses encodefc*linearity_weights instead + weights { + name: "encodefc1linearity" + he_normal_initializer { + } + } + weights { + name: "encodefc2linearity" + he_normal_initializer { + } + } + weights { + name: "encodefc3linearity" + he_normal_initializer { + } + } + weights { + name: "encodefc4linearity" + he_normal_initializer { + } + } + + #Decoder weights here to be used in WAE+cyclic model + weights { + name: "decode0linearity" + he_normal_initializer { + } + } + weights { + name: "decode0bias" + } + weights { + name: "decode1linearity" + he_normal_initializer { + } + } + weights { + name: "decode1bias" + } + weights { + name: "decode2linearity" + he_normal_initializer { + } + } + weights { + name: "decode2bias" + } + weights { + name: "decode3linearity" + he_normal_initializer { + } + } + weights { + name: "decode3bias" + } + + + #Discriminator (shared) + weights { + name: "wae_d1fc1linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc1bias" + } + weights { + name: "wae_d1fc2linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc2bias" + } + weights { + name: "wae_d1fc3linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc3bias" + } + weights { + name: "wae_d1fc4linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc4bias" + } + weights { + name: "wae_d1fc5linearity" + he_normal_initializer { + } + } + weights { + name: "wae_d1fc5bias" + } + mini_batch_size: 128 + callback { + print { + interval: 1 + } + } + callback { + timer { + } + } + callback { + replace_weights { + source_layers: "wae_d1fc1_real wae_d1fc2_real wae_d1fc3_real wae_d1fc4_real wae_d1fc5_real" + 
destination_layers: "wae_d2fc1 wae_d2fc2 wae_d2fc3 wae_d2fc4 wae_d2fc5" + batch_interval: 2 + } + } + callback { + dump_outputs { + prefix: "newwae_100K/" + #prefix: "/dir/to/save/imgs" + #prefix:"/p/lustre2/jacobs32/EuroViz/ae_loss/" + # prefix:"/p/lustre2/brainusr/EuroViz19/1Mtrain/1Minf/wae_loss/" + # batch_interval: 1000 + layers: "decode0 image_data_dummy param_data_id encodefc4 img_loss" + execution_modes: "test" + format: "npy" + } + } + #callback { + # ltfb { + # batch_interval: 100 + # low_score_wins: true + # metric: "l_l2_y_eval" + # weights: "encodefc1_linearity_weights encodefc1_bias_weights encodefc2_linearity_weights encodefc2_bias_weights encodefc3_linearity_weights encodefc3_bias_weights encodefc4_linearity_weights encodefc4_bias_weights" + # } + + # } + # callback { + # dump_mb_indices { + # basename:"/p/lustre2/jacobs32/EuroViz19/losses/Feb221Mindices/wae/" + # interval: 1 + # } + # } + + + + callback { save_model { dir: "model" } } + block_size: 256 + procs_per_model:0 +} From 4ecad00c7ec8bd4ce58107e6c613baba398136d1 Mon Sep 17 00:00:00 2001 From: Sam Ade Jacobs Date: Thu, 7 Mar 2019 18:32:27 -0800 Subject: [PATCH 161/443] Saved AE+cycleGAN model for inference --- model_zoo/lbann_aecycgan.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/model_zoo/lbann_aecycgan.cpp b/model_zoo/lbann_aecycgan.cpp index a9f035ac9cb..c25a9fb7bfc 100644 --- a/model_zoo/lbann_aecycgan.cpp +++ b/model_zoo/lbann_aecycgan.cpp @@ -92,7 +92,9 @@ int main(int argc, char *argv[]) { //Evaluate on pretrained autoencoder if(master) std::cout << " Copy trained weights from cycle GAN" << std::endl; model_3->copy_trained_weights_from(model2_weights); - if(master) std::cout << " Evaluate pretrained autoencoder" << std::endl; + if(master) std::cout << " Save AE + cycleGAN" << std::endl; + model_3->save_model(); + if(master) std::cout << " Evaluate cycleGAN model on pretrained autoencoder" << std::endl; model_3->evaluate(execution_mode::testing); } catch 
(std::exception& e) { From 920d573bd425bff8d6e45ec0bd4a0b3de31ef8c5 Mon Sep 17 00:00:00 2001 From: Sam Ade Jacobs Date: Fri, 8 Mar 2019 10:28:33 -0800 Subject: [PATCH 162/443] add print global stat only flag to print callback --- include/lbann/callbacks/callback_print.hpp | 4 +++- src/callbacks/callback_print.cpp | 28 ++++++++++++---------- src/proto/factories/callback_factory.cpp | 5 ++-- 3 files changed, 22 insertions(+), 15 deletions(-) diff --git a/include/lbann/callbacks/callback_print.hpp b/include/lbann/callbacks/callback_print.hpp index 427a52a960e..4ca9f8cd5b4 100644 --- a/include/lbann/callbacks/callback_print.hpp +++ b/include/lbann/callbacks/callback_print.hpp @@ -39,7 +39,8 @@ namespace lbann { */ class lbann_callback_print : public lbann_callback { public: - lbann_callback_print(int batch_interval = 1) : lbann_callback(batch_interval) {} + lbann_callback_print(int batch_interval = 1, bool print_global_stat_only=false) : + lbann_callback(batch_interval), m_print_global_stat_only(print_global_stat_only) {} lbann_callback_print(const lbann_callback_print&) = default; lbann_callback_print& operator=(const lbann_callback_print&) = default; lbann_callback_print* copy() const override { return new lbann_callback_print(*this); } @@ -53,6 +54,7 @@ class lbann_callback_print : public lbann_callback { private: /** Print objective function and metrics to standard output. 
*/ void report_results(model *m); + bool m_print_global_stat_only; }; diff --git a/src/callbacks/callback_print.cpp b/src/callbacks/callback_print.cpp index 7c39d0f3b29..fcbec37b2c9 100644 --- a/src/callbacks/callback_print.cpp +++ b/src/callbacks/callback_print.cpp @@ -161,10 +161,12 @@ void lbann_callback_print::report_results(model *m) { std::vector num_samples_list(comm->get_num_trainers()); comm->intertrainer_gather(obj_fn, obj_fn_list); comm->intertrainer_gather(obj_fn_samples, num_samples_list); - for (int i = 0; i < num_trainers; ++i) { - std::cout << m->get_name() << " (instance " << i << ") " << mode_string << " " - << "objective function : " << obj_fn_list[i] - << std::endl; + if(!m_print_global_stat_only) { + for (int i = 0; i < num_trainers; ++i) { + std::cout << m->get_name() << " (instance " << i << ") " << mode_string << " " + << "objective function : " << obj_fn_list[i] + << std::endl; + } } if (num_trainers > 1) { const EvalType avg_obj_fn = (std::inner_product(num_samples_list.begin(), @@ -192,14 +194,16 @@ void lbann_callback_print::report_results(model *m) { std::vector num_samples_list(comm->get_num_trainers()); comm->intertrainer_gather(score, score_list); comm->intertrainer_gather(score_samples, num_samples_list); - for (int i = 0; i < num_trainers; ++i) { - std::cout << m->get_name() << " (instance " << i << ") " << mode_string << " " - << met->name() << " : " - << score_list[i] << met->get_unit() - << std::endl; + if(!m_print_global_stat_only) { + for (int i = 0; i < num_trainers; ++i) { + std::cout << m->get_name() << " (instance " << i << ") " << mode_string << " " + << met->name() << " : " + << score_list[i] << met->get_unit() + << std::endl; + } } if (num_trainers > 1) { - const EvalType min_score = *std::min_element(begin(score_list), end(score_list)); + const EvalType min_score = *std::min_element(score_list.begin(), score_list.end()); const EvalType avg_score = (std::inner_product(num_samples_list.begin(), num_samples_list.end(), 
score_list.begin(), @@ -207,12 +211,12 @@ void lbann_callback_print::report_results(model *m) { / std::accumulate(num_samples_list.begin(), num_samples_list.end(), 0)); - const EvalType max_score = *std::max_element(begin(score_list), end(score_list)); + const EvalType max_score = *std::max_element(score_list.begin(), score_list.end()); std::cout << m->get_name() << " (global min) " << mode_string << " " << met->name() << " : " << min_score << met->get_unit() << std::endl; - std::cout << m->get_name() << " (global mean) " << mode_string << " " + std::cout << m->get_name() << " (global average) " << mode_string << " " << met->name() << " : " << avg_score << met->get_unit() << std::endl; diff --git a/src/proto/factories/callback_factory.cpp b/src/proto/factories/callback_factory.cpp index bedb7c8c515..78c13cffdb6 100644 --- a/src/proto/factories/callback_factory.cpp +++ b/src/proto/factories/callback_factory.cpp @@ -66,8 +66,9 @@ lbann_callback* construct_callback(lbann_comm* comm, ////////////////////////////////////////////////////////////// if (proto_cb.has_print()) { - const auto& interval = proto_cb.print().interval(); - return new lbann_callback_print(interval); + const auto& params = proto_cb.print(); + return new lbann_callback_print(params.interval(), + params.print_global_stat_only()); } if (proto_cb.has_timer()) { return new lbann_callback_timer(summarizer); From b36b1f61488f654baac60d4e06ecb7de706d0df1 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Fri, 8 Mar 2019 18:33:53 -0800 Subject: [PATCH 163/443] Data reader that gets samples from Python. Compiles, but fails in the constructor. 
--- CMakeLists.txt | 25 +- cmake/configure_files/lbann_config.hpp.in | 1 + include/lbann/data_readers/CMakeLists.txt | 1 + .../lbann/data_readers/data_reader_python.hpp | 157 ++++++++++++ include/lbann/lbann.hpp | 1 + scripts/build_lbann_lc.sh | 15 +- src/data_readers/CMakeLists.txt | 1 + src/data_readers/data_reader_python.cpp | 240 ++++++++++++++++++ src/proto/lbann.proto | 9 + src/proto/proto_common.cpp | 22 ++ 10 files changed, 470 insertions(+), 2 deletions(-) create mode 100644 include/lbann/data_readers/data_reader_python.hpp create mode 100644 src/data_readers/data_reader_python.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 1b470f51c88..dcca9be90e7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -118,6 +118,9 @@ option(LBANN_WITH_VTUNE option(LBANN_WITH_UNIT_TESTING "Enable the unit testing framework (requires Catch2)" OFF) +option(LBANN_WITH_PYTHON + "Enable Python/C API for Python extension and embedding" OFF) + # Enable parallel random matrix generation, if possible option(LBANN_DETERMINISTIC "Use deterministic algorithms as much as possible." OFF) @@ -289,6 +292,17 @@ if (LBANN_WITH_TBINF) add_subdirectory(external/TBinf) endif () +if (LBANN_WITH_PYTHON) + if (LBANN_PYTHON_INCLUDE_DIR) + set(PYTHON_INCLUDE_DIR ${LBANN_PYTHON_INCLUDE_DIR}) + endif (LBANN_PYTHON_INCLUDE_DIR) + if (LBANN_PYTHON_LIBRARY) + set(PYTHON_LIBRARY ${LBANN_PYTHON_LIBRARY}) + endif (LBANN_PYTHON_LIBRARY) + find_package(PythonLibs REQUIRED) + set(LBANN_HAS_PYTHON ${PYTHONLIBS_FOUND}) +endif (LBANN_WITH_PYTHON) + if (LBANN_WITH_VTUNE) find_package(VTune MODULE) @@ -451,6 +465,10 @@ target_include_directories(lbann PUBLIC $ $) +if (LBANN_HAS_PYTHON) + target_include_directories(lbann PUBLIC ${PYTHON_INCLUDE_DIRS}) +endif () + # Use the IMPORTED targets when possible. 
target_link_libraries(lbann PUBLIC LbannProto) target_link_libraries(lbann PUBLIC cereal) @@ -481,6 +499,10 @@ if (LBANN_HAS_VTUNE) target_link_libraries(lbann PUBLIC ${VTUNE_STATIC_LIB}) endif () +if (LBANN_HAS_PYTHON) + target_link_libraries(lbann PUBLIC ${PYTHON_LIBRARIES}) +endif () + if (TARGET LBANN_CXX_FLAGS_werror) target_link_libraries(lbann PUBLIC LBANN_CXX_FLAGS_werror) endif () @@ -653,7 +675,8 @@ append_str_tf(_str LBANN_HAS_DOXYGEN LBANN_HAS_LBANN_PROTO LBANN_HAS_ALUMINUM - LBANN_HAS_CONDUIT) + LBANN_HAS_CONDUIT + LBANN_HAS_PYTHON) string(APPEND _str "\n== End LBANN Configuration Summary ==\n") diff --git a/cmake/configure_files/lbann_config.hpp.in b/cmake/configure_files/lbann_config.hpp.in index 4a712dca4a2..76b50bc920c 100644 --- a/cmake/configure_files/lbann_config.hpp.in +++ b/cmake/configure_files/lbann_config.hpp.in @@ -31,6 +31,7 @@ #cmakedefine LBANN_HAS_ALUMINUM #cmakedefine LBANN_ALUMINUM_MPI_PASSTHROUGH #cmakedefine LBANN_HAS_CONDUIT +#cmakedefine LBANN_HAS_PYTHON #cmakedefine LBANN_DETERMINISTIC diff --git a/include/lbann/data_readers/CMakeLists.txt b/include/lbann/data_readers/CMakeLists.txt index 5de41c0213f..d158ec342ec 100644 --- a/include/lbann/data_readers/CMakeLists.txt +++ b/include/lbann/data_readers/CMakeLists.txt @@ -26,6 +26,7 @@ set_full_path(THIS_DIR_HEADERS data_reader_numpy.hpp data_reader_numpy_npz.hpp data_reader_pilot2_molecular.hpp + data_reader_python.hpp data_reader_synthetic.hpp image_preprocessor.hpp image_utils.hpp diff --git a/include/lbann/data_readers/data_reader_python.hpp b/include/lbann/data_readers/data_reader_python.hpp new file mode 100644 index 00000000000..e0b90cea9de --- /dev/null +++ b/include/lbann/data_readers/data_reader_python.hpp @@ -0,0 +1,157 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. 
+// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +//////////////////////////////////////////////////////////////////////////////// + +#ifndef LBANN_DATA_READERS_PYTHON_HPP_INCLUDED +#define LBANN_DATA_READERS_PYTHON_HPP_INCLUDED + +#include "data_reader.hpp" +#ifdef LBANN_HAS_PYTHON +#include +#include + +namespace lbann { + +namespace python { + +/** Singleton class to manage embedded Python session. + * + * This is very experimental. Be warned. + */ +class manager { +public: + + /** Get singleton instance. */ + static manager& get_instance(); + /** @brief Construct singleton instance. + * @detailed If there is already an instance, it is destroyed. + */ + static void create(); + /** Destroy singleton instance. */ + static void destroy(); + + /** Check if a Python error has occurred. + * + * Throw an exception if an error is detected. + * + * @param force_error Whether to force an exception to be thrown. + */ + void check_error(bool force_error = false) const; + + /** RAII-style mutex wrapper. + * + * Python session is not thread-safe. 
+ * + * @c std::lock_guard would be better than @c std::unique_lock, but + * it can't be returned from a function since it is non-copyable + * (guaranteed copy elision is C++17). @c std::scoped_lock is + * supposed to be strictly better than @c std::lock_guard, but it + * is C++17. + */ + using mutex_guard_type = std::unique_lock; + /** RAII-style mutex wrapper. + * + * Python session is not thread-safe. + * + * A mutex is locked when this function is called and it is + * unlocked when the guard is destructed. + */ + mutex_guard_type get_mutex_guard(); + + ~manager(); + +private: + + /** Singleton instance. */ + static std::unique_ptr m_instance; + /** Python session is not thread-safe. */ + std::mutex m_mutex; + + // Lifetime functions + manager(); + manager(const manager&) = delete; + manager operator=(const manager&) = delete; + +}; + +/** Convenience wrapper around @c PyObject pointer. + * + * This is very experimental. Be warned. + */ +class object { +public: + object(PyObject* obj = nullptr); + object(std::string val); + object(El::Int val); + object(DataType val); + object(const object& other); + object& operator=(const object& other); + object(object&& other); + object& operator=(object&& other); + ~object(); + operator PyObject*() { return m_ptr; } +private: + PyObject* m_ptr; +}; + +} // namespace python + +class python_reader : public generic_data_reader { +public: + python_reader(std::string module, + std::string sample_function, + std::string num_samples_function, + std::string sample_dims_function); + python_reader(const python_reader&) = default; + python_reader& operator=(const python_reader&) = default; + ~python_reader() override = default; + python_reader* copy() const override { return new python_reader(*this); } + + std::string get_type() const override { + return "python_reader"; + } + + const std::vector get_data_dims() const override; + int get_num_labels() const override; + int get_linearized_data_size() const override; + int 
get_linearized_label_size() const override; + + void load() override; + +protected: + bool fetch_datum(CPUMat& X, int data_id, int mb_idx) override; + bool fetch_label(CPUMat& Y, int data_id, int mb_idx) override; + +private: + std::vector m_sample_dims; + El::Int m_num_samples; + python::object m_sample_function; + +}; + +} // namespace lbann + +#endif // LBANN_HAS_PYTHON +#endif // LBANN_DATA_READERS_PYTHON_HPP_INCLUDED diff --git a/include/lbann/lbann.hpp b/include/lbann/lbann.hpp index 6891e46b5fb..6fc66e1be3a 100644 --- a/include/lbann/lbann.hpp +++ b/include/lbann/lbann.hpp @@ -124,6 +124,7 @@ #include "lbann/data_readers/data_reader_pilot2_molecular.hpp" #include "lbann/data_readers/data_reader_mesh.hpp" #include "lbann/data_readers/data_reader_moving_mnist.hpp" +#include "lbann/data_readers/data_reader_python.hpp" /// Data stores #include "lbann/data_store/generic_data_store.hpp" diff --git a/scripts/build_lbann_lc.sh b/scripts/build_lbann_lc.sh index e1cf89cfbc4..c3b22c27f35 100755 --- a/scripts/build_lbann_lc.sh +++ b/scripts/build_lbann_lc.sh @@ -73,6 +73,9 @@ WITH_CONDUIT=OFF WITH_TBINF=OFF RECONFIGURE=0 USE_NINJA=0 +WITH_PYTHON=OFF +PYTHON_LIBRARY=/usr/tce/packages/python/python-3.6.4/lib/libpython3.6m.so +PYTHON_INCLUDE_DIR=/usr/tce/packages/python/python-3.6.4/include/python3.6m # In case that autoconf fails during on-demand buid on surface, try the newer # version of autoconf installed under '/p/lscratchh/brainusr/autoconf/bin' # by putting it at the beginning of the PATH or use the preinstalled library @@ -130,6 +133,7 @@ Options: ${C}--with-conduit Build with conduit interface ${C}--ninja Generate ninja files instead of makefiles ${C}--ninja-processes${N} Number of parallel processes for ninja. + ${C}--python${N} Build with Python/C API. 
EOF } @@ -274,6 +278,9 @@ while :; do --reconfigure) RECONFIGURE=1 ;; + --python) + WITH_PYTHON=ON + ;; -?*) # Unknown option echo "Unknown option (${1})" >&2 @@ -319,7 +326,7 @@ if [ ${USE_MODULES} -ne 0 ]; then HDF5_CMAKE_EXE=$(which cmake) fi module load cmake/3.9.2 - + CMAKE_PATH=$(dirname $(which cmake)) else use git @@ -731,6 +738,9 @@ if [ ${VERBOSE} -ne 0 ]; then print_variable MAKE_NUM_PROCESSES print_variable GEN_DOC print_variable WITH_TOPO_AWARE + print_variable WITH_PYTHON + print_variable PYTHON_LIBRARY + print_variable PYTHON_INCLUDE_DIR echo "" fi @@ -810,6 +820,9 @@ CONFIGURE_COMMAND=$(cat << EOF -D LBANN_CONDUIT_DIR=${CONDUIT_DIR} \ -D LBANN_BUILT_WITH_SPECTRUM=${WITH_SPECTRUM} \ -D OPENBLAS_ARCH_COMMAND=${OPENBLAS_ARCH} \ +-D LBANN_WITH_PYTHON=${WITH_PYTHON} \ +-D LBANN_PYTHON_LIBRARY=${PYTHON_LIBRARY} \ +-D LBANN_PYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIR} \ ${SUPERBUILD_DIR} EOF ) diff --git a/src/data_readers/CMakeLists.txt b/src/data_readers/CMakeLists.txt index e96d476903d..0ec716bae37 100644 --- a/src/data_readers/CMakeLists.txt +++ b/src/data_readers/CMakeLists.txt @@ -34,6 +34,7 @@ set_full_path(THIS_DIR_SOURCES data_reader_multi_images.cpp data_reader_mnist_siamese.cpp data_reader_triplet.cpp + data_reader_python.cpp offline_patches_npz.cpp image_preprocessor.cpp image_utils.cpp diff --git a/src/data_readers/data_reader_python.cpp b/src/data_readers/data_reader_python.cpp new file mode 100644 index 00000000000..09a875a22dc --- /dev/null +++ b/src/data_readers/data_reader_python.cpp @@ -0,0 +1,240 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. 
+// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +//////////////////////////////////////////////////////////////////////////////// + + +#include "lbann/data_readers/data_reader_python.hpp" +#ifdef LBANN_HAS_PYTHON +#include +#include "lbann/utils/file_utils.hpp" + +namespace lbann { + +namespace python { + +std::unique_ptr manager::m_instance; + +manager& manager::get_instance() { + if (m_instance == nullptr) { create(); } + return *m_instance; +} + +void manager::create() { + m_instance.reset(new manager()); +} + +void manager::destroy() { + m_instance.reset(nullptr); +} + +manager::manager() { + if (!Py_IsInitialized()) { + Py_Initialize(); + } + if (!Py_IsInitialized()) { + LBANN_ERROR("error creating embedded Python session"); + } +} + +manager::~manager() { + if (Py_IsInitialized()) { + Py_Finalize(); + } +} + +void manager::check_error(bool force_error) const { + if (force_error || PyErr_Occurred()) { + + // Get error information from Python session + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + + // Construct error message + std::ostringstream err; + err << "detected Python error"; + if (value != nullptr) { + err << " (" << PyUnicode_AsUTF8(value) << ")"; + } + if (traceback != nullptr) { + auto tb_module = PyImport_ImportModule("traceback"); 
+ auto tb_message = PyObject_CallMethod(tb_module, + "format_exc", + nullptr); + err << "\n\n" << PyUnicode_AsUTF8(tb_message) << "\n"; + Py_XDECREF(tb_module); + Py_XDECREF(tb_message); + } + + // Clean up and throw exception + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + LBANN_ERROR(err.str()); + + } +} + +manager::mutex_guard_type manager::get_mutex_guard() { + return mutex_guard_type(m_mutex); +} + +object::object(PyObject* ptr) : m_ptr(ptr) { + if (Py_IsInitialized() && PyErr_Occurred()) { + manager::get_instance().check_error(); + } +} + +object::object(std::string val) + : object(PyUnicode_FromStringAndSize(val.c_str(), val.size())) {} +object::object(El::Int val) : object(PyLong_FromLong(val)) {} +object::object(DataType val) : object(PyFloat_FromDouble(val)) {} + +object::object(const object& other) : m_ptr(other.m_ptr) { + Py_XINCREF(m_ptr); +} + +object& object::operator=(const object& other) { + Py_XDECREF(m_ptr); + m_ptr = other.m_ptr; + Py_XINCREF(m_ptr); + return *this; +} + +object::object(object&& other) : m_ptr(other.m_ptr) { + other.m_ptr = nullptr; +} + +object& object::operator=(object&& other) { + Py_XDECREF(m_ptr); + m_ptr = other.m_ptr; + other.m_ptr = nullptr; + return *this; +} + +object::~object() { + Py_XDECREF(m_ptr); +} + +} // namespace python + +python_reader::python_reader(std::string module, + std::string sample_function, + std::string num_samples_function, + std::string sample_dims_function) + : generic_data_reader(true) { + + // Import Python module + auto& manager = python::manager::get_instance(); + const auto lock = manager.get_mutex_guard(); + const auto& module_dir = file::extract_parent_directory(module); + if (!module_dir.empty()) { + python::object path = PySys_GetObject("path"); + auto status = PyList_Append(path, python::object(module_dir)); + manager.check_error(status); + } + auto module_name = file::extract_base_name(module); + module_name.erase(module_name.rfind(".py")); + python::object _module 
= PyImport_ImportModule(module_name.c_str()); + + // Get number of samples + python::object num_func + = PyObject_GetAttrString(_module, num_samples_function.c_str()); + python::object num = PyObject_CallObject(num_func, nullptr); + m_num_samples = PyLong_AsLong(num); + manager.check_error(); + + // Get sample dimensions + python::object dims_func + = PyObject_GetAttrString(_module, sample_dims_function.c_str()); + python::object dims = PyObject_CallObject(dims_func, nullptr); + dims = PyObject_GetIter(dims); + for (auto d = PyIter_Next(dims); d != nullptr; d = PyIter_Next(dims)) { + m_sample_dims.push_back(PyLong_AsLong(d)); + Py_DECREF(d); + } + manager.check_error(); + + // Get sample function + m_sample_function + = PyObject_GetAttrString(_module, sample_function.c_str()); + +} + +const std::vector python_reader::get_data_dims() const { + std::vector dims; + for (const auto& d : m_sample_dims) { + dims.push_back(d); + } + return dims; +} +int python_reader::get_num_labels() const { + return 1; +} +int python_reader::get_linearized_data_size() const { + const auto& dims = get_data_dims(); + return std::accumulate(dims.begin(), dims.end(), 1, + std::multiplies()); +} +int python_reader::get_linearized_label_size() const { + return get_num_labels(); +} + +bool python_reader::fetch_datum(CPUMat& X, int data_id, int col) { + + // Lock mutex for the scope of this function + auto& manager = python::manager::get_instance(); + const auto lock = manager.get_mutex_guard(); + + // Get sample with Python + python::object id = PyTuple_New(1); + auto status = PyTuple_SetItem(id, 0, PyLong_FromLong(data_id)); + manager.check_error(status); + python::object sample = PyObject_CallObject(m_sample_function, id); + sample = PyObject_GetIter(sample); + + // Extract sample entries from Python iterator + const El::Int sample_size = get_linearized_data_size(); + for (El::Int row = 0; row < sample_size; ++row) { + python::object val = PyIter_Next(sample); + X(row, col) = 
PyFloat_AsDouble(val); + } + if (PyErr_Occurred()) { LBANN_ERROR("Python error detected"); } + + return true; +} + +bool python_reader::fetch_label(CPUMat& Y, int data_id, int col) { + return true; +} + +void python_reader::load() { + m_shuffled_indices.resize(m_num_samples); + std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0); + select_subset_of_data(); +} + +} // namespace lbann + +#endif // LBANN_HAS_PYTHON diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 2511e2d69f2..03fda1d78e5 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -71,6 +71,15 @@ message Reader { bool index_list_per_trainer = 400; bool index_list_per_model = 401; //------------- end of only for index lists ------------------ + + PythonDataReader python = 501; +} + +message PythonDataReader { + string module = 1; // Python module (can be a file path) + string sample_function = 2; // Name of function that gets data sample + string num_samples_function = 3; // Name of function that gets number of data samples + string sample_dims_function = 4; // Name of function that gets dimensions of data sample } message ImagePreprocessor { diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index dd0f7420c79..c75ef0bd3ab 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -325,6 +325,17 @@ void init_data_readers( reader = new mesh_reader(shuffle); } else if (name == "moving_mnist") { reader = new moving_mnist_reader(7, 40, 40, 2); + } else if (name == "python") { +#ifdef LBANN_HAS_PYTHON + const auto& params = readme.python(); + reader = new python_reader(params.module(), + params.sample_function(), + params.num_samples_function(), + params.sample_dims_function()); +#else + LBANN_ERROR("attempted to construct Python data reader, " + "but LBANN is not built with Python/C API"); +#endif // LBANN_HAS_PYTHON } else { if (master) { err << __FILE__ << " " << __LINE__ << " :: unknown name for data reader: " @@ -473,6 +484,17 @@ 
void init_data_readers( } else if (name == "moving_mnist") { reader_validation = new moving_mnist_reader(7, 40, 40, 2); (*(moving_mnist_reader *)reader_validation) = (*(moving_mnist_reader *)reader); + } else if (name == "python") { +#ifdef LBANN_HAS_PYTHON + const auto& params = readme.python(); + reader_validation = new python_reader(params.module(), + params.sample_function(), + params.num_samples_function(), + params.sample_dims_function()); +#else + LBANN_ERROR("attempted to construct Python data reader, " + "but LBANN is not built with Python/C API"); +#endif // LBANN_HAS_PYTHON } reader_validation->set_role("validate"); From b17bbd35aa644d15658b0428fae390470ddfac4f Mon Sep 17 00:00:00 2001 From: Sam Ade Jacobs Date: Mon, 11 Mar 2019 09:01:13 -0700 Subject: [PATCH 164/443] Reorder print order, remove batch interval from callback at testing time --- src/callbacks/callback_print.cpp | 8 ++++---- src/models/model.cpp | 4 +--- src/proto/lbann.proto | 3 ++- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/callbacks/callback_print.cpp b/src/callbacks/callback_print.cpp index fcbec37b2c9..a6e98d34520 100644 --- a/src/callbacks/callback_print.cpp +++ b/src/callbacks/callback_print.cpp @@ -212,14 +212,14 @@ void lbann_callback_print::report_results(model *m) { num_samples_list.end(), 0)); const EvalType max_score = *std::max_element(score_list.begin(), score_list.end()); - std::cout << m->get_name() << " (global min) " << mode_string << " " - << met->name() << " : " - << min_score << met->get_unit() - << std::endl; std::cout << m->get_name() << " (global average) " << mode_string << " " << met->name() << " : " << avg_score << met->get_unit() << std::endl; + std::cout << m->get_name() << " (global min) " << mode_string << " " + << met->name() << " : " + << min_score << met->get_unit() + << std::endl; std::cout << m->get_name() << " (global max) " << mode_string << " " << met->name() << " : " << max_score << met->get_unit() diff --git 
a/src/models/model.cpp b/src/models/model.cpp index cf9fd892edc..959d0ff44db 100644 --- a/src/models/model.cpp +++ b/src/models/model.cpp @@ -1256,9 +1256,7 @@ void model::do_batch_begin_cbs(execution_mode mode) { break; case execution_mode::validation: case execution_mode::testing: - if (get_step() % cb->get_batch_interval() == 0) { - cb->on_batch_evaluate_begin(this); - } + cb->on_batch_evaluate_begin(this); break; default: LBANN_ERROR("invalid execution mode"); diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 2511e2d69f2..466ac89749f 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -436,6 +436,7 @@ message CallbackSaveImages { message CallbackPrint { int64 interval = 1; //default in lbann_callback_print.hpp is 1 + bool print_global_stat_only = 2; //useful in large scale multi-trainer, default is false } message CallbackProfiler { @@ -1237,4 +1238,4 @@ message Variance { } message ChannelwiseMean {} message MiniBatchIndex {} -message MiniBatchSize {} \ No newline at end of file +message MiniBatchSize {} From 3ddc3ff254f99fb106aa336a85a4b1ca6c1287ff Mon Sep 17 00:00:00 2001 From: Ryan Forsyth Date: Wed, 20 Feb 2019 15:09:26 -0800 Subject: [PATCH 165/443] Fix Tests --- bamboo/README.md | 14 +- bamboo/common_python/test_tools.py | 68 +++--- bamboo/common_python/tools.py | 228 +++++++++++++----- bamboo/compiler_tests/build_script.sh | 50 +--- .../compiler_tests/build_script_specific.sh | 53 ++++ bamboo/compiler_tests/conftest.py | 11 +- bamboo/compiler_tests/test_compiler.py | 133 ++++++---- bamboo/integration_tests/common_code.py | 87 ++++--- bamboo/integration_tests/conftest.py | 42 ++-- ...toencoder_imagenet_objective_functions.csv | 21 ++ ..._autoencoder_mnist_objective_functions.csv | 6 + .../pascal/clang4/expected_performance.csv | 5 + ...toencoder_imagenet_objective_functions.csv | 21 ++ ..._autoencoder_mnist_objective_functions.csv | 6 + .../pascal/gcc4/expected_performance.csv | 5 + 
...toencoder_imagenet_objective_functions.csv | 21 ++ ..._autoencoder_mnist_objective_functions.csv | 6 + .../pascal/gcc7/expected_performance.csv | 5 + ...toencoder_imagenet_objective_functions.csv | 21 ++ ..._autoencoder_mnist_objective_functions.csv | 6 + .../pascal/intel18/expected_performance.csv | 5 + .../test_integration_autoencoders.py | 88 ++++--- .../test_integration_debug.py | 23 +- .../test_integration_performance.py | 94 +++++--- bamboo/unit_tests/conftest.py | 26 +- .../test_unit_check_proto_models.py | 37 ++- bamboo/unit_tests/test_unit_checkpoint.py | 14 +- bamboo/unit_tests/test_unit_layer_clamp.py | 14 +- .../unit_tests/test_unit_layer_covariance.py | 16 +- bamboo/unit_tests/test_unit_layer_elu.py | 16 +- bamboo/unit_tests/test_unit_layer_identity.py | 16 +- bamboo/unit_tests/test_unit_layer_l1_norm.py | 16 +- bamboo/unit_tests/test_unit_layer_l2_norm2.py | 16 +- .../unit_tests/test_unit_layer_leaky_relu.py | 16 +- .../unit_tests/test_unit_layer_log_sigmoid.py | 16 +- .../unit_tests/test_unit_layer_log_softmax.py | 16 +- .../test_unit_layer_mean_absolute_error.py | 16 +- bamboo/unit_tests/test_unit_layer_relu.py | 16 +- bamboo/unit_tests/test_unit_layer_selu.py | 16 +- bamboo/unit_tests/test_unit_layer_sigmoid.py | 16 +- bamboo/unit_tests/test_unit_layer_softmax.py | 16 +- bamboo/unit_tests/test_unit_layer_softplus.py | 16 +- bamboo/unit_tests/test_unit_layer_softsign.py | 16 +- .../test_unit_layer_squared_difference.py | 16 +- .../unit_tests/test_unit_layer_tessellate.py | 16 +- bamboo/unit_tests/test_unit_layer_variance.py | 16 +- bamboo/unit_tests/test_unit_lbann2_reload.py | 20 +- .../unit_tests/test_unit_lbann_invocation.py | 24 +- .../unit_tests/test_unit_mnist_conv_graph.py | 23 +- .../test_unit_mnist_ridge_regression.py | 19 +- .../test_unit_mnist_softmax_classifier.py | 19 +- 51 files changed, 1053 insertions(+), 450 deletions(-) create mode 100755 bamboo/compiler_tests/build_script_specific.sh create mode 100644 
bamboo/integration_tests/expected_values/pascal/clang4/expected_conv_autoencoder_imagenet_objective_functions.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/clang4/expected_conv_autoencoder_mnist_objective_functions.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/clang4/expected_performance.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/gcc4/expected_conv_autoencoder_imagenet_objective_functions.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/gcc4/expected_conv_autoencoder_mnist_objective_functions.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/gcc4/expected_performance.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/gcc7/expected_conv_autoencoder_imagenet_objective_functions.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/gcc7/expected_conv_autoencoder_mnist_objective_functions.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/gcc7/expected_performance.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/intel18/expected_conv_autoencoder_imagenet_objective_functions.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/intel18/expected_conv_autoencoder_mnist_objective_functions.csv create mode 100644 bamboo/integration_tests/expected_values/pascal/intel18/expected_performance.csv diff --git a/bamboo/README.md b/bamboo/README.md index 4ad8b6508be..c317c496379 100644 --- a/bamboo/README.md +++ b/bamboo/README.md @@ -47,13 +47,13 @@ As an alternative to splitting the file, errors can be searched for with `grep - Bamboo agent properties are used to specify requirements for each job. 
-| Agents (jobs) | `agent_owner` | `architecture` | `cluster` | `gpu_architecture` | `sys_type` | -| --- | --- | --- | --- | --- | --- | -| Catalyst Agents (x86_cpu) | `lbannusr` | `x86_64` | `catalyst` | `none` | `toss_3_x86_64_ib` | -| Pascal Agents | `lbannusr` | `x86_64` | `pascal` | `pascal` | `chaos_6_x86_64_ib` | -| Quartz Agents (x86_cpu) | `lbannusr` | `x86_64` | `quartz` | `none` | `toss_3_x86_64_ib` | -| Ray Agents (ppc64le_gpu) | `lbannusr` | `ppc64_le` | `ray` | `pascal` | `blueos_3_ppc64le_ib` | -| Surface Agents (x86_gpu) | `lbannusr` | `x86_64` | `surface` | `kepler` | `chaos_5_x86_64_ib` | +| Agents (jobs) | `agent_owner` | `architecture` | `cluster` | `gpu_architecture` | `sys_type` | +| --- | --- | --- | --- | --- | --- | +| Catalyst Agents (x86_cpu) | `lbannusr` | `x86_64` | `catalyst` | `none` | `toss_3_x86_64_ib` | +| Pascal Agents (x86_gpu_pascal) | `lbannusr` | `x86_64` | `pascal` | `pascal` | `chaos_6_x86_64_ib` | +| Quartz Agents (x86_cpu) | `lbannusr` | `x86_64` | `quartz` | `none` | `toss_3_x86_64_ib` | +| Ray Agents (ppc64le_gpu) | `lbannusr` | `ppc64_le` | `ray` | `pascal` | `blueos_3_ppc64le_ib` | +| Surface Agents (x86_gpu) | `lbannusr` | `x86_64` | `surface` | `kepler` | `chaos_5_x86_64_ib` | Currently, `agent_owner`, `architecture`, and `gpu_architecture` are used to determine agents to run a job. 
diff --git a/bamboo/common_python/test_tools.py b/bamboo/common_python/test_tools.py index 0fdbf044160..10a2b4339b0 100644 --- a/bamboo/common_python/test_tools.py +++ b/bamboo/common_python/test_tools.py @@ -5,27 +5,27 @@ # Run locally with python -m pytest -s def test_command_catalyst(): - actual = tools.get_command(cluster='catalyst', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existance=False) + actual = tools.get_command(cluster='catalyst', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existence=False) expected = 'salloc --nodes=20 --partition=pdebug --time=30 srun --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 > output_file 2> error_file' assert actual == expected def test_command_pascal(): - actual = tools.get_command(cluster='pascal', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, 
mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existance=False) + actual = tools.get_command(cluster='pascal', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existence=False) expected = 'salloc --nodes=20 --partition=pdebug --time=30 srun --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 > output_file 2> error_file' assert actual == expected def test_command_quartz(): - actual = tools.get_command(cluster='quartz', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existance=False) + actual = tools.get_command(cluster='quartz', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', 
model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existence=False) expected = 'salloc --nodes=20 --partition=pdebug --time=30 srun --ntasks=40 exe --data_filedir=lscratchh/filedir --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 > output_file 2> error_file' assert actual == expected def test_command_surface(): - actual = tools.get_command(cluster='surface', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existance=False) + actual = tools.get_command(cluster='surface', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existence=False) expected = 'salloc --nodes=20 --partition=pbatch --time=30 srun --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 
--optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 > output_file 2> error_file' assert actual == expected def test_command_ray(): - actual = tools.get_command(cluster='ray', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existance=False) + actual = tools.get_command(cluster='ray', executable='exe', num_nodes=20, partition='pdebug', time_limit=30, num_processes=40, dir_name='dir', data_filedir_default='lscratchh/filedir', data_reader_name='mnist', data_reader_percent=0.10, exit_after_setup=True, mini_batch_size=15, model_folder='models/folder', model_name='lenet', num_epochs=7, optimizer_name='adagrad', processes_per_model=10, output_file_name='output_file', error_file_name='error_file', check_executable_existence=False) expected = 'bsub -x -G guests -Is -n 40 -q pdebug -R "span[ptile=2]" -W 30 mpirun -np 40 -N 2 exe --data_filedir=gscratchr/filedir --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 > output_file 2> error_file' assert actual == expected @@ -33,7 +33,7 @@ def test_command_ray(): def test_blacklisted_substrings(): try: - tools.get_command('ray', 'exe', partition=';', optimizer_path='--model=new_model', check_executable_existance=False) + tools.get_command('ray', 'exe', partition=';', optimizer_path='--model=new_model', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid 
character(s): ; contains ; , --model=new_model contains --' @@ -41,7 +41,7 @@ def test_blacklisted_substrings(): def test_unsupported_cluster(): try: - tools.get_command('quartz', 'exe', check_executable_existance=False) + tools.get_command('quartz', 'exe', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Unsupported Cluster: quartz' @@ -49,7 +49,7 @@ def test_unsupported_cluster(): def test_bad_model_1(): try: - tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder', model_name='name', model_path='path', check_executable_existance=False) + tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder', model_name='name', model_path='path', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name' @@ -57,7 +57,7 @@ def test_bad_model_1(): def test_bad_model_2(): try: - tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder', model_path='path', check_executable_existance=False) + tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder', model_path='path', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name' @@ -65,7 +65,7 @@ def test_bad_model_2(): def test_bad_model_3(): try: - tools.get_command('ray', 'exe', dir_name='dir', model_name='name', model_path='path', check_executable_existance=False) + tools.get_command('ray', 'exe', dir_name='dir', model_name='name', model_path='path', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name' @@ -73,7 +73,7 @@ def test_bad_model_3(): def test_bad_model_4(): try: - tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder', check_executable_existance=False) + tools.get_command('ray', 'exe', 
dir_name='dir', model_folder='folder', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: model_folder set but not model_name.' @@ -81,7 +81,7 @@ def test_bad_model_4(): def test_bad_model_5(): try: - tools.get_command('ray', 'exe', dir_name='dir', model_name='name', check_executable_existance=False) + tools.get_command('ray', 'exe', dir_name='dir', model_name='name', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: model_name set but not model_folder.' @@ -89,7 +89,7 @@ def test_bad_model_5(): def test_bad_data_reader(): try: - tools.get_command('catalyst', 'exe', dir_name='dir', data_reader_name='name', data_reader_path='path', check_executable_existance=False) + tools.get_command('catalyst', 'exe', dir_name='dir', data_reader_name='name', data_reader_path='path', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_reader_path is set but so is data_reader_name , data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.' 
@@ -97,7 +97,7 @@ def test_bad_data_reader(): def test_bad_optimizer(): try: - tools.get_command('ray', 'exe', dir_name='dir', optimizer_name='name', optimizer_path='path', check_executable_existance=False) + tools.get_command('ray', 'exe', dir_name='dir', optimizer_name='name', optimizer_path='path', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: optimizer_path is set but so is optimizer_name' @@ -105,7 +105,7 @@ def test_bad_optimizer(): def test_bad_dir_name_1(): try: - tools.get_command('ray', 'exe', dir_name='dir', check_executable_existance=False) + tools.get_command('ray', 'exe', dir_name='dir', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: dir_name set but none of model_folder, model_name, data_reader_name, optimizer_name are.' @@ -113,7 +113,7 @@ def test_bad_dir_name_1(): def test_bad_dir_name_2(): try: - tools.get_command('ray', 'exe', model_folder='folder', check_executable_existance=False) + tools.get_command('ray', 'exe', model_folder='folder', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.' @@ -121,7 +121,7 @@ def test_bad_dir_name_2(): def test_bad_dir_name_3(): try: - tools.get_command('ray', 'exe', model_name='name', check_executable_existance=False) + tools.get_command('ray', 'exe', model_name='name', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.' 
@@ -129,7 +129,7 @@ def test_bad_dir_name_3(): def test_bad_dir_name_4(): try: - tools.get_command('catalyst', 'exe', data_reader_name='name', check_executable_existance=False) + tools.get_command('catalyst', 'exe', data_reader_name='name', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is. , data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.' @@ -137,7 +137,7 @@ def test_bad_dir_name_4(): def test_bad_dir_name_5(): try: - tools.get_command('ray', 'exe', optimizer_name='name', check_executable_existance=False) + tools.get_command('ray', 'exe', optimizer_name='name', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.' 
@@ -146,7 +146,7 @@ def test_bad_dir_name_5(): def test_bad_data_filedir_1(): try: tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name', data_filedir_default='filedir', data_filedir_train_default='a', - check_executable_existance=False) + check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]' @@ -155,7 +155,7 @@ def test_bad_data_filedir_1(): def test_bad_data_filedir_2(): try: tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name', data_filedir_default='filedir', data_filename_train_default='b', - check_executable_existance=False) + check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]' @@ -165,7 +165,7 @@ def test_bad_data_filedir_2(): def test_bad_data_filedir_3(): try: tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name', data_filedir_default='filedir', data_filedir_test_default='c', - check_executable_existance=False) + check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]' @@ -174,7 +174,7 @@ def test_bad_data_filedir_3(): def test_bad_data_filedir_4(): try: tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name', data_filedir_default='filedir', data_filename_test_default='d', - check_executable_existance=False) + check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_fildir_default set but so is at least one of 
[data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]' @@ -182,7 +182,7 @@ def test_bad_data_filedir_4(): def test_bad_data_filedir_5(): try: - tools.get_command('ray', 'exe', data_reader_path='path', data_filedir_default='filedir', data_filedir_train_default='e', check_executable_existance=False) + tools.get_command('ray', 'exe', data_reader_path='path', data_filedir_default='filedir', data_filedir_train_default='e', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]' @@ -190,7 +190,7 @@ def test_bad_data_filedir_5(): def test_bad_data_filedir_6(): try: - tools.get_command('ray', 'exe', data_reader_path='path', data_filedir_default='filedir', data_filename_train_default='f', check_executable_existance=False) + tools.get_command('ray', 'exe', data_reader_path='path', data_filedir_default='filedir', data_filename_train_default='f', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]' @@ -199,7 +199,7 @@ def test_bad_data_filedir_6(): def test_bad_data_filedir_7(): try: - tools.get_command('ray', 'exe', data_reader_path='path', data_filedir_default='filedir', data_filedir_test_default='g', check_executable_existance=False) + tools.get_command('ray', 'exe', data_reader_path='path', data_filedir_default='filedir', data_filedir_test_default='g', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, 
data_filename_test_default]' @@ -207,7 +207,7 @@ def test_bad_data_filedir_7(): def test_bad_data_filedir_8(): try: - tools.get_command('ray', 'exe', data_reader_path='path', data_filedir_default='filedir', data_filename_test_default='h', check_executable_existance=False) + tools.get_command('ray', 'exe', data_reader_path='path', data_filedir_default='filedir', data_filename_test_default='h', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]' @@ -215,7 +215,7 @@ def test_bad_data_filedir_8(): def test_bad_data_filedir_9(): try: - tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name', check_executable_existance=False) + tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.' @@ -223,7 +223,7 @@ def test_bad_data_filedir_9(): def test_bad_data_filedir_10(): try: - tools.get_command('ray', 'exe', data_reader_path='path', check_executable_existance=False) + tools.get_command('ray', 'exe', data_reader_path='path', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. 
Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.' @@ -231,7 +231,7 @@ def test_bad_data_filedir_10(): def test_bad_data_filedir_11(): try: - tools.get_command('ray', 'exe', data_filedir_default='filedir', check_executable_existance=False) + tools.get_command('ray', 'exe', data_filedir_default='filedir', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: data_filedir_default set but neither data_reader_name or data_reader_path are.' @@ -239,7 +239,7 @@ def test_bad_data_filedir_11(): def test_bad_data_filedir_12(): try: - tools.get_command('ray', 'exe', data_filedir_train_default='a', check_executable_existance=False) + tools.get_command('ray', 'exe', data_filedir_train_default='a', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.' @@ -248,7 +248,7 @@ def test_bad_data_filedir_12(): def test_bad_data_filedir_13(): try: - tools.get_command('ray', 'exe', data_filename_train_default='b', check_executable_existance=False) + tools.get_command('ray', 'exe', data_filename_train_default='b', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.' 
@@ -257,7 +257,7 @@ def test_bad_data_filedir_13(): def test_bad_data_filedir_14(): try: - tools.get_command('ray', 'exe', data_filedir_test_default='c', check_executable_existance=False) + tools.get_command('ray', 'exe', data_filedir_test_default='c', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.' @@ -266,7 +266,7 @@ def test_bad_data_filedir_14(): def test_bad_data_filedir_15(): try: - tools.get_command('ray', 'exe', data_filename_test_default='e', check_executable_existance=False) + tools.get_command('ray', 'exe', data_filename_test_default='e', check_executable_existence=False) except Exception, e: actual = str(e) expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.' 
diff --git a/bamboo/common_python/tools.py b/bamboo/common_python/tools.py index 4a9508c8b3a..e058ca21eb1 100644 --- a/bamboo/common_python/tools.py +++ b/bamboo/common_python/tools.py @@ -1,14 +1,16 @@ import pytest import math, os, re + def check_list(substrings, strings): errors = [] for string in strings: for substring in substrings: if (string != None) and (substring in string): - errors.append('%s contains %s' % (string, substring)) + errors.append('%s contains %s' % (string, substring)) return errors + def get_command(cluster, executable, num_nodes=None, @@ -37,29 +39,25 @@ def get_command(cluster, output_file_name=None, error_file_name=None, return_tuple=False, - check_executable_existance=True, + check_executable_existence=True, skip_no_exe=True): # Check parameters for black-listed characters like semi-colons that # would terminate the command and allow for an extra command blacklist = [';', '--'] - strings = [partition, dir_name, data_filedir_default, data_filedir_train_default, + strings = [partition, dir_name, data_filedir_default, + data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default, data_reader_name, data_reader_path, model_folder, model_name, model_path, optimizer_name, optimizer_path, output_file_name, error_file_name] invalid_character_errors = check_list(blacklist, strings) if invalid_character_errors != []: - raise Exception('Invalid character(s): %s' % ' , '.join(invalid_character_errors)) + raise Exception('Invalid character(s): %s' % ' , '.join( + invalid_character_errors)) - # Check executable existance - if check_executable_existance: - executable_exists = os.path.exists(executable) - if not executable_exists: - error_string = 'Executable does not exist: %s' % executable - if skip_no_exe: - pytest.skip(error_string) - else: - raise Exception(error_string) + # Check executable existence + if check_executable_existence: + process_executable_existence(executable, skip_no_exe) # 
Determine scheduler if cluster in ['catalyst', 'pascal', 'quartz', 'surface']: @@ -81,20 +79,21 @@ def get_command(cluster, option_num_nodes = '' option_partition = '' option_time_limit = '' - if num_nodes != None: + if num_nodes is not None: # --nodes= => # Request that a minimum of minnodes nodes be allocated to this # job. A maximum node count may also be specified with # maxnodes. option_num_nodes = ' --nodes=%d' % num_nodes - if partition != None: + if partition is not None: # Surface does not have pdebug, so switch to pbatch - if (cluster == 'surface') and (partition == 'pdebug'): + if (cluster in ['surface', 'pascal']) and \ + (partition == 'pdebug'): partition = 'pbatch' # --partition => Request a specific partition for the resource # allocation. option_partition = ' --partition=%s' % partition - if time_limit != None: + if time_limit is not None: # --time => Set a limit on the total run time of the job # allocation. # Time limit in minutes @@ -109,7 +108,7 @@ def get_command(cluster, else: command_run = ' srun --mpibind=off' option_num_processes = '' - if num_processes != None: + if num_processes is not None: # --ntasks => Specify the number of tasks to run. # Number of processes to run => MPI Rank option_num_processes = ' --ntasks=%d' % num_processes @@ -120,7 +119,7 @@ def get_command(cluster, command_allocate = '' # Allocate a node if we don't have one already # Running the tests manually allows for already having a node allocated - if os.getenv('LSB_HOSTS') == None: + if os.getenv('LSB_HOSTS') is None: command_allocate = 'bsub' # x => Puts the host running your job into exclusive execution # mode. @@ -135,19 +134,19 @@ def get_command(cluster, option_partition = '' option_processes_per_node = '' option_time_limit = '' - if num_processes != None: + if num_processes is not None: # n => Submits a parallel job and specifies the number of # tasks in the job. 
option_num_processes = ' -n %d' % num_processes - if (num_nodes != None) and (num_nodes != 0): + if (num_nodes is not None) and (num_nodes != 0): # R => Runs the job on a host that meets the specified # resource requirements. option_processes_per_node = ' -R "span[ptile=%d]"' % int( math.ceil(float(num_processes)/num_nodes)) - if partition != None: + if partition is not None: # q => Submits the job to one of the specified queues. option_partition = ' -q %s' % partition - if time_limit != None: + if time_limit is not None: if cluster == 'ray': max_ray_time = 480 if time_limit > max_ray_time: @@ -166,10 +165,10 @@ def get_command(cluster, command_run = ' mpirun' option_num_processes = '' option_processes_per_node = '' - if num_processes != None: + if num_processes is not None: # -np => Run this many copies of the program on the given nodes. option_num_processes = ' -np %d' % num_processes - if (num_nodes != None) and (num_nodes != 0): + if (num_nodes is not None) and (num_nodes != 0): option_processes_per_node = ' -N %d' % int( math.ceil(float(num_processes)/num_nodes)) command_run = '%s%s%s' % ( @@ -194,57 +193,68 @@ def get_command(cluster, option_optimizer = '' option_processes_per_model = '' lbann_errors = [] - if model_path != None: + if model_path is not None: # If model_folder and/or model_name are set, an exception will be # raised later. option_model = ' --model=%s' % model_path - if data_reader_path != None: + if data_reader_path is not None: # If data_reader_name is set, an exception will be raised later. option_data_reader = ' --reader=%s' % data_reader_path - if optimizer_path != None: + if optimizer_path is not None: # If optimizer_name is set, an exception will be raised later. 
option_optimizer_name = ' --optimizer=%s' % optimizer_path - if dir_name != None: - if model_path != None: - if (model_folder != None) or (model_name != None): + if dir_name is not None: + if model_path is not None: + if (model_folder is not None) or (model_name is not None): lbann_errors.append( - 'model_path is set but so is at least one of model folder and model_name') + ('model_path is set but so is at least one of model' + ' folder and model_name')) else: - if (model_folder != None) and (model_name != None): - option_model = ' --model=%s/model_zoo/%s/model_%s.prototext' % (dir_name, model_folder, model_name) - elif model_folder != None: + if (model_folder is not None) and (model_name is not None): + option_model = ' --model=%s/model_zoo/%s/model_%s.prototext' % ( + dir_name, model_folder, model_name) + elif model_folder is not None: lbann_errors.append('model_folder set but not model_name.') - elif model_name != None: + elif model_name is not None: lbann_errors.append('model_name set but not model_folder.') - if data_reader_name != None: - if data_reader_path != None: - lbann_errors.append('data_reader_path is set but so is data_reader_name') + if data_reader_name is not None: + if data_reader_path is not None: + lbann_errors.append(('data_reader_path is set but so is' + ' data_reader_name')) else: option_data_reader = ' --reader=%s/model_zoo/data_readers/data_reader_%s.prototext' % (dir_name, data_reader_name) - if optimizer_name != None: - if optimizer_path != None: - lbann_errors.append('optimizer_path is set but so is optimizer_name') + if optimizer_name is not None: + if optimizer_path is not None: + lbann_errors.append(('optimizer_path is set but so is' + ' optimizer_name')) else: option_optimizer = ' --optimizer=%s/model_zoo/optimizers/opt_%s.prototext' % (dir_name, optimizer_name) - if (model_folder == None) and (model_name == None) and (data_reader_name == None) and (optimizer_name == None): - lbann_errors.append('dir_name set but none of 
model_folder, model_name, data_reader_name, optimizer_name are.') - elif (model_folder != None) or (model_name != None) or (data_reader_name != None) or (optimizer_name != None): + if (model_folder is None) and (model_name is None) and \ + (data_reader_name is None) and (optimizer_name is None): + lbann_errors.append( + ('dir_name set but none of model_folder, model_name,' + ' data_reader_name, optimizer_name are.')) + elif (model_folder is not None) or (model_name is not None) or \ + (data_reader_name is not None) or (optimizer_name is not None): lbann_errors.append( - 'dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.') + ('dir_name is not set but at least one of model_folder,' + ' model_name, data_reader_name, optimizer_name is.')) data_file_parameters = [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] # Determine data file paths # If there is no regex match, then re.sub keeps the original string - if data_filedir_default != None: + if data_filedir_default is not None: if cluster in ['catalyst', 'pascal', 'surface']: # option_data_filedir = data_filedir_default # lscratchh, presumably - pass # No need to pass in a parameter + pass # No need to pass in a parameter elif cluster == 'quartz': - option_data_filedir = ' --data_filedir=%s' % re.sub('[a-z]scratch[a-z]', 'lscratchh', data_filedir_default) + option_data_filedir = ' --data_filedir=%s' % re.sub( + '[a-z]scratch[a-z]', 'lscratchh', data_filedir_default) elif cluster == 'ray': - option_data_filedir = ' --data_filedir=%s' % re.sub('[a-z]scratch[a-z]', 'gscratchr', data_filedir_default) + option_data_filedir = ' --data_filedir=%s' % re.sub( + '[a-z]scratch[a-z]', 'gscratchr', data_filedir_default) elif None not in data_file_parameters: if cluster in ['catalyst', 'pascal', 'surface']: # option_data_filedir_train = data_filedir_train_default @@ -262,35 +272,56 @@ def get_command(cluster, 
option_data_filename_train = ' --data_filename_train=%s' % re.sub('[a-z]scratch[a-z]', 'gscratchr', data_filename_train_default) option_data_filedir_test = ' --data_filedir_test=%s' % re.sub('[a-z]scratch[a-z]', 'gscratchr', data_filedir_test_default) option_data_filename_test = ' --data_filename_test=%s' % re.sub('[a-z]scratch[a-z]', 'gscratchr', data_filename_test_default) - if (data_reader_name != None) or (data_reader_path != None): - if data_filedir_default != None: - if data_file_parameters != [None, None, None, None]: # If any are not None - lbann_errors.append('data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]') + if (data_reader_name is not None) or (data_reader_path is not None): + if data_filedir_default is not None: + # If any are not None + if data_file_parameters != [None, None, None, None]: + lbann_errors.append( + ('data_fildir_default set but so is at least one of' + ' [data_filedir_train_default, data_filename_train' + '_default, data_filedir_test_default,' + ' data_filename_test_default]')) # else: only data_filedir_default is set else: # if None in data_file_parameters: # If any are None if data_file_parameters == [None, None, None, None]: # If all are None - lbann_errors.append('data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.') + lbann_errors.append( + ('data_reader_name or data_reader_path is set but not' + ' data_filedir_default. If a data reader is provided,' + ' the default filedir must be set. This allows for' + ' determining what the filedir should be on each' + ' cluster. 
Alternatively, some or all of' + ' [data_filedir_train_default, data_filename_train' + '_default, data_filedir_test_default, data_filename' + '_test_default] can be set.')) # else: no data_file parameters are set else: - if data_filedir_default != None: - lbann_errors.append('data_filedir_default set but neither data_reader_name or data_reader_path are.') - elif filter(lambda x: x != None, data_file_parameters) != []: # If the list of non-None data_file parameters is not empty - lbann_errors.append('At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.') + if data_filedir_default is not None: + lbann_errors.append( + ('data_filedir_default set but neither data_reader_name' + ' or data_reader_path are.')) + elif filter(lambda x: x is not None, data_file_parameters) != []: + # If the list of non-None data_file parameters is not empty + lbann_errors.append( + ('At least one of [data_filedir_train_default, data_filename' + '_train_default, data_filedir_test_default, data_filename' + '_test_default] is set, but neither data_reader_name or' + ' data_reader_path are.')) # else: no conflicts - if data_reader_percent != None: + if data_reader_percent is not None: option_data_reader_percent = ' --data_reader_percent=%f' % data_reader_percent if exit_after_setup: option_exit_after_setup = ' --exit_after_setup' - if mini_batch_size != None: + if mini_batch_size is not None: option_mini_batch_size = ' --mini_batch_size=%d' % mini_batch_size - if num_epochs != None: + if num_epochs is not None: option_num_epochs = ' --num_epochs=%d' % num_epochs - if processes_per_model != None: + if processes_per_model is not None: option_processes_per_model = ' --procs_per_model=%d' % processes_per_model - if ckpt_dir != None: + if ckpt_dir is not None: option_ckpt_dir = ' --ckpt_dir=%s' % ckpt_dir if lbann_errors != []: + 
print('lbann_errors={lbann_errors}.'.format(lbann_errors=lbann_errors)) raise Exception('Invalid Usage: ' + ' , '.join(lbann_errors)) command_lbann = '%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s' % ( executable, option_ckpt_dir, option_data_filedir, @@ -304,9 +335,9 @@ def get_command(cluster, # Create redirect command command_output = '' command_error = '' - if output_file_name != None: + if output_file_name is not None: command_output = ' > %s' % output_file_name - if error_file_name != None: + if error_file_name is not None: command_error = ' 2> %s' % error_file_name command_redirect = '%s%s' % (command_output, command_error) @@ -316,3 +347,68 @@ def get_command(cluster, return t else: return '%s%s %s%s' % t + + +def process_executable_existence(executable, skip_no_exe=True): + executable_exists = os.path.exists(executable) + if not executable_exists: + error_string = 'Executable does not exist: %s' % executable + if skip_no_exe: + pytest.skip(error_string) + else: + raise Exception(error_string) + + +def get_spack_exes(default_dirname, cluster): + exes = {} + + exes['clang4'] = '%s/bamboo/compiler_tests/builds/%s_clang-4.0.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) + exes['gcc4'] = '%s/bamboo/compiler_tests/builds/%s_gcc-4.9.3_rel/build/model_zoo/lbann' % (default_dirname, cluster) + exes['gcc7'] = '%s/bamboo/compiler_tests/builds/%s_gcc-7.1.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) + exes['intel18'] = '%s/bamboo/compiler_tests/builds/%s_intel-18.0.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) + + exes['clang4_debug'] = '%s/bamboo/compiler_tests/builds/%s_clang-4.0.0_debug/build/model_zoo/lbann' % (default_dirname, cluster) + exes['gcc4_debug'] = '%s/bamboo/compiler_tests/builds/%s_gcc-4.9.3_debug/build/model_zoo/lbann' % (default_dirname, cluster) + exes['gcc7_debug'] = '%s/bamboo/compiler_tests/builds/%s_gcc-7.1.0_debug/build/model_zoo/lbann' % (default_dirname, cluster) + exes['intel18_debug'] = 
'%s/bamboo/compiler_tests/builds/%s_intel-18.0.0_debug/build/model_zoo/lbann' % (default_dirname, cluster) + + return exes + + +def get_default_exes(default_dirname, cluster): + exes = get_spack_exes(default_dirname, cluster) + # Use build script as a backup if the Spack build doesn't work. + if not os.path.exists(exes['clang4']): + exes['clang4'] = '%s/build/clang.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) + if not os.path.exists(exes['gcc7']): + exes['gcc7'] = '%s/build/gnu.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) + if not os.path.exists(exes['intel18']): + exes['intel18'] = '%s/build/intel.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) + + if not os.path.exists(exes['clang4_debug']): + exes['clang4_debug'] = '%s/build/clang.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) + if not os.path.exists(exes['gcc7_debug']): + exes['gcc7_debug'] = '%s/build/gnu.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) + if not os.path.exists(exes['intel18_debug']): + exes['intel18_debug'] = '%s/build/intel.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) + + default_exes = {} + default_exes['default'] = '%s/build/gnu.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) + if cluster in ['catalyst', 'quartz', 'pascal']: + # x86_cpu - catalyst, quartz + # x86_gpu_pascal - pascal + default_exes['clang4'] = exes['clang4'] + default_exes['gcc4'] = exes['gcc4'] + default_exes['gcc7'] = exes['gcc7'] + default_exes['intel18'] = exes['intel18'] + + default_exes['clang4_debug'] = exes['clang4_debug'] + default_exes['gcc4_debug'] = exes['gcc4_debug'] + default_exes['gcc7_debug'] = exes['gcc7_debug'] + default_exes['intel18_debug'] = exes['intel18_debug'] + elif cluster in ['surface']: + # x86_gpu - surface + default_exes['gcc4'] = exes['gcc4'] + default_exes['gcc4_debug'] = exes['gcc4_debug'] + + return default_exes diff --git 
a/bamboo/compiler_tests/build_script.sh b/bamboo/compiler_tests/build_script.sh index c52f239b5fa..bb701fdc362 100755 --- a/bamboo/compiler_tests/build_script.sh +++ b/bamboo/compiler_tests/build_script.sh @@ -1,53 +1,7 @@ -set -e CLUSTER=$(hostname | sed 's/\([a-zA-Z][a-zA-Z]*\)[0-9]*/\1/g') -LBANN_DIR=$(git rev-parse --show-toplevel) -DEBUG='' if [ "${CLUSTER}" != 'surface' ]; then source /usr/share/lmod/lmod/init/bash source /etc/profile.d/00-modulepath.sh fi - -while :; do - case ${1} in - --compiler) - # Choose compiler - if [ -n "${2}" ]; then - COMPILER=${2} - shift - else - echo "\"${1}\" option requires a non-empty option argument" >&2 - exit 1 - fi - ;; - - -d|--debug) - # Debug mode - DEBUG='--debug' - ;; - *) - # Break loop if there are no more options - break - - esac - shift -done - -if [ "${COMPILER}" == 'clang' ]; then - module load clang/4.0.0 - ${LBANN_DIR}/scripts/build_lbann_lc.sh --compiler clang ${DEBUG} --reconfigure -fi - -if [ "${COMPILER}" == 'intel' ]; then - module load intel/18.0.0 - ${LBANN_DIR}/scripts/build_lbann_lc.sh --compiler intel ${DEBUG} --reconfigure -fi - -if [ "${COMPILER}" == 'gcc4' ]; then - module load gcc/4.9.3 - ${LBANN_DIR}/scripts/build_lbann_lc.sh --compiler gnu ${DEBUG} --reconfigure -fi - -if [ "${COMPILER}" == 'gcc7' ]; then - module load gcc/7.1.0 - ${LBANN_DIR}/scripts/build_lbann_lc.sh --compiler gnu ${DEBUG} --reconfigure -fi +LBANN_DIR=$(git rev-parse --show-toplevel) +${LBANN_DIR}/scripts/build_lbann_lc.sh diff --git a/bamboo/compiler_tests/build_script_specific.sh b/bamboo/compiler_tests/build_script_specific.sh new file mode 100755 index 00000000000..c52f239b5fa --- /dev/null +++ b/bamboo/compiler_tests/build_script_specific.sh @@ -0,0 +1,53 @@ +set -e +CLUSTER=$(hostname | sed 's/\([a-zA-Z][a-zA-Z]*\)[0-9]*/\1/g') +LBANN_DIR=$(git rev-parse --show-toplevel) +DEBUG='' +if [ "${CLUSTER}" != 'surface' ]; then + source /usr/share/lmod/lmod/init/bash + source /etc/profile.d/00-modulepath.sh +fi + +while :; do 
+ case ${1} in + --compiler) + # Choose compiler + if [ -n "${2}" ]; then + COMPILER=${2} + shift + else + echo "\"${1}\" option requires a non-empty option argument" >&2 + exit 1 + fi + ;; + + -d|--debug) + # Debug mode + DEBUG='--debug' + ;; + *) + # Break loop if there are no more options + break + + esac + shift +done + +if [ "${COMPILER}" == 'clang' ]; then + module load clang/4.0.0 + ${LBANN_DIR}/scripts/build_lbann_lc.sh --compiler clang ${DEBUG} --reconfigure +fi + +if [ "${COMPILER}" == 'intel' ]; then + module load intel/18.0.0 + ${LBANN_DIR}/scripts/build_lbann_lc.sh --compiler intel ${DEBUG} --reconfigure +fi + +if [ "${COMPILER}" == 'gcc4' ]; then + module load gcc/4.9.3 + ${LBANN_DIR}/scripts/build_lbann_lc.sh --compiler gnu ${DEBUG} --reconfigure +fi + +if [ "${COMPILER}" == 'gcc7' ]; then + module load gcc/7.1.0 + ${LBANN_DIR}/scripts/build_lbann_lc.sh --compiler gnu ${DEBUG} --reconfigure +fi diff --git a/bamboo/compiler_tests/conftest.py b/bamboo/compiler_tests/conftest.py index 6e07162c5d3..238b812e638 100644 --- a/bamboo/compiler_tests/conftest.py +++ b/bamboo/compiler_tests/conftest.py @@ -1,18 +1,23 @@ import pytest import re, subprocess + def pytest_addoption(parser): - cluster = re.sub('[0-9]+', '', subprocess.check_output('hostname'.split()).strip()) - default_dirname = subprocess.check_output('git rev-parse --show-toplevel'.split()).strip() + cluster = re.sub('[0-9]+', '', subprocess.check_output( + 'hostname'.split()).strip()) + default_dirname = subprocess.check_output( + 'git rev-parse --show-toplevel'.split()).strip() parser.addoption('--cluster', action='store', default=cluster, help='--cluster= to specify the cluster being run on, for the purpose of determing which commands to use. 
Default the current cluster') parser.addoption('--dirname', action='store', default=default_dirname, help='--dirname specifies the top-level directory') + @pytest.fixture def cluster(request): return request.config.getoption('--cluster') - + + @pytest.fixture def dirname(request): return request.config.getoption('--dirname') diff --git a/bamboo/compiler_tests/test_compiler.py b/bamboo/compiler_tests/test_compiler.py index 383c8701832..6bb9599330c 100644 --- a/bamboo/compiler_tests/test_compiler.py +++ b/bamboo/compiler_tests/test_compiler.py @@ -1,14 +1,14 @@ +# import sys +# sys.path.insert(0, '../common_python') +# import tools import pytest import os, re, subprocess -def build_script(cluster, dirname, compiler, debug): - if debug: - build = 'debug' - else: - build = 'release' - output_file_name = '%s/bamboo/compiler_tests/output/%s_%s_%s_output.txt' % (dirname, cluster, compiler, build) - error_file_name = '%s/bamboo/compiler_tests/error/%s_%s_%s_error.txt' % (dirname, cluster, compiler, build) - command = '%s/bamboo/compiler_tests/build_script.sh --compiler %s %s> %s 2> %s' % (dirname, compiler, debug, output_file_name, error_file_name) + +def test_compiler_build_script(dirname): + output_file_name = '%s/bamboo/compiler_tests/output/build_script_output.txt' % (dirname) + error_file_name = '%s/bamboo/compiler_tests/error/build_script_error.txt' % (dirname) + command = '%s/bamboo/compiler_tests/build_script.sh > %s 2> %s' % (dirname, output_file_name, error_file_name) return_code = os.system(command) if return_code != 0: output_file = open(output_file_name, 'r') @@ -19,55 +19,62 @@ def build_script(cluster, dirname, compiler, debug): print('%s: %s' % (error_file_name, line)) assert return_code == 0 + def test_compiler_clang4_release(cluster, dirname): - #skeleton_clang4(cluster, dirname, False) - if cluster in ['ray', 'catalyst']: - build_script(cluster, dirname, 'clang', '') - else: - pytest.skip('Unsupported Cluster %s' % cluster) + try: + 
skeleton_clang4(cluster, dirname, False) + except AssertionError: + build_script(cluster, dirname, 'clang', False) + def test_compiler_clang4_debug(cluster, dirname): - #skeleton_clang4(cluster, dirname, True) - if cluster in ['ray', 'catalyst']: - build_script(cluster, dirname, 'clang', '--debug') - else: - pytest.skip('Unsupported Cluster %s' % cluster) + try: + skeleton_clang4(cluster, dirname, True) + except AssertionError: + build_script(cluster, dirname, 'clang', True) + def test_compiler_gcc4_release(cluster, dirname): - #skeleton_gcc4(cluster, dirname, False) - build_script(cluster, dirname, 'gcc4', '') + try: + skeleton_gcc4(cluster, dirname, False) + except AssertionError: + build_script(cluster, dirname, 'gcc4', False) + def test_compiler_gcc4_debug(cluster, dirname): - #skeleton_gcc4(cluster, dirname, True) - build_script(cluster, dirname, 'gcc4', '--debug') + try: + skeleton_gcc4(cluster, dirname, True) + except AssertionError: + build_script(cluster, dirname, 'gcc4', True) + def test_compiler_gcc7_release(cluster, dirname): - #skeleton_gcc7(cluster, dirname, False) - if cluster == 'catalyst': - build_script(cluster, dirname, 'gcc7', '') - else: - pytest.skip('Unsupported Cluster %s' % cluster) + try: + skeleton_gcc7(cluster, dirname, False) + except AssertionError: + build_script(cluster, dirname, 'gcc7', False) + def test_compiler_gcc7_debug(cluster, dirname): - #skeleton_gcc7(cluster, dirname, True) - if cluster == 'catalyst': - build_script(cluster, dirname, 'gcc7', '--debug') - else: - pytest.skip('Unsupported Cluster %s' % cluster) + try: + skeleton_gcc7(cluster, dirname, True) + except AssertionError: + build_script(cluster, dirname, 'gcc7', True) + def test_compiler_intel18_release(cluster, dirname): - #skeleton_intel18(cluster, dirname, False) - if cluster == 'catalyst': - build_script(cluster, dirname, 'intel', '') - else: - pytest.skip('Unsupported Cluster %s' % cluster) + try: + skeleton_intel18(cluster, dirname, False) + except 
AssertionError: + build_script(cluster, dirname, 'intel18', False) + def test_compiler_intel18_debug(cluster, dirname): - #skeleton_intel18(cluster, dirname, True) - if cluster == 'catalyst': - build_script(cluster, dirname, 'intel', '--debug') - else: - pytest.skip('Unsupported Cluster %s' % cluster) + try: + skeleton_intel18(cluster, dirname, True) + except AssertionError: + build_script(cluster, dirname, 'intel18', True) + def skeleton_clang4(cluster, dir_name, debug, should_log=False): if cluster in ['catalyst', 'quartz']: @@ -76,11 +83,12 @@ def skeleton_clang4(cluster, dir_name, debug, should_log=False): else: pytest.skip('Unsupported Cluster %s' % cluster) + def skeleton_gcc4(cluster, dir_name, debug, should_log=False): if cluster in ['catalyst', 'quartz', 'ray']: - if cluster in ['catalyst','quartz']: + if cluster in ['catalyst', 'quartz']: mpi = 'mvapich2@2.2' - elif cluster in ['pascal', 'surface']: + elif cluster in ['pascal', 'surface']: mpi = 'mvapich2@2.2+cuda' elif cluster == 'ray': mpi = 'spectrum-mpi@2018.04.27' @@ -91,6 +99,7 @@ def skeleton_gcc4(cluster, dir_name, debug, should_log=False): else: pytest.skip('Unsupported Cluster %s' % cluster) + def skeleton_gcc7(cluster, dir_name, debug, should_log=False): if cluster in ['catalyst', 'quartz']: spack_skeleton(dir_name, 'gcc@7.1.0', 'mvapich2@2.2', debug, should_log) @@ -98,6 +107,7 @@ def skeleton_gcc7(cluster, dir_name, debug, should_log=False): else: pytest.skip('Unsupported Cluster %s' % cluster) + def skeleton_intel18(cluster, dir_name, debug, should_log=False): if cluster in ['catalyst', 'quartz']: spack_skeleton(dir_name, 'intel@18.0.0', 'mvapich2@2.2', debug, should_log) @@ -105,6 +115,7 @@ def skeleton_intel18(cluster, dir_name, debug, should_log=False): else: pytest.skip('Unsupported Cluster %s' % cluster) + def spack_skeleton(dir_name, compiler, mpi_lib, debug, should_log): compiler_underscored = re.sub('[@\.]', '_', compiler) if debug: @@ -130,6 +141,7 @@ def spack_skeleton(dir_name, 
compiler, mpi_lib, debug, should_log): print('%s: %s' % (error_file_name, line)) assert return_code == 0 + def build_skeleton(dir_name, compiler, debug, should_log): compiler_underscored = re.sub('[@\.]', '_', compiler) if debug: @@ -142,7 +154,8 @@ def build_skeleton(dir_name, compiler, debug, should_log): #mpi_lib = mpi_lib.replace('@', '-') cluster = re.sub('[0-9]+', '', subprocess.check_output('hostname'.split()).strip()) # For reference: - # Commenting out for now. These additions to path name will likely return one day, so I am not removing them entirely + # Commenting out for now. These additions to path name will likely return + # one day, so I am not removing them entirely. # x86_64 <=> catalyst, pascal, quartz, surface # ppc64le <=> ray #architecture = subprocess.check_output('uname -m'.split()).strip() @@ -164,3 +177,33 @@ def build_skeleton(dir_name, compiler, debug, should_log): for line in error_file: print('%s: %s' % (error_file_name, line)) assert return_code == 0 + + +def build_script(cluster, dirname, compiler, debug): + # We can't ensure the build_script tests will be run after the spack + # tests... + # exes = tools.get_spack_exes(dirname, cluster) + # if os.path.exists(exes[compiler]): + # # Spack executable exists. That means the Spack test passed. + # pytest.skip(('Spack test passed. 
Ignoring build_script for' + # ' compiler={compiler}.').format(compiler=compiler)) + print(('Running build_script for cluster={cluster},' + ' compiler={compiler}.').format(cluster=cluster, compiler=compiler)) + if debug: + build = 'debug' + debug_flag = '--debug' + else: + build = 'release' + debug_flag = '' + output_file_name = '%s/bamboo/compiler_tests/output/%s_%s_%s_build_script_output.txt' % (dirname, cluster, compiler, build) + error_file_name = '%s/bamboo/compiler_tests/error/%s_%s_%s_build_script_error.txt' % (dirname, cluster, compiler, build) + command = '%s/bamboo/compiler_tests/build_script_specific.sh --compiler %s %s> %s 2> %s' % (dirname, compiler, debug_flag, output_file_name, error_file_name) + return_code = os.system(command) + if return_code != 0: + output_file = open(output_file_name, 'r') + for line in output_file: + print('%s: %s' % (output_file_name, line)) + error_file = open(error_file_name, 'r') + for line in error_file: + print('%s: %s' % (error_file_name, line)) + assert return_code == 0 diff --git a/bamboo/integration_tests/common_code.py b/bamboo/integration_tests/common_code.py index 7a3cea95c71..e4b6a7ab49d 100644 --- a/bamboo/integration_tests/common_code.py +++ b/bamboo/integration_tests/common_code.py @@ -3,6 +3,7 @@ import tools import collections, csv, os, pprint, re, time + # Set up the command ########################################################## def get_command(cluster, dir_name, model_folder, model_name, executable, output_file_name, error_file_name, compiler_name, weekly=False): @@ -23,7 +24,8 @@ def get_command(cluster, dir_name, model_folder, model_name, executable, optimizer_name='adagrad', output_file_name=output_file_name, error_file_name=error_file_name) elif model_name in ['conv_autoencoder_mnist', 'lenet_mnist']: - if (model_name == 'lenet_mnist') and (compiler_name in ['clang4', 'intel18']): + if (model_name == 'lenet_mnist') and \ + (compiler_name in ['clang4', 'intel18']): partition = 'pbatch' time_limit =
600 else: @@ -33,10 +35,10 @@ def get_command(cluster, dir_name, model_folder, model_name, executable, num_processes = 20 else: num_processes = 2 - command = tools.get_command( + command = tools.get_command( cluster=cluster, executable=executable, num_nodes=1, - partition=partition, time_limit=time_limit, num_processes=num_processes, - dir_name=dir_name, + partition=partition, time_limit=time_limit, + num_processes=num_processes, dir_name=dir_name, data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST', data_reader_name='mnist', model_folder=model_folder, model_name=model_name, num_epochs=5, optimizer_name='adagrad', @@ -47,18 +49,23 @@ def get_command(cluster, dir_name, model_folder, model_name, executable, # Run LBANN ################################################################### -def run_lbann(command, model_name, output_file_name, error_file_name, should_log=False): + +def run_lbann(command, model_name, output_file_name, error_file_name, + should_log=False): print('About to run: %s' % command) - print('%s began waiting in the queue at ' % model_name + time.strftime('%H:%M:%S', time.localtime())) + print('%s began waiting in the queue at ' % model_name + + time.strftime('%H:%M:%S', time.localtime())) output_value = os.system(command) - print('%s finished at ' % model_name + time.strftime('%H:%M:%S', time.localtime())) + print('%s finished at ' % model_name + + time.strftime('%H:%M:%S', time.localtime())) lbann_exceptions = [] timed_out = False if should_log or (output_value != 0): output_file = open(output_file_name, 'r') for line in output_file: print('%s: %s' % (output_file_name, line)) - is_match = re.search('This lbann_exception is about to be thrown:(.*)', line) + is_match = re.search( + 'This lbann_exception is about to be thrown:(.*)', line) if is_match: lbann_exceptions.append(is_match.group(1)) is_match = re.search('CANCELLED AT (.*) DUE TO TIME LIMIT', line) @@ -67,15 +74,22 @@ def run_lbann(command, model_name, output_file_name, 
error_file_name, should_log error_file = open(error_file_name, 'r') for line in error_file: print('%s: %s' % (error_file_name, line)) + is_match = re.search('LBANN error on:(.*)', line) + if is_match: + lbann_exceptions.append(is_match.group(1)) if output_value != 0: - error_string = 'Model %s crashed with output_value=%d, timed_out=%s, and lbann exceptions=%s. Command was: %s' % ( - model_name, output_value, str(timed_out), str(collections.Counter(lbann_exceptions)), command) + error_string = ('Model %s crashed with output_value=%d, timed_out=%s,' + ' and lbann exceptions=%s. Command was: %s') % ( + model_name, output_value, str(timed_out), + str(collections.Counter(lbann_exceptions)), command) raise Exception(error_string) return output_value # Extract data from output #################################################### -def populate_data_dict_epoch(regex, line, data_field, data_fields, data_dict, model_id): + +def populate_data_dict_epoch(regex, line, data_field, data_fields, data_dict, + model_id): is_match = re.search(regex, line) if is_match and (data_field in data_fields): if model_id not in data_dict[data_field].keys(): @@ -84,7 +98,9 @@ def populate_data_dict_epoch(regex, line, data_field, data_fields, data_dict, mo value = float(is_match.group(2)) data_dict[data_field][model_id][epoch_id] = value -def populate_data_dict_overall(regex, line, data_field, data_fields, data_dict, model_id): + +def populate_data_dict_overall(regex, line, data_field, data_fields, data_dict, + model_id): is_match = re.search(regex, line) if is_match and (data_field in data_fields): if model_id not in data_dict[data_field].keys(): @@ -92,6 +108,7 @@ def populate_data_dict_overall(regex, line, data_field, data_fields, data_dict, value = float(is_match.group(1)) data_dict[data_field][model_id]['overall'] = value + # data_dict[data_field][model_id][epoch_id] = float # data_fields is the list or set of data we're interested in. 
def extract_data(output_file_name, data_fields, should_log): @@ -111,13 +128,17 @@ def extract_data(output_file_name, data_fields, should_log): regex = 'training epoch ([0-9]+) objective function : ([0-9.]+)' data_field = 'training_objective_function' - populate_data_dict_epoch(regex, line, data_field, data_fields, data_dict, model_id) + populate_data_dict_epoch(regex, line, data_field, data_fields, + data_dict, model_id) regex = 'training epoch ([0-9]+) run time : ([0-9.]+)' data_field = 'training_run_time' - populate_data_dict_epoch(regex, line, data_field, data_fields, data_dict, model_id) + populate_data_dict_epoch(regex, line, data_field, data_fields, + data_dict, model_id) - regex = 'training epoch ([0-9]+) mini-batch time statistics : ([0-9.]+)s mean, ([0-9.]+)s max, ([0-9.]+)s min, ([0-9.]+)s stdev' + regex = ('training epoch ([0-9]+) mini-batch time statistics' + ' : ([0-9.]+)s mean, ([0-9.]+)s max, ([0-9.]+)s min,' + ' ([0-9.]+)s stdev') is_match = re.search(regex, line) if is_match: epoch_id = is_match.group(1) @@ -148,7 +169,8 @@ def extract_data(output_file_name, data_fields, should_log): regex = 'test categorical accuracy : ([0-9.]+)' data_field = 'test_accuracy' - populate_data_dict_overall(regex, line, data_field, data_fields, data_dict, model_id) + populate_data_dict_overall(regex, line, data_field, data_fields, + data_dict, model_id) output_file.close() if should_log: pprint.pprint(data_dict) @@ -156,26 +178,33 @@ def extract_data(output_file_name, data_fields, should_log): # Skeleton #################################################################### -def skeleton(cluster, dir_name, executable, model_folder, model_name, data_fields, should_log, compiler_name=None, weekly=False): + +def skeleton(cluster, dir_name, executable, model_folder, model_name, + data_fields, should_log, compiler_name=None, weekly=False): if compiler_name == None: output_file_name = '%s/bamboo/integration_tests/output/%s_output.txt' % (dir_name, model_name) error_file_name 
= '%s/bamboo/integration_tests/error/%s_error.txt' % (dir_name, model_name) else: - output_file_name = '%s/bamboo/integration_tests/output/%s_%s_output.txt' %(dir_name, model_name, compiler_name) - error_file_name = '%s/bamboo/integration_tests/error/%s_%s_error.txt' %(dir_name, model_name, compiler_name) - command = get_command(cluster, dir_name, model_folder, model_name, executable, output_file_name, error_file_name, compiler_name, weekly=weekly) - run_lbann(command, model_name, output_file_name, error_file_name, should_log) # Don't need return value + output_file_name = '%s/bamboo/integration_tests/output/%s_%s_output.txt' % (dir_name, model_name, compiler_name) + error_file_name = '%s/bamboo/integration_tests/error/%s_%s_error.txt' % (dir_name, model_name, compiler_name) + command = get_command( + cluster, dir_name, model_folder, model_name, executable, + output_file_name, error_file_name, compiler_name, weekly=weekly) + run_lbann(command, model_name, output_file_name, + error_file_name, should_log) # Don't need return value return extract_data(output_file_name, data_fields, should_log) # Misc. 
functions ############################################################ + # csv_dict[row_header][column_header] = float def csv_to_dict(csv_path): - with open(csv_path, 'r') as csv_file: - reader = csv.reader(csv_file, skipinitialspace=True) - column_headers = reader.next() - values = {} - for row in reader: - row_header = row[0] - values[row_header] = dict(zip(column_headers[1:], map(float, row[1:]))) - return values + with open(csv_path, 'r') as csv_file: + reader = csv.reader(csv_file, skipinitialspace=True) + column_headers = reader.next() + values = {} + for row in reader: + row_header = row[0] + values[row_header] = dict( + zip(column_headers[1:], map(float, row[1:]))) + return values diff --git a/bamboo/integration_tests/conftest.py b/bamboo/integration_tests/conftest.py index 4039eeb7dac..da2ffc127be 100644 --- a/bamboo/integration_tests/conftest.py +++ b/bamboo/integration_tests/conftest.py @@ -1,31 +1,15 @@ -import pytest, os, re, subprocess +import sys +sys.path.insert(0, '../common_python') +import tools +import pytest, re, subprocess -def pytest_addoption(parser): - cluster = re.sub('[0-9]+', '', subprocess.check_output('hostname'.split()).strip()) - default_dirname = subprocess.check_output('git rev-parse --show-toplevel'.split()).strip() - default_exes = {} - default_exes['default'] = '%s/build/gnu.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) - if cluster in ['catalyst', 'quartz']: - default_exes['clang4'] = '%s/build/clang.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_clang-4.0.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) - #default_exes['gcc4'] = '%s/bamboo/compiler_tests/builds/%s_gcc-4.9.3_rel/build/model_zoo/lbann' % (default_dirname, cluster) - default_exes['gcc7'] = '%s/build/gnu.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_gcc-7.1.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) - 
default_exes['intel18'] = '%s/build/intel.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_intel-18.0.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) - - default_exes['clang4_debug'] = '%s/build/clang.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_clang-4.0.0_debug/build/model_zoo/lbann' % (default_dirname, cluster) - #default_exes['gcc4_debug'] = '%s/bamboo/compiler_tests/builds/%s_gcc-4.9.3_debug/build/model_zoo/lbann' % (default_dirname, cluster) - default_exes['gcc7_debug'] = '%s/build/gnu.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_gcc-7.1.0_debug/build/model_zoo/lbann' % (default_dirname, cluster) - default_exes['intel18_debug'] = '%s/build/intel.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_intel-18.0.0_debug/build/model_zoo/lbann' % (default_dirname, cluster) - - if cluster == 'ray': - default_exes['clang4'] = '%s/build/clang.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) - default_exes['gcc4'] = '%s/build/gnu.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_gcc-4.9.3_rel/build/model_zoo/lbann' % (default_dirname, cluster) - default_exes['clang4_debug'] = '%s/build/clang.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) - default_exes['gcc4_debug'] = '%s/build/gnu.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_gcc-4.9.3_debug/build/model_zoo/lbann' % (default_dirname, cluster) - - if cluster in ['surface', 'pascal']: - default_exes['gcc4'] = default_exes['default'] - default_exes['gcc4_debug'] = '%s/build/gnu.Debug.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) +def pytest_addoption(parser): + cluster = re.sub('[0-9]+', '', subprocess.check_output( + 
'hostname'.split()).strip()) + default_dirname = subprocess.check_output( + 'git rev-parse --show-toplevel'.split()).strip() + default_exes = tools.get_default_exes(default_dirname, cluster) parser.addoption('--cluster', action='store', default=cluster, help='--cluster= to specify the cluster being run on, for the purpose of determing which commands to use. Default the current cluster') @@ -40,26 +24,32 @@ def pytest_addoption(parser): # For local testing only parser.addoption('--exe', action='store', help='--exe=') + @pytest.fixture def cluster(request): return request.config.getoption('--cluster') + @pytest.fixture def debug(request): return request.config.getoption('--debug') + @pytest.fixture def dirname(request): return request.config.getoption('--dirname') + @pytest.fixture def exes(request): return request.config.getoption('--exes') + @pytest.fixture def weekly(request): return request.config.getoption('--weekly') + @pytest.fixture def exe(request): return request.config.getoption('--exe') diff --git a/bamboo/integration_tests/expected_values/pascal/clang4/expected_conv_autoencoder_imagenet_objective_functions.csv b/bamboo/integration_tests/expected_values/pascal/clang4/expected_conv_autoencoder_imagenet_objective_functions.csv new file mode 100644 index 00000000000..003794fd557 --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/clang4/expected_conv_autoencoder_imagenet_objective_functions.csv @@ -0,0 +1,21 @@ +Epoch_number, training_objective_function_nightly, training_objective_function_weekly +0, 0.675652, 0.608574 +1, 0.590008, 0.590008 +2, 0.587484, 0.587484 +3, 0.586305, 0.586305 +4, 0.585585, 0.585585 +5, 0.585036, 0.585036 +6, 0.584688, 0.584688 +7, 0.584348, 0.584348 +8, 0.584041, 0.584041 +9, 0.583865, 0.583865 +10, 0.583665, 0.583665 +11, 0.583521, 0.583521 +12, 0.583303, 0.583303 +13, 0.58328, 0.58328 +14, 0.5832, 0.5832 +15, 0.583134, 0.583134 +16, 0.583052, 0.583052 +17, 0.583039, 0.583039 +18, 0.582954, 0.582954 +19, 0.582936, 
0.582936 diff --git a/bamboo/integration_tests/expected_values/pascal/clang4/expected_conv_autoencoder_mnist_objective_functions.csv b/bamboo/integration_tests/expected_values/pascal/clang4/expected_conv_autoencoder_mnist_objective_functions.csv new file mode 100644 index 00000000000..80c12b2b0ed --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/clang4/expected_conv_autoencoder_mnist_objective_functions.csv @@ -0,0 +1,6 @@ +Epoch_number, training_objective_function +0, 0.207480 +1, 0.194710 +2, 0.193224 +3, 0.192867 +4, 0.192758 diff --git a/bamboo/integration_tests/expected_values/pascal/clang4/expected_performance.csv b/bamboo/integration_tests/expected_values/pascal/clang4/expected_performance.csv new file mode 100644 index 00000000000..2234e14d5ab --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/clang4/expected_performance.csv @@ -0,0 +1,5 @@ +Model_name, training_run_time, training_mean, training_max, training_min, training_stdev, test_accuracy +alexnet_nightly, 353.48, 7.07, 21.57, 1.24, 4.21, 0.00 +alexnet_weekly, 882.26, 1.78, 4.68, 0.95, 0.21, 2.49 +cache_alexnet, 623.30, 1.27, 4.98, 0.66, 2.24, 0.57 +lenet_mnist, 33.55, 0.04, 0.09, 0.04, 0.01, 98.96 diff --git a/bamboo/integration_tests/expected_values/pascal/gcc4/expected_conv_autoencoder_imagenet_objective_functions.csv b/bamboo/integration_tests/expected_values/pascal/gcc4/expected_conv_autoencoder_imagenet_objective_functions.csv new file mode 100644 index 00000000000..003794fd557 --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/gcc4/expected_conv_autoencoder_imagenet_objective_functions.csv @@ -0,0 +1,21 @@ +Epoch_number, training_objective_function_nightly, training_objective_function_weekly +0, 0.675652, 0.608574 +1, 0.590008, 0.590008 +2, 0.587484, 0.587484 +3, 0.586305, 0.586305 +4, 0.585585, 0.585585 +5, 0.585036, 0.585036 +6, 0.584688, 0.584688 +7, 0.584348, 0.584348 +8, 0.584041, 0.584041 +9, 0.583865, 0.583865 +10, 0.583665, 0.583665 
+11, 0.583521, 0.583521 +12, 0.583303, 0.583303 +13, 0.58328, 0.58328 +14, 0.5832, 0.5832 +15, 0.583134, 0.583134 +16, 0.583052, 0.583052 +17, 0.583039, 0.583039 +18, 0.582954, 0.582954 +19, 0.582936, 0.582936 diff --git a/bamboo/integration_tests/expected_values/pascal/gcc4/expected_conv_autoencoder_mnist_objective_functions.csv b/bamboo/integration_tests/expected_values/pascal/gcc4/expected_conv_autoencoder_mnist_objective_functions.csv new file mode 100644 index 00000000000..80c12b2b0ed --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/gcc4/expected_conv_autoencoder_mnist_objective_functions.csv @@ -0,0 +1,6 @@ +Epoch_number, training_objective_function +0, 0.207480 +1, 0.194710 +2, 0.193224 +3, 0.192867 +4, 0.192758 diff --git a/bamboo/integration_tests/expected_values/pascal/gcc4/expected_performance.csv b/bamboo/integration_tests/expected_values/pascal/gcc4/expected_performance.csv new file mode 100644 index 00000000000..639e20aa5f4 --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/gcc4/expected_performance.csv @@ -0,0 +1,5 @@ +Model_name, training_run_time, training_mean, training_max, training_min, training_stdev, test_accuracy +alexnet_nightly, 63.18, 1.27, 3.11, 0.79, 0.55, 0.00 +alexnet_weekly, 565.30, 1.14, 3.83, 0.76, 0.30, 3.11 +cache_alexnet, 623.30, 1.27, 4.98, 0.66, 2.24, 0.57 +lenet_mnist, 15.61, 0.02, 0.08, 0.02, 0.01, 98.96 diff --git a/bamboo/integration_tests/expected_values/pascal/gcc7/expected_conv_autoencoder_imagenet_objective_functions.csv b/bamboo/integration_tests/expected_values/pascal/gcc7/expected_conv_autoencoder_imagenet_objective_functions.csv new file mode 100644 index 00000000000..003794fd557 --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/gcc7/expected_conv_autoencoder_imagenet_objective_functions.csv @@ -0,0 +1,21 @@ +Epoch_number, training_objective_function_nightly, training_objective_function_weekly +0, 0.675652, 0.608574 +1, 0.590008, 0.590008 +2, 0.587484, 0.587484 
+3, 0.586305, 0.586305 +4, 0.585585, 0.585585 +5, 0.585036, 0.585036 +6, 0.584688, 0.584688 +7, 0.584348, 0.584348 +8, 0.584041, 0.584041 +9, 0.583865, 0.583865 +10, 0.583665, 0.583665 +11, 0.583521, 0.583521 +12, 0.583303, 0.583303 +13, 0.58328, 0.58328 +14, 0.5832, 0.5832 +15, 0.583134, 0.583134 +16, 0.583052, 0.583052 +17, 0.583039, 0.583039 +18, 0.582954, 0.582954 +19, 0.582936, 0.582936 diff --git a/bamboo/integration_tests/expected_values/pascal/gcc7/expected_conv_autoencoder_mnist_objective_functions.csv b/bamboo/integration_tests/expected_values/pascal/gcc7/expected_conv_autoencoder_mnist_objective_functions.csv new file mode 100644 index 00000000000..8bcf25bb71d --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/gcc7/expected_conv_autoencoder_mnist_objective_functions.csv @@ -0,0 +1,6 @@ +Epoch_number, training_objective_function +0, 0.207514 +1, 0.194710 +2, 0.193221 +3, 0.192864 +4, 0.192755 diff --git a/bamboo/integration_tests/expected_values/pascal/gcc7/expected_performance.csv b/bamboo/integration_tests/expected_values/pascal/gcc7/expected_performance.csv new file mode 100644 index 00000000000..b315574f51d --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/gcc7/expected_performance.csv @@ -0,0 +1,5 @@ +Model_name, training_run_time, training_mean, training_max, training_min, training_stdev, test_accuracy +alexnet_nightly, 71.14, 1.43, 3.20, 0.98, 0.51, 0.00 +alexnet_weekly, 691.96, 1.40, 4.53, 1.09, 0.22, 1.05 +cache_alexnet, 623.30, 1.27, 4.98, 0.66, 2.24, 0.57 +lenet_mnist, 15.51, 0.02, 0.06, 0.02, 0.01, 99.00 diff --git a/bamboo/integration_tests/expected_values/pascal/intel18/expected_conv_autoencoder_imagenet_objective_functions.csv b/bamboo/integration_tests/expected_values/pascal/intel18/expected_conv_autoencoder_imagenet_objective_functions.csv new file mode 100644 index 00000000000..003794fd557 --- /dev/null +++ 
b/bamboo/integration_tests/expected_values/pascal/intel18/expected_conv_autoencoder_imagenet_objective_functions.csv @@ -0,0 +1,21 @@ +Epoch_number, training_objective_function_nightly, training_objective_function_weekly +0, 0.675652, 0.608574 +1, 0.590008, 0.590008 +2, 0.587484, 0.587484 +3, 0.586305, 0.586305 +4, 0.585585, 0.585585 +5, 0.585036, 0.585036 +6, 0.584688, 0.584688 +7, 0.584348, 0.584348 +8, 0.584041, 0.584041 +9, 0.583865, 0.583865 +10, 0.583665, 0.583665 +11, 0.583521, 0.583521 +12, 0.583303, 0.583303 +13, 0.58328, 0.58328 +14, 0.5832, 0.5832 +15, 0.583134, 0.583134 +16, 0.583052, 0.583052 +17, 0.583039, 0.583039 +18, 0.582954, 0.582954 +19, 0.582936, 0.582936 diff --git a/bamboo/integration_tests/expected_values/pascal/intel18/expected_conv_autoencoder_mnist_objective_functions.csv b/bamboo/integration_tests/expected_values/pascal/intel18/expected_conv_autoencoder_mnist_objective_functions.csv new file mode 100644 index 00000000000..80c12b2b0ed --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/intel18/expected_conv_autoencoder_mnist_objective_functions.csv @@ -0,0 +1,6 @@ +Epoch_number, training_objective_function +0, 0.207480 +1, 0.194710 +2, 0.193224 +3, 0.192867 +4, 0.192758 diff --git a/bamboo/integration_tests/expected_values/pascal/intel18/expected_performance.csv b/bamboo/integration_tests/expected_values/pascal/intel18/expected_performance.csv new file mode 100644 index 00000000000..4fc534169fe --- /dev/null +++ b/bamboo/integration_tests/expected_values/pascal/intel18/expected_performance.csv @@ -0,0 +1,5 @@ +Model_name, training_run_time, training_mean, training_max, training_min, training_stdev, test_accuracy +alexnet_nightly, 49.54, 0.96, 3.21, 1.00, 0.62, 0.00 +alexnet_weekly, 402.50, 0.82, 3.36, 0.47, 0.34, 3.27 +cache_alexnet, 623.30, 1.27, 4.98, 0.66, 2.24, 0.57 +lenet_mnist, 20.02, 0.03, 0.09, 0.03, 0.01, 98.91 diff --git a/bamboo/integration_tests/test_integration_autoencoders.py 
b/bamboo/integration_tests/test_integration_autoencoders.py index 4fbe0172d0f..40717575e7d 100644 --- a/bamboo/integration_tests/test_integration_autoencoders.py +++ b/bamboo/integration_tests/test_integration_autoencoders.py @@ -1,36 +1,47 @@ import pytest import common_code -def error_if(f, f_symbol, data_field, actual_values, expected_values, model_name, errors, all_values, frequency_str): + +def error_if(f, f_symbol, data_field, actual_values, expected_values, + model_name, errors, all_values, frequency_str): d = actual_values[data_field] for model_id in sorted(d.keys()): for epoch_id in sorted(d[model_id].keys()): actual_value = d[model_id][epoch_id] expected_value = expected_values[epoch_id][data_field + frequency_str] - if actual_value == None: + if actual_value is None: errors.append('d[%s][%s] == None' % (model_id, epoch_id)) - if expected_value == None: + if expected_value is None: errors.append('d[%s]([%s] == None' % (model_id, epoch_id)) if f(actual_value, expected_value): - errors.append('%f %s %f %s Model %s Epoch %s %s' % (actual_value, f_symbol, expected_value, model_name, model_id, epoch_id, data_field)) - all_values.append('%f %s Model %s Epoch %s %s' % (actual_value, model_name, model_id, epoch_id, data_field)) + errors.append('%f %s %f %s Model %s Epoch %s %s' % ( + actual_value, f_symbol, expected_value, model_name, model_id, + epoch_id, data_field)) + all_values.append('%f %s Model %s Epoch %s %s' % ( + actual_value, model_name, model_id, epoch_id, data_field)) + -def run_tests(actual_objective_functions, model_name, dir_name, cluster, should_log, compiler_name, frequency_str=''): - expected_objective_functions = common_code.csv_to_dict('%s/bamboo/integration_tests/expected_values/%s/%s/expected_%s_objective_functions.csv' % (dir_name, cluster, compiler_name, model_name)) +def run_tests(actual_objective_functions, model_name, dir_name, cluster, + should_log, compiler_name, frequency_str=''): + expected_objective_functions = 
common_code.csv_to_dict( + '%s/bamboo/integration_tests/expected_values/%s/%s/expected_%s_objective_functions.csv' % (dir_name, cluster, compiler_name, model_name)) errors = [] all_values = [] tolerance = 0.05 # Are we within tolerance * expected_value? - outside_tolerance = lambda x,y: abs(x - y) > abs(tolerance * y) - error_if(outside_tolerance, '!=', 'training_objective_function', actual_objective_functions, expected_objective_functions, model_name, errors, all_values, frequency_str) + outside_tolerance = lambda x, y: abs(x - y) > abs(tolerance * y) + error_if(outside_tolerance, '!=', 'training_objective_function', + actual_objective_functions, expected_objective_functions, + model_name, errors, all_values, frequency_str) print('Errors for: %s %s (%d)' % (model_name, compiler_name, len(errors))) for error in errors: print(error) if should_log: - print('All values for: %s %s (%d)' % (model_name, compiler_name, len(all_values))) + print('All values for: %s %s (%d)' % (model_name, compiler_name, + len(all_values))) for value in all_values: print(value) assert errors == [] @@ -39,16 +50,22 @@ def run_tests(actual_objective_functions, model_name, dir_name, cluster, should_ 'training_objective_function' ] + def skeleton_autoencoder_mnist(cluster, dir_name, executables, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) model_folder = 'models/autoencoder_mnist' model_name = 'conv_autoencoder_mnist' - should_log=False - actual_objective_functions = common_code.skeleton(cluster, dir_name, executables[compiler_name], model_folder, model_name, DATA_FIELDS, should_log, compiler_name=compiler_name) - run_tests(actual_objective_functions, model_name, dir_name, cluster, should_log, compiler_name) + should_log = False + actual_objective_functions = common_code.skeleton( + cluster, dir_name, executables[compiler_name], model_folder, model_name, + DATA_FIELDS, should_log, compiler_name=compiler_name) + 
run_tests(actual_objective_functions, model_name, dir_name, cluster, + should_log, compiler_name) + -def skeleton_autoencoder_imagenet(cluster, dir_name, executables, compiler_name, weekly): +def skeleton_autoencoder_imagenet(cluster, dir_name, executables, compiler_name, + weekly): if cluster == 'surface': pytest.skip('skeleton_autoencoder_imagenet does not run on surface') if compiler_name not in executables: @@ -56,11 +73,15 @@ def skeleton_autoencoder_imagenet(cluster, dir_name, executables, compiler_name, model_folder = 'models/autoencoder_imagenet' model_name = 'conv_autoencoder_imagenet' should_log = False - actual_objective_functions = common_code.skeleton(cluster, dir_name, executables[compiler_name], model_folder, model_name, DATA_FIELDS, should_log, compiler_name=compiler_name, weekly=weekly) + actual_objective_functions = common_code.skeleton( + cluster, dir_name, executables[compiler_name], model_folder, model_name, + DATA_FIELDS, should_log, compiler_name=compiler_name, weekly=weekly) frequency_str = '_nightly' if weekly: frequency_str = '_weekly' - run_tests(actual_objective_functions, model_name, dir_name, cluster, should_log, compiler_name, frequency_str) + run_tests(actual_objective_functions, model_name, dir_name, cluster, + should_log, compiler_name, frequency_str) + def test_integration_autoencoder_mnist_clang4(cluster, dirname, exes): if cluster in ['catalyst', 'quartz']: @@ -69,9 +90,12 @@ def test_integration_autoencoder_mnist_clang4(cluster, dirname, exes): # 0.219298 != 0.207480 conv_autoencoder_mnist Model 0 Epoch 0 training_objective_function skeleton_autoencoder_mnist(cluster, dirname, exes, 'clang4') -def test_integration_autoencoder_imagenet_clang4(cluster, dirname, exes, weekly): + +def test_integration_autoencoder_imagenet_clang4(cluster, dirname, exes, + weekly): skeleton_autoencoder_imagenet(cluster, dirname, exes, 'clang4', weekly) - + + def test_integration_autoencoder_mnist_gcc4(cluster, dirname, exes): if cluster in 
['catalyst', 'quartz', 'surface']: pytest.skip('FIXME') @@ -85,35 +109,45 @@ def test_integration_autoencoder_mnist_gcc4(cluster, dirname, exes): # 0.023243 != 0.192716 conv_autoencoder_mnist Model 0 Epoch 4 training_objective_function skeleton_autoencoder_mnist(cluster, dirname, exes, 'gcc4') + def test_integration_autoencoder_imagenet_gcc4(cluster, dirname, exes, weekly): skeleton_autoencoder_imagenet(cluster, dirname, exes, 'gcc4', weekly) - + + def test_integration_autoencoder_mnist_gcc7(cluster, dirname, exes): - if cluster in ['catalyst', 'quartz']: + if cluster in ['catalyst', 'quartz', 'pascal']: pytest.skip('FIXME') # Catalyst Errors: # 0.219383 != 0.207514 conv_autoencoder_mnist Model 0 Epoch 0 training_objective_function skeleton_autoencoder_mnist(cluster, dirname, exes, 'gcc7') + def test_integration_autoencoder_imagenet_gcc7(cluster, dirname, exes, weekly): - skeleton_autoencoder_imagenet(cluster, dirname, exes, 'gcc7', weekly) - + if cluster == 'pascal': + pytest.skip('FIXME') + skeleton_autoencoder_imagenet(cluster, dirname, exes, 'gcc7', weekly) + + def test_integration_autoencoder_mnist_intel18(cluster, dirname, exes): skeleton_autoencoder_mnist(cluster, dirname, exes, 'intel18') -def test_integration_autoencoder_imagenet_intel18(cluster, dirname, exes, weekly): + +def test_integration_autoencoder_imagenet_intel18(cluster, dirname, exes, + weekly): skeleton_autoencoder_imagenet(cluster, dirname, exes, 'intel18', weekly) + # Run with python -m pytest -s test_integration_autoencoder.py -k 'test_integration_autoencoder_mnist_exe' --exe= def test_integration_autoencoder_mnist_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_autoencoder_mnist(cluster, dirname, exes, 'exe', True) + # Run with python -m pytest -s test_integration_autoencoder.py -k 'test_integration_autoencoder_imagenet_exe' --exe= def test_integration_autoencoder_imagenet_exe(cluster, 
dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_autoencoder_imagenet(cluster, dirname, exes, 'exe', True) diff --git a/bamboo/integration_tests/test_integration_debug.py b/bamboo/integration_tests/test_integration_debug.py index 1744e3243d4..af3035ee6e7 100644 --- a/bamboo/integration_tests/test_integration_debug.py +++ b/bamboo/integration_tests/test_integration_debug.py @@ -2,10 +2,11 @@ sys.path.insert(0, '../common_python') import tools import pytest -import os import common_code -def skeleton_mnist_debug(cluster, dir_name, executables, compiler_name, weekly, debug, should_log=False): + +def skeleton_mnist_debug(cluster, dir_name, executables, compiler_name, weekly, + debug, should_log=False): # If weekly or debug are true, then run the test. if (not weekly) and (not debug): pytest.skip('Not doing weekly or debug testing') @@ -24,7 +25,9 @@ def skeleton_mnist_debug(cluster, dir_name, executables, compiler_name, weekly, output_value = common_code.run_lbann(command, model_name, output_file_name, error_file_name) assert output_value == 0 -def skeleton_cifar_debug(cluster, dir_name, executables, compiler_name, weekly, debug, should_log=False): + +def skeleton_cifar_debug(cluster, dir_name, executables, compiler_name, weekly, + debug, should_log=False): # If weekly or debug are true, then run the test. 
if (not weekly) and (not debug): pytest.skip('Not doing weekly or debug testing') @@ -46,37 +49,47 @@ def skeleton_cifar_debug(cluster, dir_name, executables, compiler_name, weekly, output_value = common_code.run_lbann(command, model_name, output_file_name, error_file_name) assert output_value == 0 + def test_integration_mnist_clang4_debug(cluster, dirname, exes, weekly, debug): skeleton_mnist_debug(cluster, dirname, exes, 'clang4_debug', weekly, debug) + def test_integration_cifar_clang4_debug(cluster, dirname, exes, weekly, debug): skeleton_cifar_debug(cluster, dirname, exes, 'clang4_debug', weekly, debug) + def test_integration_mnist_gcc4_debug(cluster, dirname, exes, weekly, debug): skeleton_mnist_debug(cluster, dirname, exes, 'gcc4_debug', weekly, debug) + def test_integration_cifar_gcc4_debug(cluster, dirname, exes, weekly, debug): skeleton_cifar_debug(cluster, dirname, exes, 'gcc4_debug', weekly, debug) + def test_integration_mnist_gcc7_debug(cluster, dirname, exes, weekly, debug): skeleton_mnist_debug(cluster, dirname, exes, 'gcc7_debug', weekly, debug) + def test_integration_cifar_gcc7_debug(cluster, dirname, exes, weekly, debug): skeleton_cifar_debug(cluster, dirname, exes, 'gcc7_debug', weekly, debug) + def test_integration_mnist_intel18_debug(cluster, dirname, exes, weekly, debug): skeleton_mnist_debug(cluster, dirname, exes, 'intel18_debug', weekly, debug) + def test_integration_cifar_intel18_debug(cluster, dirname, exes, weekly, debug): skeleton_cifar_debug(cluster, dirname, exes, 'intel18_debug', weekly, debug) + # Run with python -m pytest -s test_integration_debug.py -k 'test_integration_mnist_exe' --exe= def test_integration_mnist_exe(cluster, dirname, exe): - if exe == None: - pytest.skip('Non-local testing') + if exe is None: + pytest.skip('Non-local testing') exes = {'exe' : exe} skeleton_mnist_debug(cluster, dirname, exes, 'exe', True, True) + # Run with python -m pytest -s test_integration_debug.py -k 'test_integration_cifar_exe' --exe= def 
test_integration_cifar_exe(cluster, dirname, exe): if exe == None: diff --git a/bamboo/integration_tests/test_integration_performance.py b/bamboo/integration_tests/test_integration_performance.py index da5e6472762..66bbf7140a3 100644 --- a/bamboo/integration_tests/test_integration_performance.py +++ b/bamboo/integration_tests/test_integration_performance.py @@ -2,7 +2,9 @@ import operator, os import common_code -def error_if(f, f_symbol, data_field, actual_values, expected_values, model_name, errors, all_values, frequency_str): + +def error_if(f, f_symbol, data_field, actual_values, expected_values, + model_name, errors, all_values, frequency_str): d = actual_values[data_field] if f_symbol == '<': # Every time a value is smaller, update archive_value @@ -23,15 +25,21 @@ def error_if(f, f_symbol, data_field, actual_values, expected_values, model_name errors.append('d[%s]([%s] == None' % (model_id, epoch_id)) if f(actual_value, expected_value): - errors.append('%f %s %f %s Model %s Epoch %s %s' % (actual_value, f_symbol, expected_value, model_name, model_id, epoch_id, data_field)) - all_values.append('%f %s Model %s Epoch %s %s' % (actual_value, model_name, model_id, epoch_id, data_field)) + errors.append('%f %s %f %s Model %s Epoch %s %s' % ( + actual_value, f_symbol, expected_value, model_name, model_id, + epoch_id, data_field)) + all_values.append('%f %s Model %s Epoch %s %s' % ( + actual_value, model_name, model_id, epoch_id, data_field)) if f(actual_value, archive_value): archive_value = actual_value return archive_value -def run_tests(actual_performance, model_name, dir_name, should_log, compiler_name, cluster, frequency_str=''): - expected_performance = common_code.csv_to_dict('%s/bamboo/integration_tests/expected_values/%s/%s/expected_performance.csv' % (dir_name, cluster, compiler_name)) + +def run_tests(actual_performance, model_name, dir_name, should_log, + compiler_name, cluster, frequency_str=''): + expected_performance = common_code.csv_to_dict( + 
'%s/bamboo/integration_tests/expected_values/%s/%s/expected_performance.csv' % (dir_name, cluster, compiler_name)) errors = [] all_values = [] greater_than = operator.gt @@ -62,7 +70,8 @@ def run_tests(actual_performance, model_name, dir_name, should_log, compiler_nam for error in errors: print(error) if should_log: - print('All values for: %s %s (%d)' % (model_name, compiler_name, len(all_values))) + print('All values for: %s %s (%d)' % ( + model_name, compiler_name, len(all_values))) for value in all_values: print(value) assert errors == [] @@ -76,30 +85,40 @@ def run_tests(actual_performance, model_name, dir_name, should_log, compiler_nam 'test_accuracy' ] -def skeleton_performance_lenet_mnist(cluster, dir_name, executables, compiler_name): + +def skeleton_performance_lenet_mnist(cluster, dir_name, executables, + compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) executable = executables[compiler_name] model_name = 'lenet_mnist' model_folder = 'models/' + model_name should_log = False - actual_performance = common_code.skeleton(cluster, dir_name, executable, model_folder, model_name, DATA_FIELDS, should_log, compiler_name=compiler_name) - run_tests(actual_performance, model_name, dir_name, should_log, compiler_name, cluster) - -def skeleton_performance_alexnet(cluster, dir_name, executables, compiler_name, weekly): + actual_performance = common_code.skeleton( + cluster, dir_name, executable, model_folder, model_name, DATA_FIELDS, + should_log, compiler_name=compiler_name) + run_tests(actual_performance, model_name, dir_name, should_log, + compiler_name, cluster) + +def skeleton_performance_alexnet(cluster, dir_name, executables, compiler_name, + weekly): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) executable = executables[compiler_name] model_name = 'alexnet' model_folder = 'models/' + model_name should_log = False - actual_performance = 
common_code.skeleton(cluster, dir_name, executable, model_folder, model_name, DATA_FIELDS, should_log, compiler_name=compiler_name, weekly=weekly) + actual_performance = common_code.skeleton( + cluster, dir_name, executable, model_folder, model_name, DATA_FIELDS, + should_log, compiler_name=compiler_name, weekly=weekly) frequency_str = '_nightly' if weekly: frequency_str = '_weekly' - run_tests(actual_performance, model_name, dir_name, should_log, compiler_name, cluster, frequency_str) + run_tests(actual_performance, model_name, dir_name, should_log, + compiler_name, cluster, frequency_str) -def skeleton_performance_full_alexnet(cluster, dir_name, executables, compiler_name, weekly): +def skeleton_performance_full_alexnet(cluster, dir_name, executables, + compiler_name, weekly): if not weekly: pytest.skip('Not doing weekly testing') if compiler_name not in executables: @@ -111,15 +130,19 @@ def skeleton_performance_full_alexnet(cluster, dir_name, executables, compiler_n should_log = False output_file_name = '%s/bamboo/integration_tests/output/%s_%s_output.txt' %(dir_name, model_name, compiler_name) error_file_name = '%s/bamboo/integration_tests/error/%s_%s_error.txt' %(dir_name, model_name, compiler_name) - if (cluster in ['catalyst', 'surface']): + if cluster in ['catalyst', 'surface']: command = 'salloc %s/bamboo/integration_tests/%s.sh > %s' % (dir_name, model_name, output_file_name) elif cluster == 'ray': pytest.skip('Ray is unsupported for skeleton_performance_full_alexnet') else: raise Exception('Unsupported Cluster %s' % cluster) - common_code.run_lbann(command, model_name, output_file_name, error_file_name, should_log) # Don't need return value - actual_performance = common_code.extract_data(output_file_name, DATA_FIELDS, should_log) - run_tests(actual_performance, model_name, dirname, should_log, compiler_name, cluster) + common_code.run_lbann(command, model_name, output_file_name, error_file_name, + should_log) # Don't need return value + 
actual_performance = common_code.extract_data(output_file_name, DATA_FIELDS, + should_log) + run_tests(actual_performance, model_name, dir_name, should_log, compiler_name, + cluster) + def test_integration_performance_lenet_mnist_clang4(cluster, dirname, exes): if cluster in ['catalyst', 'quartz']: @@ -128,13 +151,17 @@ def test_integration_performance_lenet_mnist_clang4(cluster, dirname, exes): # 0.104416 > 0.090000 lenet_mnist Model 0 Epoch 0 training_max # 98.770000 < 98.960000 lenet_mnist Model 0 Epoch overall test_accuracy skeleton_performance_lenet_mnist(cluster, dirname, exes, 'clang4') - + + def test_integration_performance_alexnet_clang4(cluster, dirname, exes, weekly): skeleton_performance_alexnet(cluster, dirname, exes, 'clang4', weekly) -def test_integration_performance_full_alexnet_clang4(cluster, dirname, exes, weekly): + +def test_integration_performance_full_alexnet_clang4(cluster, dirname, exes, + weekly): skeleton_performance_full_alexnet(cluster, dirname, exes, 'clang4', weekly) - + + def test_integration_performance_lenet_mnist_gcc4(cluster, dirname, exes): if cluster in ['catalyst', 'quartz', 'surface']: pytest.skip('FIXME') @@ -147,6 +174,7 @@ def test_integration_performance_lenet_mnist_gcc4(cluster, dirname, exes): # srun: error: surface145: task 0: Segmentation fault (core dumped) skeleton_performance_lenet_mnist(cluster, dirname, exes, 'gcc4') + def test_integration_performance_alexnet_gcc4(cluster, dirname, exes, weekly): if cluster in ['surface']: pytest.skip('FIXME') @@ -155,9 +183,11 @@ def test_integration_performance_alexnet_gcc4(cluster, dirname, exes, weekly): # srun: error: surface59: task 0: Segmentation fault (core dumped) skeleton_performance_alexnet(cluster, dirname, exes, 'gcc4', weekly) + def test_integration_performance_full_alexnet_gcc4(cluster, dirname, exes, weekly): skeleton_performance_full_alexnet(cluster, dirname, exes, 'gcc4', weekly) + def test_integration_performance_lenet_mnist_gcc7(cluster, dirname, exes): if 
cluster in ['catalyst', 'quartz']: pytest.skip('FIXME') @@ -166,6 +196,7 @@ def test_integration_performance_lenet_mnist_gcc7(cluster, dirname, exes): # 98.950000 < 99.000000 lenet_mnist Model 0 Epoch overall test_accuracy skeleton_performance_lenet_mnist(cluster, dirname, exes, 'gcc7') + def test_integration_performance_alexnet_gcc7(cluster, dirname, exes, weekly): if cluster in ['catalyst', 'quartz']: pytest.skip('FIXME') @@ -173,36 +204,45 @@ def test_integration_performance_alexnet_gcc7(cluster, dirname, exes, weekly): # 0.546884 > 0.510000 alexnet Model 0 Epoch 17 training_stdev skeleton_performance_alexnet(cluster, dirname, exes, 'gcc7', weekly) -def test_integration_performance_full_alexnet_gcc7(cluster, dirname, exes, weekly): + +def test_integration_performance_full_alexnet_gcc7(cluster, dirname, exes, + weekly): skeleton_performance_full_alexnet(cluster, dirname, exes, 'gcc7', weekly) + def test_integration_performance_lenet_mnist_intel18(cluster, dirname, exes): skeleton_performance_lenet_mnist(cluster, dirname, exes, 'intel18') -def test_integration_performance_alexnet_intel18(cluster, dirname, exes, weekly): + +def test_integration_performance_alexnet_intel18(cluster, dirname, exes, + weekly): skeleton_performance_alexnet(cluster, dirname, exes, 'intel18', weekly) -def test_integration_performance_full_alexnet_intel18(cluster, dirname, exes, weekly): + +def test_integration_performance_full_alexnet_intel18(cluster, dirname, exes, + weekly): skeleton_performance_full_alexnet(cluster, dirname, exes, 'intel18', weekly) # Run with python -m pytest -s test_integration_performance.py -k 'test_integration_performance_lenet_mnist_exe' --exe= def test_integration_performance_lenet_mnist_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') exes = {'exe' : exe} skeleton_performance_lenet_mnist(cluster, dirname, exes, 'exe') + # Run with python -m pytest -s test_integration_performance.py -k 
'test_integration_performance_alexnet_exe' --exe= def test_integration_performance_alexnet_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') exes = {'exe' : exe} skeleton_performance_alexnet(cluster, dirname, exes, 'exe', True) + # Run with python -m pytest -s test_integration_performance.py -k 'test_integration_performance_full_alexnet_exe' --exe= def test_integration_performance_full_alexnet_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') exes = {'exe' : exe} skeleton_performance_full_alexnet(cluster, dirname, exes, 'exe', True) diff --git a/bamboo/unit_tests/conftest.py b/bamboo/unit_tests/conftest.py index 5e5cce7d2f7..eda975da95a 100644 --- a/bamboo/unit_tests/conftest.py +++ b/bamboo/unit_tests/conftest.py @@ -1,22 +1,14 @@ -import pytest, os, re, subprocess +import sys +sys.path.insert(0, '../common_python') +import tools +import pytest, re, subprocess def pytest_addoption(parser): - cluster = re.sub('[0-9]+', '', subprocess.check_output('hostname'.split()).strip()) - default_dirname = subprocess.check_output('git rev-parse --show-toplevel'.split()).strip() - default_exes = {} - default_exes['default'] = '%s/build/gnu.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) - if cluster in ['catalyst', 'quartz']: - default_exes['clang4'] = '%s/build/clang.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_clang-4.0.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) - #default_exes['gcc4'] = '%s/bamboo/compiler_tests/builds/%s_gcc-4.9.3_rel/build/model_zoo/lbann' % (default_dirname, cluster) - default_exes['gcc7'] = '%s/build/gnu.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_gcc-7.1.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) - default_exes['intel18'] = '%s/build/intel.Release.%s.llnl.gov/install/bin/lbann' % 
(default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_intel-18.0.0_rel/build/model_zoo/lbann' % (default_dirname, cluster) - - if cluster == 'ray': - default_exes['gcc4'] = '%s/build/gnu.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) #'%s/bamboo/compiler_tests/builds/%s_gcc-4.9.3_rel/build/model_zoo/lbann' % (default_dirname, cluster) - default_exes['clang4'] = '%s/build/clang.Release.%s.llnl.gov/install/bin/lbann' % (default_dirname, cluster) - - if cluster in ['surface', 'pascal']: - default_exes['gcc4'] = default_exes['default'] + cluster = re.sub('[0-9]+', '', subprocess.check_output( + 'hostname'.split()).strip()) + default_dirname = subprocess.check_output( + 'git rev-parse --show-toplevel'.split()).strip() + default_exes = tools.get_default_exes(default_dirname, cluster) parser.addoption('--cluster', action='store', default=cluster, help='--cluster= to specify the cluster being run on, for the purpose of determing which commands to use. Default the current cluster') diff --git a/bamboo/unit_tests/test_unit_check_proto_models.py b/bamboo/unit_tests/test_unit_check_proto_models.py index 539d69d0b44..95203274c2a 100644 --- a/bamboo/unit_tests/test_unit_check_proto_models.py +++ b/bamboo/unit_tests/test_unit_check_proto_models.py @@ -2,7 +2,8 @@ sys.path.insert(0, '../common_python') import tools import pytest -import os, re, subprocess, sys +import os + def skeleton_models(cluster, dir_name, executables, compiler_name): if compiler_name not in executables: @@ -38,6 +39,9 @@ def skeleton_models(cluster, dir_name, executables, compiler_name): data_reader_path = '%s/model_zoo/models/gan/mnist/discriminator_data.prototext' % (dir_name) data_reader_name = None elif 'triplet' in file_name: + if (cluster == 'catalyst') and (compiler_name == 'clang4'): + # Skipping this test. 
+ continue data_filedir_train_default = '/p/lscratchh/brainusr/datasets/ILSVRC2012/patches_84h_110x110_13x13-blur-ab_compact/' data_filename_train_default = '/p/lscratchh/brainusr/datasets/ILSVRC2012/patches_84h_110x110_13x13-blur-ab_compact/train/train_list_8h.nfl.npz' data_filedir_test_default = '/p/lscratchh/brainusr/datasets/ILSVRC2012/patches_84h_110x110_13x13-blur-ab_compact/' @@ -72,24 +76,31 @@ def skeleton_models(cluster, dir_name, executables, compiler_name): else: print("Shared lbannusr account doesn't have access to dataset this model requires") continue - if (cluster == 'ray') and (data_reader_name in ['cifar10', 'ascii']): + if (cluster == 'ray') and \ + (data_reader_name in ['cifar10', 'ascii']): print('Skipping %s because data is not available on ray' % model_path) - elif (cluster == 'ray') or (cluster == 'pascal') and ('conv_autoencoder' in file_name) or ('gan' in subdir): + elif (cluster == 'ray') or (cluster == 'pascal') and \ + ('conv_autoencoder' in file_name) or ('gan' in subdir): print('Skipping %s because unpooling/noise is not implemented on gpu' % model_path) else: output_file_name = '%s/bamboo/unit_tests/output/check_proto_models_%s_%s_output.txt' % (dir_name, file_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/check_proto_models_%s_%s_error.txt' % (dir_name, file_name, compiler_name) cmd = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=node_count, - partition='pbatch', time_limit=time_limit, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], + num_nodes=node_count, + partition='pbatch', time_limit=time_limit, + dir_name=dir_name, data_filedir_default=data_filedir_default, data_filedir_train_default=data_filedir_train_default, data_filename_train_default=data_filename_train_default, data_filedir_test_default=data_filedir_test_default, data_filename_test_default=data_filename_test_default, - data_reader_name=data_reader_name, 
data_reader_path=data_reader_path, - exit_after_setup=True, model_path=model_path, optimizer_name=opt, - output_file_name=output_file_name, error_file_name=error_file_name) + data_reader_name=data_reader_name, + data_reader_path=data_reader_path, + exit_after_setup=True, model_path=model_path, + optimizer_name=opt, + output_file_name=output_file_name, + error_file_name=error_file_name) if os.system(cmd) != 0: print("Error detected in " + model_path) #defective_models.append(file_name) @@ -98,15 +109,18 @@ def skeleton_models(cluster, dir_name, executables, compiler_name): working_models.append(cmd) num_defective = len(defective_models) if num_defective != 0: - print('Working models: %d. Defective models: %d', len(working_models), num_defective) + print('Working models: %d. Defective models: %d' % ( + len(working_models), num_defective)) print('Errors for: The following models exited with errors %s' % compiler_name) for model in defective_models: print(model) assert num_defective == 0 + def test_unit_models_clang4(cluster, dirname, exes): skeleton_models(cluster, dirname, exes, 'clang4') + def test_unit_models_gcc4(cluster, dirname, exes): if cluster in ['surface']: pytest.skip('FIXME') @@ -114,15 +128,18 @@ def test_unit_models_gcc4(cluster, dirname, exes): # assert 8 == 0 skeleton_models(cluster, dirname, exes, 'gcc4') + def test_unit_models_gcc7(cluster, dirname, exes): skeleton_models(cluster, exes, dirname, 'gcc7') + def test_unit_models_intel18(cluster, dirname, exes): skeleton_models(cluster, dirname, exes, 'intel18') + # Run with python -m pytest -s test_unit_check_proto_models.py -k 'test_unit_models_exe' --exe= def test_unit_models_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') exes = {'exe' : exe} skeleton_models(cluster, dirname, exes, 'exe') diff --git a/bamboo/unit_tests/test_unit_checkpoint.py b/bamboo/unit_tests/test_unit_checkpoint.py index 2b0912c5200..fa06413a139 100644 --- 
a/bamboo/unit_tests/test_unit_checkpoint.py +++ b/bamboo/unit_tests/test_unit_checkpoint.py @@ -4,7 +4,9 @@ import pytest import os -def skeleton_checkpoint_lenet_shared(cluster, executables, dir_name, compiler_name): + +def skeleton_checkpoint_lenet_shared(cluster, executables, dir_name, + compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) exe = executables[compiler_name] @@ -55,7 +57,8 @@ def skeleton_checkpoint_lenet_shared(cluster, executables, dir_name, compiler_na os.system('rm -rf ckpt*') assert diff_test == 0 -def skeleton_checkpoint_lenet_distributed(cluster, executables, dir_name, compiler_name): +def skeleton_checkpoint_lenet_distributed(cluster, executables, dir_name, + compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) exe = executables[compiler_name] @@ -106,25 +109,30 @@ def skeleton_checkpoint_lenet_distributed(cluster, executables, dir_name, compil os.system('rm -rf ckpt*') assert diff_test == 0 + def test_unit_checkpoint_lenet_clang4(cluster, exes, dirname): skeleton_checkpoint_lenet_shared(cluster, exes, dirname, 'clang4') skeleton_checkpoint_lenet_distributed(cluster, exes, dirname, 'clang4') + def test_unit_checkpoint_lenet_gcc4(cluster, exes, dirname): skeleton_checkpoint_lenet_shared(cluster, exes, dirname, 'gcc4') skeleton_checkpoint_lenet_distributed(cluster, exes, dirname, 'gcc4') + def test_unit_checkpoint_lenet_gcc7(cluster, exes, dirname): skeleton_checkpoint_lenet_shared(cluster, exes, dirname, 'gcc7') skeleton_checkpoint_lenet_distributed(cluster, exes, dirname, 'gcc7') + def test_unit_checkpoint_lenet_intel18(cluster, exes, dirname): skeleton_checkpoint_lenet_shared(cluster, exes, dirname, 'intel18') skeleton_checkpoint_lenet_distributed(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_checkpoint.py -k 'test_unit_checkpoint_lenet_exe' --exe= def 
test_unit_checkpoint_lenet_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') exes = {'exe' : exe} skeleton_checkpoint_lenet_shared(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_clamp.py b/bamboo/unit_tests/test_unit_layer_clamp.py index 6ac7278ab30..e1c08a53dc8 100644 --- a/bamboo/unit_tests/test_unit_layer_clamp.py +++ b/bamboo/unit_tests/test_unit_layer_clamp.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_clamp(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_clamp_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_clamp_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='clamp', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='clamp', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_clamp_clang4(cluster, exes, dirname): skeleton_layer_clamp(cluster, exes, dirname, 'clang4') + def test_unit_layer_clamp_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_clamp_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_clamp(cluster, exes, dirname, 'gcc4') + def test_unit_layer_clamp_gcc7(cluster, exes, dirname): skeleton_layer_clamp(cluster, exes, dirname, 'gcc7') + def test_unit_layer_clamp_intel18(cluster, exes, 
dirname): skeleton_layer_clamp(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_clamp.py -k 'test_unit_layer_clamp_exe' --exe= def test_unit_layer_clamp_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') exes = {'exe' : exe} skeleton_layer_clamp(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_covariance.py b/bamboo/unit_tests/test_unit_layer_covariance.py index 41bdb9d985f..307701263c7 100644 --- a/bamboo/unit_tests/test_unit_layer_covariance.py +++ b/bamboo/unit_tests/test_unit_layer_covariance.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_covariance(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_covariance_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_covariance_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='covariance', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='covariance', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_covariance_clang4(cluster, exes, dirname): skeleton_layer_covariance(cluster, exes, dirname, 'clang4') + def test_unit_layer_covariance_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_covariance_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 
skeleton_layer_covariance(cluster, exes, dirname, 'gcc4') + def test_unit_layer_covariance_gcc7(cluster, exes, dirname): skeleton_layer_covariance(cluster, exes, dirname, 'gcc7') + def test_unit_layer_covariance_intel18(cluster, exes, dirname): skeleton_layer_covariance(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_layer_covariance_exe' --exe= def test_unit_layer_covariance_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_covariance(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_elu.py b/bamboo/unit_tests/test_unit_layer_elu.py index a121bfcb50f..a14023992f9 100644 --- a/bamboo/unit_tests/test_unit_layer_elu.py +++ b/bamboo/unit_tests/test_unit_layer_elu.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_elu(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_elu_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_elu_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='elu', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='elu', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_elu_clang4(cluster, exes, dirname): skeleton_layer_elu(cluster, exes, dirname, 'clang4') + def 
test_unit_layer_elu_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_elu_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_elu(cluster, exes, dirname, 'gcc4') + def test_unit_layer_elu_gcc7(cluster, exes, dirname): skeleton_layer_elu(cluster, exes, dirname, 'gcc7') + def test_unit_layer_elu_intel18(cluster, exes, dirname): skeleton_layer_elu(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_elu.py -k 'test_unit_layer_elu_exe' --exe= def test_unit_layer_elu_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_elu(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_identity.py b/bamboo/unit_tests/test_unit_layer_identity.py index b26f4248d69..21ad2cf978d 100644 --- a/bamboo/unit_tests/test_unit_layer_identity.py +++ b/bamboo/unit_tests/test_unit_layer_identity.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_identity(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_identity_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_identity_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='identity', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='identity', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) 
return_code = os.system(command) assert return_code == 0 + def test_unit_layer_identity_clang4(cluster, exes, dirname): skeleton_layer_identity(cluster, exes, dirname, 'clang4') + def test_unit_layer_identity_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_identity_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_identity(cluster, exes, dirname, 'gcc4') + def test_unit_layer_identity_gcc7(cluster, exes, dirname): skeleton_layer_identity(cluster, exes, dirname, 'gcc7') + def test_unit_layer_identity_intel18(cluster, exes, dirname): skeleton_layer_identity(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_identity.py -k 'test_unit_layer_identity_exe' --exe= def test_unit_layer_identity_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_identity(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_l1_norm.py b/bamboo/unit_tests/test_unit_layer_l1_norm.py index 1c1ab406106..72d26dbb42a 100644 --- a/bamboo/unit_tests/test_unit_layer_l1_norm.py +++ b/bamboo/unit_tests/test_unit_layer_l1_norm.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_l1_norm(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_l1_norm_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_l1_norm_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - 
model_folder='tests/layer_tests', model_name='l1_norm', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='l1_norm', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_l1_norm_clang4(cluster, exes, dirname): skeleton_layer_l1_norm(cluster, exes, dirname, 'clang4') + def test_unit_layer_l1_norm_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_l1_norm_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_l1_norm(cluster, exes, dirname, 'gcc4') + def test_unit_layer_l1_norm_gcc7(cluster, exes, dirname): skeleton_layer_l1_norm(cluster, exes, dirname, 'gcc7') + def test_unit_layer_l1_norm_intel18(cluster, exes, dirname): skeleton_layer_l1_norm(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_layer_l1_norm_exe' --exe= def test_unit_layer_l1_norm_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_l1_norm(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_l2_norm2.py b/bamboo/unit_tests/test_unit_layer_l2_norm2.py index 29233e9ce18..6457f881f74 100644 --- a/bamboo/unit_tests/test_unit_layer_l2_norm2.py +++ b/bamboo/unit_tests/test_unit_layer_l2_norm2.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_l2_norm2(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_l2_norm2_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_l2_norm2_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, 
executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='l2_norm2', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='l2_norm2', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_l2_norm2_clang4(cluster, exes, dirname): skeleton_layer_l2_norm2(cluster, exes, dirname, 'clang4') + def test_unit_layer_l2_norm2_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_l2_norm2_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_l2_norm2(cluster, exes, dirname, 'gcc4') + def test_unit_layer_l2_norm2_gcc7(cluster, exes, dirname): skeleton_layer_l2_norm2(cluster, exes, dirname, 'gcc7') + def test_unit_layer_l2_norm2_intel18(cluster, exes, dirname): skeleton_layer_l2_norm2(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_layer_l2_norm2_exe' --exe= def test_unit_layer_l2_norm2_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_l2_norm2(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_leaky_relu.py b/bamboo/unit_tests/test_unit_layer_leaky_relu.py index d934987e76a..b7f9beaca71 100644 --- a/bamboo/unit_tests/test_unit_layer_leaky_relu.py +++ b/bamboo/unit_tests/test_unit_layer_leaky_relu.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_leaky_relu(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) 
output_file_name = '%s/bamboo/unit_tests/output/layer_leaky_relu_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_leaky_relu_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='leaky_relu', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='leaky_relu', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_leaky_relu_clang4(cluster, exes, dirname): skeleton_layer_leaky_relu(cluster, exes, dirname, 'clang4') + def test_unit_layer_leaky_relu_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_leaky_relu_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_leaky_relu(cluster, exes, dirname, 'gcc4') + def test_unit_layer_leaky_relu_gcc7(cluster, exes, dirname): skeleton_layer_leaky_relu(cluster, exes, dirname, 'gcc7') + def test_unit_layer_leaky_relu_intel18(cluster, exes, dirname): skeleton_layer_leaky_relu(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_layer_leaky_relu_exe' --exe= def test_unit_layer_leaky_relu_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_leaky_relu(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_log_sigmoid.py b/bamboo/unit_tests/test_unit_layer_log_sigmoid.py index bda8dab5b98..e454b8d984b 100644 --- 
a/bamboo/unit_tests/test_unit_layer_log_sigmoid.py +++ b/bamboo/unit_tests/test_unit_layer_log_sigmoid.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_log_sigmoid(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_log_sigmoid_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_log_sigmoid_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='log_sigmoid', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='log_sigmoid', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_log_sigmoid_clang4(cluster, exes, dirname): skeleton_layer_log_sigmoid(cluster, exes, dirname, 'clang4') + def test_unit_layer_log_sigmoid_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_log_sigmoid_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_log_sigmoid(cluster, exes, dirname, 'gcc4') + def test_unit_layer_log_sigmoid_gcc7(cluster, exes, dirname): skeleton_layer_log_sigmoid(cluster, exes, dirname, 'gcc7') + def test_unit_layer_log_sigmoid_intel18(cluster, exes, dirname): skeleton_layer_log_sigmoid(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_log_sigmoid.py -k 'test_unit_layer_log_sigmoid_exe' --exe= def test_unit_layer_log_sigmoid_exe(cluster, dirname, exe): - if exe == None: + if exe 
is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_log_sigmoid(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_log_softmax.py b/bamboo/unit_tests/test_unit_layer_log_softmax.py index 749cd34dc22..e44e1019aae 100644 --- a/bamboo/unit_tests/test_unit_layer_log_softmax.py +++ b/bamboo/unit_tests/test_unit_layer_log_softmax.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_log_softmax(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_log_softmax_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_log_softmax_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], + num_nodes=1, num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='log_softmax', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='log_softmax', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_log_softmax_clang4(cluster, exes, dirname): skeleton_layer_log_softmax(cluster, exes, dirname, 'clang4') + def test_unit_layer_log_softmax_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_log_softmax_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_log_softmax(cluster, exes, dirname, 'gcc4') + def test_unit_layer_log_softmax_gcc7(cluster, exes, dirname): skeleton_layer_log_softmax(cluster, exes, dirname, 'gcc7') + def 
test_unit_layer_log_softmax_intel18(cluster, exes, dirname): skeleton_layer_log_softmax(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_layer_log_softmax_exe' --exe= def test_unit_layer_log_softmax_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_log_softmax(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_mean_absolute_error.py b/bamboo/unit_tests/test_unit_layer_mean_absolute_error.py index 62768e6afe8..d74407e60e7 100644 --- a/bamboo/unit_tests/test_unit_layer_mean_absolute_error.py +++ b/bamboo/unit_tests/test_unit_layer_mean_absolute_error.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_mean_absolute_error(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_mean_absolute_error_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_mean_absolute_error_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='mean_absolute_error', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='mean_absolute_error', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_mean_absolute_error_clang4(cluster, exes, dirname): skeleton_layer_mean_absolute_error(cluster, exes, dirname, 'clang4') + def 
test_unit_layer_mean_absolute_error_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_mean_absolute_error_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_mean_absolute_error(cluster, exes, dirname, 'gcc4') + def test_unit_layer_mean_absolute_error_gcc7(cluster, exes, dirname): skeleton_layer_mean_absolute_error(cluster, exes, dirname, 'gcc7') + def test_unit_layer_mean_absolute_error_intel18(cluster, exes, dirname): skeleton_layer_mean_absolute_error(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_layer_mean_absolute_error_exe' --exe= def test_unit_layer_mean_absolute_error_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_mean_absolute_error(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_relu.py b/bamboo/unit_tests/test_unit_layer_relu.py index 0b66c9fabb2..0546fa95d14 100644 --- a/bamboo/unit_tests/test_unit_layer_relu.py +++ b/bamboo/unit_tests/test_unit_layer_relu.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_relu(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_relu_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_relu_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], + num_nodes=1, num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='relu', optimizer_name='sgd', + 
model_folder='tests/layer_tests', model_name='relu', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_relu_clang4(cluster, exes, dirname): skeleton_layer_relu(cluster, exes, dirname, 'clang4') + def test_unit_layer_relu_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_relu_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_relu(cluster, exes, dirname, 'gcc4') + def test_unit_layer_relu_gcc7(cluster, exes, dirname): skeleton_layer_relu(cluster, exes, dirname, 'gcc7') + def test_unit_layer_relu_intel18(cluster, exes, dirname): skeleton_layer_relu(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_relu.py -k 'test_unit_layer_relu_exe' --exe= def test_unit_layer_relu_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_relu(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_selu.py b/bamboo/unit_tests/test_unit_layer_selu.py index 5fb4cef8d1e..971ef53b91d 100644 --- a/bamboo/unit_tests/test_unit_layer_selu.py +++ b/bamboo/unit_tests/test_unit_layer_selu.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_selu(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_selu_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_selu_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], + num_nodes=1, num_processes=2, 
dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='selu', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='selu', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_selu_clang4(cluster, exes, dirname): skeleton_layer_selu(cluster, exes, dirname, 'clang4') + def test_unit_layer_selu_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_selu_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_selu(cluster, exes, dirname, 'gcc4') + def test_unit_layer_selu_gcc7(cluster, exes, dirname): skeleton_layer_selu(cluster, exes, dirname, 'gcc7') + def test_unit_layer_selu_intel18(cluster, exes, dirname): skeleton_layer_selu(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_selu.py -k 'test_unit_layer_selu_exe' --exe= def test_unit_layer_selu_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_selu(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_sigmoid.py b/bamboo/unit_tests/test_unit_layer_sigmoid.py index 2c0cc2d3d4e..f3775ea66d9 100644 --- a/bamboo/unit_tests/test_unit_layer_sigmoid.py +++ b/bamboo/unit_tests/test_unit_layer_sigmoid.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_sigmoid(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_sigmoid_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_sigmoid_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, 
executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], + num_nodes=1, num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='sigmoid', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='sigmoid', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_sigmoid_clang4(cluster, exes, dirname): skeleton_layer_sigmoid(cluster, exes, dirname, 'clang4') + def test_unit_layer_sigmoid_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_sigmoid_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_sigmoid(cluster, exes, dirname, 'gcc4') + def test_unit_layer_sigmoid_gcc7(cluster, exes, dirname): skeleton_layer_sigmoid(cluster, exes, dirname, 'gcc7') + def test_unit_layer_sigmoid_intel18(cluster, exes, dirname): skeleton_layer_sigmoid(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_sigmoid.py -k 'test_unit_layer_sigmoid_exe' --exe= def test_unit_layer_sigmoid_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_sigmoid(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_softmax.py b/bamboo/unit_tests/test_unit_layer_softmax.py index dd1742a551c..168cc7d943b 100644 --- a/bamboo/unit_tests/test_unit_layer_softmax.py +++ b/bamboo/unit_tests/test_unit_layer_softmax.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_softmax(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = 
'%s/bamboo/unit_tests/output/layer_softmax_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_softmax_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], + num_nodes=1, num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='softmax', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='softmax', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_softmax_clang4(cluster, exes, dirname): skeleton_layer_softmax(cluster, exes, dirname, 'clang4') + def test_unit_layer_softmax_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_softmax_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_softmax(cluster, exes, dirname, 'gcc4') + def test_unit_layer_softmax_gcc7(cluster, exes, dirname): skeleton_layer_softmax(cluster, exes, dirname, 'gcc7') + def test_unit_layer_softmax_intel18(cluster, exes, dirname): skeleton_layer_softmax(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_layer_softmax_exe' --exe= def test_unit_layer_softmax_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_softmax(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_softplus.py b/bamboo/unit_tests/test_unit_layer_softplus.py index bc7d5605988..d7ace58e189 100644 --- a/bamboo/unit_tests/test_unit_layer_softplus.py +++ b/bamboo/unit_tests/test_unit_layer_softplus.py @@ -4,22 +4,27 @@ 
import pytest import os + def skeleton_layer_softplus(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_softplus_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_softplus_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='softplus', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='softplus', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_softplus_clang4(cluster, exes, dirname): skeleton_layer_softplus(cluster, exes, dirname, 'clang4') + def test_unit_layer_softplus_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_softplus_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_softplus(cluster, exes, dirname, 'gcc4') + def test_unit_layer_softplus_gcc7(cluster, exes, dirname): skeleton_layer_softplus(cluster, exes, dirname, 'gcc7') + def test_unit_layer_softplus_intel18(cluster, exes, dirname): skeleton_layer_softplus(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_softplus.py -k 'test_unit_layer_softplus_exe' --exe= def test_unit_layer_softplus_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_softplus(cluster, exes, dirname, 'exe') diff --git 
a/bamboo/unit_tests/test_unit_layer_softsign.py b/bamboo/unit_tests/test_unit_layer_softsign.py index 667efb172c3..e96190fd31a 100644 --- a/bamboo/unit_tests/test_unit_layer_softsign.py +++ b/bamboo/unit_tests/test_unit_layer_softsign.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_softsign(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_softsign_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_softsign_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='softsign', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='softsign', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_softsign_clang4(cluster, exes, dirname): skeleton_layer_softsign(cluster, exes, dirname, 'clang4') + def test_unit_layer_softsign_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_softsign_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_softsign(cluster, exes, dirname, 'gcc4') + def test_unit_layer_softsign_gcc7(cluster, exes, dirname): skeleton_layer_softsign(cluster, exes, dirname, 'gcc7') + def test_unit_layer_softsign_intel18(cluster, exes, dirname): skeleton_layer_softsign(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_softsign.py -k 'test_unit_layer_softsign_exe' --exe= def 
test_unit_layer_softsign_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_softsign(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_squared_difference.py b/bamboo/unit_tests/test_unit_layer_squared_difference.py index 201267757d7..205798ec883 100644 --- a/bamboo/unit_tests/test_unit_layer_squared_difference.py +++ b/bamboo/unit_tests/test_unit_layer_squared_difference.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_squared_difference(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_squared_difference_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_squared_difference_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='squared_difference', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='squared_difference', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_squared_difference_clang4(cluster, exes, dirname): skeleton_layer_squared_difference(cluster, exes, dirname, 'clang4') + def test_unit_layer_squared_difference_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_squared_difference_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 
skeleton_layer_squared_difference(cluster, exes, dirname, 'gcc4') + def test_unit_layer_squared_difference_gcc7(cluster, exes, dirname): skeleton_layer_squared_difference(cluster, exes, dirname, 'gcc7') + def test_unit_layer_squared_difference_intel18(cluster, exes, dirname): skeleton_layer_squared_difference(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_squared_difference.py -k 'test_unit_layer_squared_difference_exe' --exe= def test_unit_layer_squared_difference_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_squared_difference(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_tessellate.py b/bamboo/unit_tests/test_unit_layer_tessellate.py index 25e30770c63..961cbcee14f 100644 --- a/bamboo/unit_tests/test_unit_layer_tessellate.py +++ b/bamboo/unit_tests/test_unit_layer_tessellate.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_tessellate(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_tessellate_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_tessellate_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - model_folder='tests/layer_tests', model_name='tessellate', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='tessellate', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 
+ def test_unit_layer_tessellate_clang4(cluster, exes, dirname): skeleton_layer_tessellate(cluster, exes, dirname, 'clang4') + def test_unit_layer_tessellate_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_tessellate_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_tessellate(cluster, exes, dirname, 'gcc4') + def test_unit_layer_tessellate_gcc7(cluster, exes, dirname): skeleton_layer_tessellate(cluster, exes, dirname, 'gcc7') + def test_unit_layer_tessellate_intel18(cluster, exes, dirname): skeleton_layer_tessellate(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_layer_tessellate.py -k 'test_unit_layer_tessellate_exe' --exe= def test_unit_layer_tessellate_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_tessellate(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_layer_variance.py b/bamboo/unit_tests/test_unit_layer_variance.py index 4b476aedf5b..8e8d400a6d9 100644 --- a/bamboo/unit_tests/test_unit_layer_variance.py +++ b/bamboo/unit_tests/test_unit_layer_variance.py @@ -4,22 +4,27 @@ import pytest import os + def skeleton_layer_variance(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/layer_variance_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/layer_variance_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=2, dir_name=dir_name, + cluster=cluster, executable=executables[compiler_name], + num_nodes=1, num_processes=2, dir_name=dir_name, data_filedir_default='', data_reader_name='synthetic', - 
model_folder='tests/layer_tests', model_name='variance', optimizer_name='sgd', + model_folder='tests/layer_tests', model_name='variance', + optimizer_name='sgd', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_layer_variance_clang4(cluster, exes, dirname): skeleton_layer_variance(cluster, exes, dirname, 'clang4') + def test_unit_layer_variance_gcc4_check(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +32,18 @@ def test_unit_layer_variance_gcc4_check(cluster, exes, dirname): # assert 34304 == 0 skeleton_layer_variance(cluster, exes, dirname, 'gcc4') + def test_unit_layer_variance_gcc7(cluster, exes, dirname): skeleton_layer_variance(cluster, exes, dirname, 'gcc7') + def test_unit_layer_variance_intel18(cluster, exes, dirname): skeleton_layer_variance(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_layer_variance_exe' --exe= def test_unit_layer_variance_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_layer_variance(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_lbann2_reload.py b/bamboo/unit_tests/test_unit_lbann2_reload.py index 8bad2453fae..ff7f557d7b1 100644 --- a/bamboo/unit_tests/test_unit_lbann2_reload.py +++ b/bamboo/unit_tests/test_unit_lbann2_reload.py @@ -4,6 +4,7 @@ import pytest import os, sys + def skeleton_lbann2_reload(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) @@ -42,7 +43,8 @@ def skeleton_lbann2_reload(cluster, executables, dir_name, compiler_name): error_file_name=error_file_name) return_code_ckpt_1 = os.system(command) if return_code_ckpt_1 != 0: - sys.stderr.write('LeNet (checkpoint) execution failed, exiting with error') + 
sys.stderr.write( + 'LeNet (checkpoint) execution failed, exiting with error') sys.exit(1) output_file_name = '%s/bamboo/unit_tests/output/lbann2_restart_%s_output.txt' % (dir_name, compiler_name) @@ -59,7 +61,8 @@ def skeleton_lbann2_reload(cluster, executables, dir_name, compiler_name): error_file_name=error_file_name) return_code_ckpt_2 = os.system(command) if return_code_ckpt_2 != 0: - sys.stderr.write('LBANN2 LeNet weight reload failed, exiting with error') + sys.stderr.write( + 'LBANN2 LeNet weight reload failed, exiting with error') sys.exit(1) os.system('rm lbann2_ckpt/model0-epoch*') os.system('rm lbann2_nockpt/model0-epoch*') @@ -68,21 +71,30 @@ def skeleton_lbann2_reload(cluster, executables, dir_name, compiler_name): os.system('rm -rf lbann2_*') assert diff_test == 0 + def test_unit_lbann2_reload_clang4(cluster, exes, dirname): + if cluster == 'catalyst': + pytest.skip('FIXME') skeleton_lbann2_reload(cluster, exes, dirname, 'clang4') + def test_unit_lbann2_reload_gcc4(cluster, exes, dirname): skeleton_lbann2_reload(cluster, exes, dirname, 'gcc4') + def test_unit_lbann2_reload_gcc7(cluster, exes, dirname): + if cluster in ['catalyst', 'pascal']: + pytest.skip('FIXME') skeleton_lbann2_reload(cluster, exes, dirname, 'gcc7') + def test_unit_lbann2_reload_intel18(cluster, exes, dirname): skeleton_lbann2_reload(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_lbann2_reload.py -k 'test_unit_lbann2_reload_exe' --exe= def test_unit_lbann2_reload_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_lbann2_reload(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_lbann_invocation.py b/bamboo/unit_tests/test_unit_lbann_invocation.py index efaf7db4686..a002db49be4 100644 --- a/bamboo/unit_tests/test_unit_lbann_invocation.py +++ b/bamboo/unit_tests/test_unit_lbann_invocation.py @@ -1,10 +1,9 @@ import sys 
sys.path.insert(0, '../common_python') import tools -import pytest import os, sys -def test_unit_no_params_bad(cluster, exes, dirname): +def test_unit_no_params_bad(cluster, exes): exe = exes['gcc4'] sys.stderr.write('TESTING: run lbann with no params; lbann should throw exception\n') command = tools.get_command( @@ -12,7 +11,8 @@ def test_unit_no_params_bad(cluster, exes, dirname): return_code = os.system(command) assert return_code != 0 -def test_unit_one_model_bad(cluster, exes, dirname): + +def test_unit_one_model_bad(cluster, exes): exe = exes['gcc4'] sys.stderr.write('TESTING: run lbann with no optimizer or reader; lbann should throw exception\n') model_path = 'prototext/model_mnist_simple_1.prototext' @@ -22,7 +22,8 @@ def test_unit_one_model_bad(cluster, exes, dirname): return_code = os.system(command) assert return_code != 0 -def test_unit_two_models_bad(cluster, exes, dirname): + +def test_unit_two_models_bad(cluster, exes): exe = exes['gcc4'] sys.stderr.write('TESTING: run lbann with two models but no optimizer or reader; lbann should throw exception\n') model_path = '{prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}' @@ -32,7 +33,8 @@ def test_unit_two_models_bad(cluster, exes, dirname): return_code = os.system(command) assert return_code != 0 -def test_unit_two_models_bad2(cluster, exes, dirname): + +def test_unit_two_models_bad2(cluster, exes): exe = exes['gcc4'] sys.stderr.write('TESTING: run lbann with two models with missing {; lbann should throw exception\n') model_path='prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}' @@ -42,7 +44,8 @@ def test_unit_two_models_bad2(cluster, exes, dirname): return_code = os.system(command) assert return_code != 0 -def test_unit_missing_optimizer(cluster, exes, dirname): + +def test_unit_missing_optimizer(cluster, exes): exe = exes['gcc4'] sys.stderr.write('TESTING: run lbann with two models, reader, but no optimizer; lbann should throw 
exception\n') model_path='{prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}' @@ -54,7 +57,8 @@ def test_unit_missing_optimizer(cluster, exes, dirname): return_code = os.system(command) assert return_code != 0 -def test_unit_missing_reader(cluster, exes, dirname): + +def test_unit_missing_reader(cluster, exes): exe = exes['gcc4'] sys.stderr.write('TESTING: run lbann with two models, reader, but no reader; lbann should throw exception\n') model_path = '{prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}' @@ -65,14 +69,16 @@ def test_unit_missing_reader(cluster, exes, dirname): return_code = os.system(command) assert return_code != 0 -def test_unit_bad_params(cluster, exes, dirname): + +def test_unit_bad_params(cluster, exes): exe = exes['gcc4'] sys.stderr.write('TESTING: run lbann with ill-formed param (missing -) lbann should throw exception\n') (command_allocate, command_run, _, _) = tools.get_command(cluster=cluster, executable=exe, return_tuple=True) return_code = os.system('%s%s %s -exit_after_setup --reader=prototext/data_reader_mnist.prototext --model={prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext} --optimizer=prototext/opt_sgd.prototext' % (command_allocate, command_run, exe)) assert return_code != 0 -def test_unit_should_work(cluster, exes, dirname): + +def test_unit_should_work(cluster, exes): exe = exes['gcc4'] sys.stderr.write('TESTING: run lbann with two models, reader, and optimizer; lbann should NOT throw exception\n') model_path = '{prototext/model_mnist_simple_1.prototext,prototext/model_mnist_simple_1.prototext}' diff --git a/bamboo/unit_tests/test_unit_mnist_conv_graph.py b/bamboo/unit_tests/test_unit_mnist_conv_graph.py index 3437f461273..7b0bd7f90da 100644 --- a/bamboo/unit_tests/test_unit_mnist_conv_graph.py +++ b/bamboo/unit_tests/test_unit_mnist_conv_graph.py @@ -4,24 +4,32 @@ import pytest import os + def skeleton_mnist_conv_graph(cluster, 
executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/mnist_conv_graph_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/mnist_conv_graph_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=1, - dir_name=dir_name, data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST', - data_reader_name='mnist', model_folder='tests', model_name='mnist_conv_graph', + cluster=cluster, executable=executables[compiler_name], + num_nodes=1, num_processes=1, + dir_name=dir_name, + data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST', + data_reader_name='mnist', model_folder='tests', + model_name='mnist_conv_graph', optimizer_name='adam', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_mnist_conv_graph_clang4(cluster, exes, dirname): + if cluster == 'catalyst': + pytest.skip('FIXME') skeleton_mnist_conv_graph(cluster, exes, dirname, 'clang4') + def test_unit_mnist_conv_graph_gcc4(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -29,15 +37,20 @@ def test_unit_mnist_conv_graph_gcc4(cluster, exes, dirname): # assert 35584 == 0 skeleton_mnist_conv_graph(cluster, exes, dirname, 'gcc4') + def test_unit_mnist_conv_graph_gcc7(cluster, exes, dirname): + if cluster in ['catalyst', 'pascal']: + pytest.skip('FIXME') skeleton_mnist_conv_graph(cluster, exes, dirname, 'gcc7') + def test_unit_mnist_conv_graph_intel18(cluster, exes, dirname): skeleton_mnist_conv_graph(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_conv_graph.py -k 'test_unit_mnist_conv_graph_exe' --exe= def test_unit_mnist_conv_graph_exe(cluster, dirname, exe): - if exe == None: + if exe is None: 
pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_mnist_conv_graph(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_mnist_ridge_regression.py b/bamboo/unit_tests/test_unit_mnist_ridge_regression.py index 4390693d99b..e25b12e28c6 100644 --- a/bamboo/unit_tests/test_unit_mnist_ridge_regression.py +++ b/bamboo/unit_tests/test_unit_mnist_ridge_regression.py @@ -4,22 +4,28 @@ import pytest import os + def skeleton_mnist_ridge_regression(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/mnist_ridge_regression_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/mnist_ridge_regression_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=1, dir_name=dir_name, - data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST', data_reader_name='mnist', - model_folder='tests', model_name='mnist_ridge_regression', optimizer_name='adam', + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=1, dir_name=dir_name, + data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST', + data_reader_name='mnist', + model_folder='tests', model_name='mnist_ridge_regression', + optimizer_name='adam', output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_mnist_ridge_regression_clang4(cluster, exes, dirname): skeleton_mnist_ridge_regression(cluster, exes, dirname, 'clang4') + def test_unit_mnist_ridge_regression_gcc4(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +33,18 @@ def test_unit_mnist_ridge_regression_gcc4(cluster, exes, dirname): # assert 34304 == 0 skeleton_mnist_ridge_regression(cluster, exes, 
dirname, 'gcc4') + def test_unit_mnist_ridge_regression_gcc7(cluster, exes, dirname): skeleton_mnist_ridge_regression(cluster, exes, dirname, 'gcc7') + def test_unit_mnist_ridge_regression_intel18(cluster, exes, dirname): skeleton_mnist_ridge_regression(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_ridge_regression.py -k 'test_unit_mnist_ridge_regression_exe' --exe= def test_unit_mnist_ridge_regression_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_mnist_ridge_regression(cluster, exes, dirname, 'exe') diff --git a/bamboo/unit_tests/test_unit_mnist_softmax_classifier.py b/bamboo/unit_tests/test_unit_mnist_softmax_classifier.py index e67ec7e8cb7..9b35db607c9 100644 --- a/bamboo/unit_tests/test_unit_mnist_softmax_classifier.py +++ b/bamboo/unit_tests/test_unit_mnist_softmax_classifier.py @@ -4,22 +4,28 @@ import pytest import os + def skeleton_mnist_softmax_classifier(cluster, executables, dir_name, compiler_name): if compiler_name not in executables: pytest.skip('default_exes[%s] does not exist' % compiler_name) output_file_name = '%s/bamboo/unit_tests/output/mnist_softmax_classifier_%s_output.txt' % (dir_name, compiler_name) error_file_name = '%s/bamboo/unit_tests/error/mnist_softmax_classifier_%s_error.txt' % (dir_name, compiler_name) command = tools.get_command( - cluster=cluster, executable=executables[compiler_name], num_nodes=1, num_processes=1, dir_name=dir_name, - data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST', data_reader_name='mnist', - model_folder='tests', model_name='mnist_softmax_classifier', optimizer_name='adam', + cluster=cluster, executable=executables[compiler_name], num_nodes=1, + num_processes=1, dir_name=dir_name, + data_filedir_default='/p/lscratchh/brainusr/datasets/MNIST', + data_reader_name='mnist', + model_folder='tests', model_name='mnist_softmax_classifier', + optimizer_name='adam', 
output_file_name=output_file_name, error_file_name=error_file_name) return_code = os.system(command) assert return_code == 0 + def test_unit_mnist_softmax_classifier_clang4(cluster, exes, dirname): skeleton_mnist_softmax_classifier(cluster, exes, dirname, 'clang4') + def test_unit_mnist_softmax_classifier_gcc4(cluster, exes, dirname): if cluster in ['surface']: pytest.skip('FIXME') @@ -27,15 +33,18 @@ def test_unit_mnist_softmax_classifier_gcc4(cluster, exes, dirname): # assert 34304 == 0 skeleton_mnist_softmax_classifier(cluster, exes, dirname, 'gcc4') + def test_unit_mnist_softmax_classifier_gcc7(cluster, exes, dirname): skeleton_mnist_softmax_classifier(cluster, exes, dirname, 'gcc7') + def test_unit_mnist_softmax_classifier_intel18(cluster, exes, dirname): skeleton_mnist_softmax_classifier(cluster, exes, dirname, 'intel18') + # Run with python -m pytest -s test_unit_softmax_classifier.py -k 'test_unit_mnist_softmax_classifier_exe' --exe= def test_unit_mnist_softmax_classifier_exe(cluster, dirname, exe): - if exe == None: + if exe is None: pytest.skip('Non-local testing') - exes = {'exe' : exe} + exes = {'exe': exe} skeleton_mnist_softmax_classifier(cluster, exes, dirname, 'exe') From b67db0283bd818b16908eb4ed25ad45bb1390298 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Mon, 11 Mar 2019 16:14:56 -0700 Subject: [PATCH 166/443] Debugged Python data reader. Runs successfully with LeNet. 
--- .../lbann/data_readers/data_reader_python.hpp | 7 ++- src/data_readers/data_reader_python.cpp | 46 +++++++++++-------- src/proto/lbann.proto | 2 +- src/proto/proto_common.cpp | 4 +- 4 files changed, 35 insertions(+), 24 deletions(-) diff --git a/include/lbann/data_readers/data_reader_python.hpp b/include/lbann/data_readers/data_reader_python.hpp index e0b90cea9de..41f425eb830 100644 --- a/include/lbann/data_readers/data_reader_python.hpp +++ b/include/lbann/data_readers/data_reader_python.hpp @@ -111,7 +111,10 @@ class object { object(object&& other); object& operator=(object&& other); ~object(); - operator PyObject*() { return m_ptr; } + inline PyObject* get() { return m_ptr; } + inline const PyObject* get() const { return m_ptr; } + inline operator PyObject*() { return get(); } + inline operator const PyObject*() const { return get(); } private: PyObject* m_ptr; }; @@ -120,7 +123,7 @@ class object { class python_reader : public generic_data_reader { public: - python_reader(std::string module, + python_reader(std::string script, std::string sample_function, std::string num_samples_function, std::string sample_dims_function); diff --git a/src/data_readers/data_reader_python.cpp b/src/data_readers/data_reader_python.cpp index 09a875a22dc..f950204995e 100644 --- a/src/data_readers/data_reader_python.cpp +++ b/src/data_readers/data_reader_python.cpp @@ -28,7 +28,6 @@ #include "lbann/data_readers/data_reader_python.hpp" #ifdef LBANN_HAS_PYTHON #include -#include "lbann/utils/file_utils.hpp" namespace lbann { @@ -75,14 +74,20 @@ void manager::check_error(bool force_error) const { std::ostringstream err; err << "detected Python error"; if (value != nullptr) { - err << " (" << PyUnicode_AsUTF8(value) << ")"; + const char* msg = PyUnicode_AsUTF8(value); + if (msg != nullptr) { + err << " (" << msg << ")"; + } } if (traceback != nullptr) { auto tb_module = PyImport_ImportModule("traceback"); auto tb_message = PyObject_CallMethod(tb_module, "format_exc", nullptr); - err 
<< "\n\n" << PyUnicode_AsUTF8(tb_message) << "\n"; + const char* tb_str = PyUnicode_AsUTF8(tb_message); + if (tb_str != nullptr) { + err << "\n\n" << tb_str; + } Py_XDECREF(tb_module); Py_XDECREF(tb_message); } @@ -139,35 +144,40 @@ object::~object() { } // namespace python -python_reader::python_reader(std::string module, +python_reader::python_reader(std::string script, std::string sample_function, std::string num_samples_function, std::string sample_dims_function) : generic_data_reader(true) { + int status; - // Import Python module + // Execute Python script auto& manager = python::manager::get_instance(); const auto lock = manager.get_mutex_guard(); - const auto& module_dir = file::extract_parent_directory(module); - if (!module_dir.empty()) { - python::object path = PySys_GetObject("path"); - auto status = PyList_Append(path, python::object(module_dir)); + auto&& f = std::fopen(script.c_str(), "r"); + if (f == nullptr) { + LBANN_ERROR("failed to open file (" + script + ")"); + } + status = PyRun_SimpleFile(f, script.c_str()); + if (status) { manager.check_error(status); } - auto module_name = file::extract_base_name(module); - module_name.erase(module_name.rfind(".py")); - python::object _module = PyImport_ImportModule(module_name.c_str()); + status = std::fclose(f); + if (status) { + LBANN_ERROR("failed to close file (" + script + ")"); + } + python::object module = PyImport_ImportModule("__main__"); // Get number of samples python::object num_func - = PyObject_GetAttrString(_module, num_samples_function.c_str()); + = PyObject_GetAttrString(module, num_samples_function.c_str()); python::object num = PyObject_CallObject(num_func, nullptr); m_num_samples = PyLong_AsLong(num); manager.check_error(); // Get sample dimensions python::object dims_func - = PyObject_GetAttrString(_module, sample_dims_function.c_str()); + = PyObject_GetAttrString(module, sample_dims_function.c_str()); python::object dims = PyObject_CallObject(dims_func, nullptr); dims = 
PyObject_GetIter(dims); for (auto d = PyIter_Next(dims); d != nullptr; d = PyIter_Next(dims)) { @@ -178,7 +188,7 @@ python_reader::python_reader(std::string module, // Get sample function m_sample_function - = PyObject_GetAttrString(_module, sample_function.c_str()); + = PyObject_GetAttrString(module, sample_function.c_str()); } @@ -208,10 +218,8 @@ bool python_reader::fetch_datum(CPUMat& X, int data_id, int col) { const auto lock = manager.get_mutex_guard(); // Get sample with Python - python::object id = PyTuple_New(1); - auto status = PyTuple_SetItem(id, 0, PyLong_FromLong(data_id)); - manager.check_error(status); - python::object sample = PyObject_CallObject(m_sample_function, id); + python::object args = Py_BuildValue("(i)", data_id); + python::object sample = PyObject_CallObject(m_sample_function, args); sample = PyObject_GetIter(sample); // Extract sample entries from Python iterator diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 03fda1d78e5..18c3b23f36c 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -76,7 +76,7 @@ message Reader { } message PythonDataReader { - string module = 1; // Python module (can be a file path) + string script = 1; // Python script string sample_function = 2; // Name of function that gets data sample string num_samples_function = 3; // Name of function that gets number of data samples string sample_dims_function = 4; // Name of function that gets dimensions of data sample diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index c75ef0bd3ab..55430b55a0b 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -328,7 +328,7 @@ void init_data_readers( } else if (name == "python") { #ifdef LBANN_HAS_PYTHON const auto& params = readme.python(); - reader = new python_reader(params.module(), + reader = new python_reader(params.script(), params.sample_function(), params.num_samples_function(), params.sample_dims_function()); @@ -487,7 +487,7 @@ void init_data_readers( } 
else if (name == "python") { #ifdef LBANN_HAS_PYTHON const auto& params = readme.python(); - reader_validation = new python_reader(params.module(), + reader_validation = new python_reader(params.script(), params.sample_function(), params.num_samples_function(), params.sample_dims_function()); From 57b5e25e1ad90744e1ad180d4e13050a9eb6d257 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Mon, 11 Mar 2019 16:49:07 -0700 Subject: [PATCH 167/443] Python module for LeNet. --- scripts/proto/lbann/models/lenet.py | 113 ++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 scripts/proto/lbann/models/lenet.py diff --git a/scripts/proto/lbann/models/lenet.py b/scripts/proto/lbann/models/lenet.py new file mode 100644 index 00000000000..d782770853c --- /dev/null +++ b/scripts/proto/lbann/models/lenet.py @@ -0,0 +1,113 @@ +import lbann.proto as lp +import lbann.modules as lm + +# ============================================== +# LeNet module +# ============================================== + +class LeNet(lm.Module): + """LeNet neural network. + + Assumes image data in NCHW format. + + See: + Yann LeCun, Leon Bottou, Yoshua Bengio, and Patrick + Haffner. "Gradient-based learning applied to document + recognition." Proceedings of the IEEE 86, no. 11 (1998): + 2278-2324. + + """ + + global_count = 0 # Static counter, used for default names + + def __init__(self, output_size, name=None): + """Initialize LeNet. + + Args: + output_size (int): Size of output tensor. + name (str, optional): Module name + (default: 'lenet_module'). 
+ + """ + LeNet.global_count += 1 + self.instance = 0 + self.name = (name if name + else 'lenet_module{0}'.format(LeNet.global_count)) + self.conv1 = lm.Convolution2dModule(6, 5, activation=lp.Relu, + name=self.name+'_conv1') + self.conv2 = lm.Convolution2dModule(16, 5, activation=lp.Relu, + name=self.name+'_conv2') + self.fc1 = lm.FullyConnectedModule(120, activation=lp.Relu, + name=self.name+'_fc1') + self.fc2 = lm.FullyConnectedModule(84, activation=lp.Relu, + name=self.name+'_fc2') + self.fc3 = lm.FullyConnectedModule(output_size, + name=self.name+'_fc3') + + def forward(self, x): + self.instance += 1 + + # Convolutional network + x = self.conv1(x) + x = lp.Pooling( + x, num_dims=2, has_vectors=False, + pool_dims_i=2, pool_pads_i=0, pool_strides_i=2, + pool_mode='max', + name='{0}_pool1_instance{1}'.format(self.name,self.instance)) + x = self.conv2(x) + x = lp.Pooling(x, num_dims=2, has_vectors=False, + pool_dims_i=2, pool_pads_i=0, pool_strides_i=2, + pool_mode='max', + name='{0}_pool2_instance{1}'.format(self.name,self.instance)) + return self.fc3(self.fc2(self.fc1(x))) + +# ============================================== +# Export prototext +# ============================================== + +if __name__ == '__main__': + + # Options + import argparse + parser = argparse.ArgumentParser() + parser.add_argument( + 'file', + nargs='?', default='model.prototext', type=str, + help='exported prototext file') + parser.add_argument( + '--num-labels', action='store', default=10, type=int, + help='number of data classes (default: 10)') + args = parser.parse_args() + + # Construct layer graph. 
+ input = lp.Input() + images = lp.Identity(input) + labels = lp.Identity(input) + preds = LeNet(args.num_labels)(images) + softmax = lp.Softmax(preds) + ce = lp.CrossEntropy([softmax, labels]) + top1 = lp.CategoricalAccuracy([softmax, labels]) + top5 = lp.TopKCategoricalAccuracy([softmax, labels], k=5) + layers = list(lp.traverse_layer_graph(input)) + + # Setup objective function + weights = set() + for l in layers: + weights.update(l.weights) + l2_reg = lp.L2WeightRegularization(weights=weights, scale=1e-4) + obj = lp.ObjectiveFunction([ce, l2_reg]) + + # Setup model + mini_batch_size = 256 + num_epochs = 20 + metrics = [lp.Metric(top1, name='categorical accuracy', unit='%'), + lp.Metric(top5, name='top-5 categorical accuracy', unit='%')] + callbacks = [lp.CallbackPrint(), + lp.CallbackTimer()] + model = lp.Model(mini_batch_size, num_epochs, + layers=layers, weights=weights, + objective_function=obj, + metrics=metrics, callbacks=callbacks) + + # Export model to file + model.save_proto(args.file) From 9753f3c9383f4b5dab59b3838b0eedf17c501f15 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Tue, 12 Mar 2019 09:43:44 -0700 Subject: [PATCH 168/443] add CEREAL to superbuild documentation --- docs/BuildingLBANN.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/BuildingLBANN.md b/docs/BuildingLBANN.md index 4643c7d3354..17e318c1f1b 100644 --- a/docs/BuildingLBANN.md +++ b/docs/BuildingLBANN.md @@ -326,6 +326,7 @@ supported. These are one or two edges from LBANN in the dependency DAG. + Aluminum ++ CEREAL + CNPY + CONDUIT + [CUB](https://github.com/nvlabs/cub). This is used by Hydrogen for @@ -382,8 +383,8 @@ cmake \ /path/to/lbann/superbuild ``` will invoke the superbuild to build Aluminum, Hydrogen, and LBANN -_only_. Acceptable values for `` are `ALUMINUM`, `CNPY`, -`CONDUIT`, `CUB`, `HDF5`, `HYDROGEN`, `JPEG_TURBO`, `OPENCV`, +_only_. 
Acceptable values for `` are `ALUMINUM`, `CEREAL`, +`CNPY`, `CONDUIT`, `CUB`, `HDF5`, `HYDROGEN`, `JPEG_TURBO`, `OPENCV`, `PROTOBUF` and `LBANN`. ### Forwarding options to sub-projects From e2bd54e1d23c1c9b0dbfbd70742b32eea573774f Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Tue, 12 Mar 2019 10:46:37 -0700 Subject: [PATCH 169/443] Updating Python documentation with changes in PR #918. --- scripts/proto/README.md | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/scripts/proto/README.md b/scripts/proto/README.md index d26f83bf01a..26a10ca2574 100644 --- a/scripts/proto/README.md +++ b/scripts/proto/README.md @@ -142,12 +142,14 @@ top1 = lp.CategoricalAccuracy([softmax, labels]) top5 = lp.TopKCategoricalAccuracy([softmax, labels], k=5) # ---------------------------------------------------------- -# Construct objective function, metrics, and callbacks. +# Construct model. # ---------------------------------------------------------- +mini_batch_size = 256 +num_epochs = 10 obj = lp.ObjectiveFunction([ cross_entropy, - lp.L2WeightRegularization(scale_factor=1e-4)]) # L2 weight regularization + lp.L2WeightRegularization(scale_factor=1e-4)]) ]) metrics = [ lp.Metric(top1, name='categorical accuracy', unit='%'), @@ -157,19 +159,18 @@ callbacks = [ lp.CallbackPrint(), # Print basic information every epoch. lp.CallbackTimer() # Print timing information every epoch. ] +model = lp.Model( + mini_batch_size, num_epochs, + layers=traverse_layer_graph(input), # Get layers connected to input + objective_function=obj, + metrics=metrics, + callbacks=callbacks) # ---------------------------------------------------------- # Save the model to a prototext file. # ---------------------------------------------------------- -lp.save_model( - 'test.prototext', # Write to test.prototext. - 256, # Mini-batch size. - 10, # Number of epochs for training. - layers=traverse_layer_graph(input), # Get all layers connected to input. 
- objective_function=obj, - metrics=metrics, - callbacks=callbacks) +model.save_proto('test.prototext') ``` From 35c0052a4fd6244d4511a786919830652d6881ab Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Tue, 12 Mar 2019 14:27:23 -0700 Subject: [PATCH 170/443] Adding optimizers to Python interface. Using oneof in optimizer protobuf message. Removing command-line arguments to set optimizer. --- model_zoo/optimizers/opt_sgd.prototext | 5 +- scripts/proto/lbann/proto.py | 25 ++++++- src/proto/lbann.proto | 43 ++++++------ src/proto/proto_common.cpp | 90 +------------------------- 4 files changed, 48 insertions(+), 115 deletions(-) diff --git a/model_zoo/optimizers/opt_sgd.prototext b/model_zoo/optimizers/opt_sgd.prototext index 3ab5afd6406..8d066780476 100644 --- a/model_zoo/optimizers/opt_sgd.prototext +++ b/model_zoo/optimizers/opt_sgd.prototext @@ -1,8 +1,7 @@ optimizer { sgd { learn_rate: 0.01 - momentum: 0.9 - decay_rate: 0 + momentum: 0.9 nesterov: false - } + } } diff --git a/scripts/proto/lbann/proto.py b/scripts/proto/lbann/proto.py index c820e6307f4..76a5fc1c690 100644 --- a/scripts/proto/lbann/proto.py +++ b/scripts/proto/lbann/proto.py @@ -7,6 +7,7 @@ # Import modules import google.protobuf.text_format +import google.protobuf.message from collections.abc import Iterable # Import lbann_pb2 module generated by protobuf @@ -495,6 +496,25 @@ def export_proto(self): # added to the Callback message in lbann.proto _generate_classes_from_message(Callback, lbann_pb2.Callback) +# ============================================== +# Optimizers +# ============================================== + +class Optimizer: + """Base class for optimizers.""" + + def __init__(self): + pass + + def export_proto(self): + """Construct and return a protobuf message.""" + return lbann_pb2.Optimizer() + +# Generate Optimizer sub-classes from lbann.proto +# Note: The list of skip fields must be updated if any new fields are +# added to the Optimizer message in lbann.proto 
+_generate_classes_from_message(Optimizer, lbann_pb2.Optimizer) + # ============================================== # Model # ============================================== @@ -566,7 +586,10 @@ def save_prototext(filename, **kwargs): `model`, `data_reader`, and `optimizer`. """ - # Initialize protobuf message + # Construct protobuf message + for key, value in kwargs.items(): + if not isinstance(value, google.protobuf.message.Message): + kwargs[key] = value.export_proto() pb = lbann_pb2.LbannPB(**kwargs) # Write to file diff --git a/src/proto/lbann.proto b/src/proto/lbann.proto index 235722757ca..8f75fc434c3 100644 --- a/src/proto/lbann.proto +++ b/src/proto/lbann.proto @@ -316,46 +316,45 @@ message LayerMetric { // Optimizers //======================================================================== message Optimizer { - // An Optimizer should contain exactly one of the following - // (this may or may not be properly checked for in proto_common.cpp) - Adagrad adagrad = 1; - Rmsprop rmsprop = 2; - Adam adam = 3; - HypergradientAdam hypergradient_adam = 4; - Sgd sgd = 5; + oneof optimizer_type { + AdaGrad adagrad = 1; + Adam adam = 2; + HypergradientAdam hypergradient_adam = 3; + RMSprop rmsprop = 4; + SGD sgd = 5; + } } -message Adagrad { +message AdaGrad { double learn_rate = 1; - double eps = 2; //default: 1e-8 + double eps = 2; // Suggested: 1e-8 } message Adam { double learn_rate = 1; - double beta1 = 6; //default: 0.9 - double beta2 = 7; //default: 0.99 - double eps = 8; //default: 1e-8 + double beta1 = 6; // Suggested: 0.9 + double beta2 = 7; // Suggested: 0.99 + double eps = 8; // Suggested: 1e-8 } message HypergradientAdam { double init_learning_rate = 1; - double hyper_learning_rate = 2; //default: 1e-7 - double beta1 = 6; //default: 0.9 - double beta2 = 7; //default: 0.99 - double eps = 8; //default: 1e-8 + double hyper_learning_rate = 2; // Suggested: 1e-7 + double beta1 = 6; // Suggested: 0.9 + double beta2 = 7; // Suggested: 0.99 + double eps = 8; // 
Suggested: 1e-8 } -message Rmsprop { +message RMSprop { double learn_rate = 1; double decay_rate = 2; - double eps = 3; //default: 1e-8 + double eps = 3; // Suggested: 1e-8 } -message Sgd { +message SGD { double learn_rate = 1; - double momentum = 2; //default: 0 - double decay_rate = 3; //default: 0 - bool nesterov = 4; //default: false + double momentum = 2; // Set to zero for vanilla SGD + bool nesterov = 4; } diff --git a/src/proto/proto_common.cpp b/src/proto/proto_common.cpp index 992b60651f2..21552354e8b 100644 --- a/src/proto/proto_common.cpp +++ b/src/proto/proto_common.cpp @@ -699,7 +699,6 @@ void customize_data_readers_index_list(const lbann_comm& comm, lbann_data::Lbann void get_cmdline_overrides(const lbann_comm& comm, lbann_data::LbannPB& p) { - bool master = comm.am_world_master(); std::ostringstream err; options *opts = options::get(); @@ -716,20 +715,6 @@ void get_cmdline_overrides(const lbann_comm& comm, lbann_data::LbannPB& p) } } - if (opts->has_string("dag_model")) { - std::string sanity = model->type(); - if (sanity != "dnn") { - err << __FILE__ << " " << __LINE__ << " :: " - << " the current network model is: " << model->type() - << "; you can only change the model to 'dag_model' if the current model is 'dnn'"; - throw lbann_exception(err.str()); - } - if (master) { - std::cout << "\nchanging model from " << model->type() << " to: dag\n\n"; - } - model->set_type("dag_model"); - } - if (opts->has_string("data_filedir") or opts->has_string("data_filedir_train") or opts->has_string("data_filename_train") @@ -785,65 +770,6 @@ void get_cmdline_overrides(const lbann_comm& comm, lbann_data::LbannPB& p) model->set_serialize_io(opts->get_bool("serialize_io")); } - - if (opts->has_string("opt")) { - //defaults - double learn_rate = opts->has_float("learn_rate") ? opts->get_float("learn_rate") : 0.01; - double eps = opts->has_float("eps") ? opts->get_float("eps") : 1e-8; - double beta1 = opts->has_float("beta1") ? 
opts->get_float("beta1") : 0.9; - double beta2 = opts->has_float("beta2") ? opts->get_float("beta2") : 0.99; - double init_learning_rate = opts->has_float("init_learning_rate") ? opts->get_float("init_learning_rate") : 0.01; - double hyper_learning_rate = opts->has_float("hyper_learning_rate") ? opts->get_float("hyper_learning_rate") : 1e-7; - double momentum = opts->has_float("momentum") ? opts->get_float("momentum") : 0.9; - double decay_rate = opts->has_float("decay_rate") ? opts->get_float("decay_rate") : 0.5; - bool nesterov = opts->has_bool("nesterov") ? opts->get_float("nesterov") : false; - - auto *opt = new lbann_data::Optimizer; - - //construct the new optimizer - std::string opt_string = opts->get_string("opt"); - if (opt_string == "adagrad") { - auto *a = new lbann_data::Adagrad; - a->set_learn_rate(learn_rate); - a->set_eps(eps); - opt->set_allocated_adagrad(a); - } else if (opt_string == "adam") { - auto *a = new lbann_data::Adam; - a->set_learn_rate(learn_rate); - a->set_eps(eps); - a->set_beta1(beta1); - a->set_beta2(beta2); - opt->set_allocated_adam(a); - } else if (opt_string == "hypergradient_adam") { - auto *a = new lbann_data::HypergradientAdam; - a->set_init_learning_rate(init_learning_rate); - a->set_hyper_learning_rate(hyper_learning_rate); - a->set_beta1(beta1); - a->set_beta2(beta2); - a->set_eps(eps); - opt->set_allocated_hypergradient_adam(a); - } else if (opt_string == "rmsprop") { - auto *a = new lbann_data::Rmsprop; - a->set_learn_rate(learn_rate); - a->set_decay_rate(decay_rate); - a->set_eps(eps); - opt->set_allocated_rmsprop(a); - } else if (opt_string == "sgd") { - if (master) std::cerr << "\n\nsetting: sgd\n\n"; - auto *a = new lbann_data::Sgd; - a->set_learn_rate(learn_rate); - a->set_momentum(momentum); - a->set_decay_rate(decay_rate); - a->set_nesterov(nesterov); - opt->set_allocated_sgd(a); - } else { - err << __FILE__ << " " << __LINE__ - << " :: unknown string for --optimizer: " << opt_string - << " should be on of: 
adagrad, adam, hypergradient_adam, rmsprop, sgd"; - throw lbann_exception(err.str()); - } - p.set_allocated_optimizer(opt); - } } void print_parameters(const lbann_comm& comm, lbann_data::LbannPB& p) @@ -898,7 +824,6 @@ void print_help(std::ostream& os) " e.g: --disable_cuda, then a value of '1' is assigned)\n" "\n" "General:\n" - " --dag_model\n" " --mini_batch_size=\n" " --num_epochs=\n" " --block_size=\n" @@ -950,20 +875,7 @@ void print_help(std::ostream& os) " used if the option is not specified on the cmd line.\n" " If you specify an option that is not applicable to your choice\n" " of optimizer, the option is ignored\n" - "\n" - " --opt=\n" - " must be one of:\n" - " adagrad, adam, hypergradient_adam, rmsprop, sgd\n" - "\n" - " --learn_rate=< 0.01 > (all except hypergradient_adam)\n" - " --eps=< 1e-8 > (all except sgd)\n" - " --beta1=< 0.9 > (adam, hypergradient_adam)\n" - " --beta2=< 0.99 > (adam, hypergradient_adam)\n" - " --init_learning_rate=< 0.01 > (hypergradient_adam)\n" - " --hyper_learning_rate=< 1e-7 > (hypergradient_adam)\n" - " --momentum=< 0.9 > (sgd)\n" - " --decay_rate=< 0.5 > (sgd, rmsprop)\n" - " --nesterov=< false > (sgd)\n"; + "\n"; } void copy_file(std::string fn, std::ofstream &out) From ebf9887d445136496d78d7e8c21d0e85e1ec3ce7 Mon Sep 17 00:00:00 2001 From: Ryan Forsyth Date: Tue, 12 Mar 2019 14:36:41 -0700 Subject: [PATCH 171/443] Add archiving logs --- bamboo/integration_tests/test_integration_performance.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bamboo/integration_tests/test_integration_performance.py b/bamboo/integration_tests/test_integration_performance.py index 66bbf7140a3..7ff101d684a 100644 --- a/bamboo/integration_tests/test_integration_performance.py +++ b/bamboo/integration_tests/test_integration_performance.py @@ -57,8 +57,12 @@ def run_tests(actual_performance, model_name, dir_name, should_log, plan = os.environ[key] if plan in ['LBANN-NIGHTD', 'LBANN-WD']: archive_file = 
'/usr/workspace/wsb/lbannusr/archives/%s/%s/%s/performance_%s.txt' % (plan, cluster, compiler_name, model_name) + archival_string = '%s, %f, %f, %f, %f, %f, %f\n' % (os.environ['bamboo_buildNumber'], max_run_time, max_mean, max_max, max_min, max_stdev, min_accuracy) + print('Archive file: ' + archive_file) + print('Archiving: ' + archival_string) with open(archive_file, 'a') as archive: - archive.write('%s, %f, %f, %f, %f, %f, %f\n' % (os.environ['bamboo_buildNumber'], max_run_time, max_mean, max_max, max_min, max_stdev, min_accuracy)) + print('Archiving to file.') + archive.write(archival_string) else: print('The plan %s does not have archiving activated' % plan) else: From f89cf3c7f685d94a22b57f6c592485295ae069c3 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Tue, 12 Mar 2019 16:39:18 -0700 Subject: [PATCH 172/443] Option to specify optimizer for weights in Python interface. --- scripts/proto/lbann/proto.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/proto/lbann/proto.py b/scripts/proto/lbann/proto.py index 76a5fc1c690..8e111a07ae8 100644 --- a/scripts/proto/lbann/proto.py +++ b/scripts/proto/lbann/proto.py @@ -362,9 +362,10 @@ def export_proto(self): init_message.CopyFrom(self.initializer.export_proto()) init_message.SetInParent() - # TODO: implement + # Set optimizer if needed if self.optimizer: - raise NotImplementedError('Weights cannot handle non-default optimizers') + proto.optimizer.CopyFrom(self.optimizer.export_proto()) + proto.optimizer.SetInParent() return proto From 45307eb0ea5c5e4e64f532cd27cd4731aec4447a Mon Sep 17 00:00:00 2001 From: "Thomas R. 
Benson" Date: Wed, 13 Mar 2019 13:57:53 -0700 Subject: [PATCH 173/443] update build docs --- README.md | 14 +- docs/BuildingLBANN.md | 362 ++------------------------------- docs/BuildingLBANNWithCMake.md | 351 ++++++++++++++++++++++++++++++++ 3 files changed, 375 insertions(+), 352 deletions(-) create mode 100644 docs/BuildingLBANNWithCMake.md diff --git a/README.md b/README.md index 10d3c8e1b7b..932abd4d402 100644 --- a/README.md +++ b/README.md @@ -21,9 +21,19 @@ methods. ## Building LBANN -A few options for building LBANN are documented -[here](docs/BuildingLBANN.md#top). +The preferred method for LBANN users to install LBANN is to use +[Spack](https://github.com/llnl/spack). After some system +configuration, this should be as straightforward as +```bash +spack install lbann +``` + +More detailed instructions for installing LBANN with Spack are +included [here](docs/BuildingLBANN.md#top). + +For developers or advanced users, the CMake build system and the +"Superbuild" are documented [here](docs/BuildingLBANNWithCMake.md#top). ## Running LBANN The basic template for running LBANN is diff --git a/docs/BuildingLBANN.md b/docs/BuildingLBANN.md index 17e318c1f1b..25c44ed78e1 100644 --- a/docs/BuildingLBANN.md +++ b/docs/BuildingLBANN.md @@ -162,373 +162,35 @@ Hydrogen, and LBANN separately, by whatever means they choose. 
cd ${LBANN_BUILD_DIR} cmake \ -G Ninja \ + -D CMAKE_BUILD_TYPE:STRING=Release \ + -D CMAKE_INSTALL_PREFIX:PATH=${LBANN_INSTALL_DIR} \ + \ -D LBANN_SB_BUILD_ALUMINUM=ON \ -D ALUMINUM_ENABLE_MPI_CUDA=OFF \ -D ALUMINUM_ENABLE_NCCL=ON \ + \ -D LBANN_SB_BUILD_HYDROGEN=ON \ + -D Hydrogen_ENABLE_ALUMINUM=ON \ + -D Hydrogen_ENABLE_CUB=ON \ -D Hydrogen_ENABLE_CUDA=ON \ + \ -D LBANN_SB_BUILD_LBANN=ON \ - -D CMAKE_BUILD_TYPE:STRING=Release \ - -D LBANN_WITH_CUDA:BOOL=ON \ - -D LBANN_WITH_NVPROF:BOOL=ON \ -D LBANN_DATATYPE:STRING=float \ - -D LBANN_WITH_TOPO_AWARE:BOOL=ON \ + -D LBANN_SEQUENTIAL_INITIALIZATION:BOOL=OFF \ -D LBANN_WITH_ALUMINUM:BOOL=ON \ -D LBANN_WITH_CONDUIT:BOOL=ON \ -D LBANN_WITH_CUDA:BOOL=ON \ -D LBANN_WITH_CUDNN:BOOL=ON \ -D LBANN_WITH_NCCL:BOOL=ON \ + -D LBANN_WITH_NVPROF:BOOL=ON \ -D LBANN_WITH_SOFTMAX_CUDA:BOOL=ON \ - -D LBANN_SEQUENTIAL_INITIALIZATION:BOOL=OFF \ + -D LBANN_WITH_TOPO_AWARE:BOOL=ON \ -D LBANN_WITH_TBINF=OFF \ -D LBANN_WITH_VTUNE:BOOL=OFF \ - -D LBANN_DATATYPE=float \ - -D CMAKE_INSTALL_PREFIX:PATH=${LBANN_INSTALL_DIR} \ ${LBANN_HOME}/superbuild ninja ``` -## Building with [CMake](https://cmake.org) - -LBANN uses [CMake](https://cmake.org) for its build system and a -version newer than or equal to 3.9.0 is required. LBANN development is -done primarily on UNIX-based platforms. As such, the build is tested -regularly on Linux-based machines, occasionally on OSX, and never on -Windows machines. - -It is required that LBANN be built out-of-source. That is, CMake must -not be invoked in a directory containing a CMakeLists. - -### LBANN CMake options -The following options are exposed in the CMake build system. - -+ `LBANN_WITH_ALUMINUM` (Default: `OFF`): Use the Aluminum communication - package. This will be set to `ON` automatically if Hydrogen was - built with Aluminum. - -+ `LBANN_WITH_CNPY` (Default: `ON`): Build with support for CNPY for reading - Numpy data. - -+ `LBANN_WITH_CONDUIT` (Default: `OFF`): Build with support for CONDUIT. 
- -+ `LBANN_WITH_NVPROF` (Default: `OFF`): Build with extra annotations for NVPROF. - -+ `LBANN_WITH_TOPO_AWARE` (Default: `ON`): Use HWLOC for topology-aware choices. - -+ `LBANN_WITH_TBINF` (Default: `ON`): Enable the Tensorboard interace. - -+ `LBANN_WITH_VTUNE` (Default: `OFF`): Build with extra annotations for VTune. - -+ `LBANN_DETERMINISTIC` (Default: `OFF`): Force as much of the code as possible - to be deterministic. This is not a guarantee as certain operations - in third-party libraries cannot be forced into a deterministic mode, - especially for CUDA-enabled builds. - -+ `LBANN_SEQUENTIAL_INITIALIZATION` (Default: `OFF`): Force sequentially - consistent initialization of data structures. - -+ `LBANN_WARNINGS_AS_ERRORS` (Default: `OFF`): Promote compiler - warnings to errors. This should be used by developers - only. Developers are encouraged to build with this `ON` prior to - merging any code into the repository. - -+ `LBANN_USE_PROTOBUF_MODULE` (Default: `OFF`): Search for Protobuf - using CMake's `FindProtobuf.cmake` module instead of the Protobuf - config file. This is useful on platforms with differently - architected compute nodes or when the config method is inexplicably - failing. - -The following variables may also be set: - -+ `LBANN_DATATYPE` (Default: `float`): The datatype to use for - training. Currently this must be `float` or `double`. - -The following variable has been deprecated and removed: - -+ `LBANN_WITH_CUDA`. The "CUDA-ness" of LBANN is now tied 1:1 with the - "CUDA-ness" of Hydrogen. At present, it seems like unnecessary - overhead to support the situation in which Hydrogen has CUDA support - but LBANN doesn't want to use it until a compelling use-case reveals - itself. - -### Controlling dependency resolution -The following variables may be set with CMake to identify dependencies -that are not installed into the "typical" locations that CMake -searches by default. 
They may be either exported into the environment -used by CMake using whatever mechanisms are allowed by the shell or -passed to CMake as a cache variable -(e.g., `cmake -DPKG_DIR=/path/to/pkg`). -The latter option is recommended. - -+ `Aluminum_DIR` or `ALUMINUM_DIR` or `AL_DIR`: The path to _either_ - the Aluminum installation prefix _or_ the AluminumConfig.cmake - file. If Hydrogen has not been built with Aluminum support, set - `LBANN_WITH_ALUMINUM=ON` to enable Aluminum support. -+ `CEREAL_DIR`: The path to _either_ the CEREAL installation prefix - _or_ the cereal-config.cmake file. -+ `CNPY_DIR`: The path to the CNPY installation prefix. Must set - `LBANN_WITH_CNPY=ON` to enable CNPY support. -+ `CONDUIT_DIR` or `CONDUIT_DIR`: The path to _either_ the - CONDUIT installation prefix _or_ the conduit.cmake file. Must set - `LBANN_WITH_CONDUIT=ON` to enable CONDUIT support. - + `HDF5_DIR`: The path to _either_ the HDF5 installation prefix _or_ - the hdf5_config.cmake file. There is a known issue with CONDUIT - that it may link to HDF5 but not properly export that dependency. -+ `HWLOC_DIR`: The path to the HWLOC installation prefix. Must set - `LBANN_WITH_HWLOC=ON` to enable HWLOC support. -+ `Hydrogen_DIR` or `HYDROGEN_DIR`: The path to _either_ the Hydrogen - installation prefix _or_ the HydrogenConfig.cmake file. -+ `NVTX_DIR`: The path the the prefix of NVTX. This should not be used - except in circumstances in which one might want to link to a - different NVTX installation than the CUDA toolkit. Under normal - circumstances, if CUDA was found without issue, NVTX should be as - well. -+ `OpenCV_DIR` or `OPENCV_DIR`: The path to _either_ the OpenCV - installation prefix _or_ the OpenCVConfig.cmake file. -+ `Protobuf_DIR` or `PROTOBUF_DIR`: The path to _either_ the Protobuf - installation prefix _or_ the protobuf-config.cmake file. -+ `VTUNE_DIR`: The path to the prefix of the VTune (or Intel compiler - suite) installation. 
- -Compilers, include CUDA compilers, are found using the default CMake -mechanisms, as are OpenMP and MPI. Thus, the process of finding these -tools can be manipulated using the usual CMake mechanisms and/or cache -variables as [documented by CMake](https://cmake.org/documentation). - -Except where otherwise noted, this list attempts to address the first -level of dependencies of LBANN, that is, those that are one edge away -in the DAG. If deeper dependency issues appear, please consult the -documentation of the packages that are causing the issues as they may -require additional CMake/environment flags to be set before properly -resolving. - -### Example CMake invocation -A sample CMake build for LBANN might look like the following. -```bash -cmake \ - -D LBANN_WITH_CUDA:BOOL=ON \ - -D LBANN_WITH_NVPROF:BOOL=ON \ - -D LBANN_DATATYPE:STRING=float \ - -D Hydrogen_DIR:PATH=/path/to/hydrogen \ - -D HWLOC_DIR:PATH=/path/to/hwloc \ - /path/to/lbann -``` - -## Building an entire ecosystem with the "Superbuild" - -__WARNING__: This is primarily for developer convenience and is not -meant to be robust to all possible use-cases for LBANN. - -LBANN includes CMake `ExternalProject` definitions for a large portion -of its dependency graph. The following dependencies are -supported. These are one or two edges from LBANN in the -dependency DAG. - -+ Aluminum -+ CEREAL -+ CNPY -+ CONDUIT -+ [CUB](https://github.com/nvlabs/cub). This is used by Hydrogen for - efficiently managing GPU memory. -+ [HDF5](https://www.hdfgroup.org/solutions/hdf5). This is a - dependency of CONDUIT. -+ Hydrogen -+ [JPEG-turbo](https://github.com/libjpeg-turbo/libjpeg-turbo). This - is a dependency of OpenCV. -+ [OpenBLAS](https://github.com/xianyi/OpenBLAS.git). This is an - optional dependency of Hydrogen. It is recommended if your system - does not have a system-optimized BLAS distribution (e.g., Intel's MKL). 
-+ OpenCV -+ Protobuf - -The following dependencies are known to exist but for some reason or -another are not supported by the superbuild framework. - -+ cuDNN is a freely available binary package available from NVIDIA. -+ NCCL is a freely available binary package available from - NVIDIA. Inspired users may also build it from source from its - [github repository](https://github.com/nvidia/nccl). -+ HWLOC is often installed by default, especially on large - supercomputers. Certain components may require superuser access to - configure, but these features are not used by LBANN. If it is not - available, ask the system administrators, consult the package - manager, install using Spack, or build from - [source](https://www.open-mpi.org/projects/hwloc/). - -The superbuild system is itself a CMake project rooted in -`$LBANN_HOME/superbuild` (distinct from the LBANN CMake project rooted -in `$LBANN_HOME`). Options that control the superbuild system are -prefixed with `LBANN_SB_`; other options that appear in a CMake -invocation for the superbuild are either interpreted on a sub-project -basis or forwarded to certain sub-projects. - -### Choosing packages to build in the Superbuild -The superbuild system is _constructive_ or _additive_; that is, it -will only build the packages that it is asked to build. Any required -package that is not requested is assumed to exist on the system by the -time it is needed by whichever package requires it. For example, if -HDF5 is provided by the system administrators on a system, it does not -need to be built and CONDUIT can be built by pointing its build to the -system HDF5. - -Packages are included in a superbuild by passing -`LBANN_SB_BUILD_` options to CMake _for each package_ that it -should build, including LBANN itself. 
E.g., -```bash -cmake \ - -DLBANN_SB_BUILD_ALUMINUM=ON \ - -DLBANN_SB_BUILD_HYDROGEN=ON \ - -DLBANN_SB_BUILD_LBANN=ON \ - /path/to/lbann/superbuild -``` -will invoke the superbuild to build Aluminum, Hydrogen, and LBANN -_only_. Acceptable values for `` are `ALUMINUM`, `CEREAL`, -`CNPY`, `CONDUIT`, `CUB`, `HDF5`, `HYDROGEN`, `JPEG_TURBO`, `OPENCV`, -`PROTOBUF` and `LBANN`. - -### Forwarding options to sub-projects -The subprojects are largely pre-configured to "do the right thing" for -building LBANN. However, there are some variables that users of the -superbuild system may need to control. These are exposed as regular -CMake options in the individual projects' CMakeLists and can be viewed -by running, e.g., - -```bash -cmake -L superbuild//CMakeLists.txt -``` - -Several significant CMake flags are automatically forwarded from the -superbuild CMake to subprojects. These are generally "typical" CMake -flags (but not all; if something is missing, open please -[an issue](https://github.com/llnl/lbann/issues)). Some examples are - -+ `CMAKE_INSTALL_PREFIX` -+ `CMAKE_BUILD_TYPE` -+ `CMAKE__COMPILER` -+ `CMAKE__FLAGS` - -To accommodate developers working on edge-cases with these -dependencies, any flag may be forwarded to any CMake-built package -using the following syntax: -`LBANN_SB_FWD__
below + [here](BuildingLBANNWithCMake.md#building-an-entire-ecosystem-with-the-superbuild) for a list and descriptions of all CMake flags known to LBANN's "Superbuild" build system. A representative CMake command line that expects `LBANN_HOME`, `LBANN_BUILD_DIR`, `LBANN_INSTALL_DIR` From 0e7870ad0623b9885695a1956686832ddfcda7d8 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Wed, 13 Mar 2019 14:07:52 -0700 Subject: [PATCH 175/443] update sphinx doc list --- docs/getting_started.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 471854752af..a0aea94c00c 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -1,3 +1,4 @@ .. mdinclude:: ./BuildingLBANN.md +.. mdinclude:: ./BuildingLBANNWithCMake.md .. mdinclude:: ./spack_environment.md .. mdinclude:: ./RunningLBANN.md From d1026a56d4b8cf74cef889760b087049adca12f4 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Wed, 13 Mar 2019 15:49:26 -0700 Subject: [PATCH 176/443] update find_package for conduit to reflect latest changes on their end. --- CMakeLists.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1b470f51c88..f0e5b2555b8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -338,15 +338,15 @@ if (LBANN_WITH_CONDUIT) message(STATUS "Found HDF5: ${HDF5_DIR}") endif () - find_package(CONDUIT CONFIG QUIET - HINTS ${CONDUIT_DIR} $ENV{CONDUIT_DIR} + find_package(Conduit CONFIG QUIET + HINTS ${Conduit_DIR} $ENV{Conduit_DIR} ${CONDUIT_DIR} $ENV{CONDUIT_DIR} PATH_SUFFIXES lib64/cmake lib/cmake NO_DEFAULT_PATH) - if (NOT CONDUIT_FOUND) - find_package(CONDUIT CONFIG QUIET REQUIRED + if (NOT Conduit_FOUND) + find_package(Conduit CONFIG QUIET REQUIRED PATH_SUFFIXES lib64/cmake lib/cmake) endif () - message(STATUS "Found CONDUIT: ${CONDUIT_DIR}") + message(STATUS "Found CONDUIT: ${Conduit_DIR}") # Ugh. 
I don't like that this requires intimate knowledge of # specific targets that CONDUIT exports. It should support From ee27920f2ec9dc68593683342c1e6d929beba6f2 Mon Sep 17 00:00:00 2001 From: "Thomas R. Benson" Date: Thu, 14 Mar 2019 10:08:38 -0700 Subject: [PATCH 177/443] tmp: update building_lbann.rst --- docs/building_lbann.rst | 232 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 232 insertions(+) create mode 100644 docs/building_lbann.rst diff --git a/docs/building_lbann.rst b/docs/building_lbann.rst new file mode 100644 index 00000000000..8608e83c504 --- /dev/null +++ b/docs/building_lbann.rst @@ -0,0 +1,232 @@ +.. role:: bash(code) + :language: bash + +==================== +Building LBANN +==================== + +-------------------- +Download +-------------------- + +LBANN source code can be obtained from the `Github +repo `_. + +-------------------- +Dependencies +-------------------- + +The following packages and tools are required to build LBANN. All +packages listed below may be installed using +`Spack `_. See +below +for more details on using Spack to build a complete LBANN +environment. + +The following basic tools are **required**. + ++ A C++11-compliant compiler. ++ OpenMP, version 3.0 or newer. ++ An MPI-3.0 implementation. ++ `CEREAL `_ is used to handle + complex serialization tasks. ++ `CMake `_, version 3.9 or newer. + +The following LLNL-maintained packages are **required**. + ++ `Hydrogen `_ is a fork of the + `Elemental `_ distributed + dense linear-algebra library and it may be installed via + `Spack `_ using the package name + "hydrogen". If CUDA support is enabled in Hydrogen, LBANN will + inherit this support. + +The following third-party packages are **required**. + ++ `CNPY `_ is used to ingest data + in NumPy format. In principle this should be optional, but at time + of writing, LBANN will not build without it. ++ `OpenCV `_ is used to preprocess + image data. 
For performance reasons, it is recommend to build OpenCV + with `JPEG-turbo `_ + for JPEG format support. ++ `ProtoBuf `_ is used to + express models in a portable format. + +The following LLNL-maintained packages are **optional**. + ++ `Aluminum `_ is a + communication library optimized for machine learning and interaction + with GPUs. We cannot recommend its use strongly enough. It can be + built using `Spack `_. ++ `CONDUIT `_ is used to ingest + structured data produced by scientific simulations. + +The following third-party packages are **optional**. + ++ `CUDA `_. The development + team currently uses CUDA version 9.2. Building with CUDA support + requires that Hydrogen has been built with CUDA support (see below). + + `cuDNN `_ is required if + building LBANN with CUDA support. It is freely available as a binary + distribution from NVIDIA. ++ `HWLOC `_. HWLOC enables + LBANN to make certain optimizations based on the hardware + topology. Its use is strongly recommended. ++ NVTX. LBANN supports some improved annotations for NVPROF using + NVTX. NVTX is provided as part of the CUDA toolkit. ++ VTune. LBANN supports some improved annotations for VTune. + +------------------------------------------------------------ +Building with `Spack `_ +------------------------------------------------------------ + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Setup Spack and local base tools +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Download and install `Spack `_. + Additionally setup shell support as discussed + `here `_. + + .. code-block:: bash + + . ${SPACK_ROOT}/share/spack/setup-env.sh + + +2. Setup your compiler and external software environment. For example, + on LLNL\'s LC machines, one might load the following modules: + + .. code-block:: bash + + ml gcc/7.3.0 mvapich2/2.3 cuda/10.0.130 # Pascal + + or + + .. 
code-block:: bash + + ml gcc/7.3.1 cuda/9.2.148 spectrum-mpi/rolling-release # Lassen / Sierra + + + + Note to unload unwanted modules you can execute :bash:`ml` with + package names prepended with a dash, e.g.: :bash:`ml -intel`. To + unload all currently loaded modules, use :bash:`ml purge`. + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Building & Installing LBANN as a user +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section is work in progress. For now, follow the developer +instructions below. We are working to simplify this process. + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Building & Installing LBANN as a developer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Developers of LBANN will often need to interact with the source code +and/or advanced configuration options for Aluminum, Hydrogen, and +LBANN while the other dependencies remain constant. The Spack +installation instructions below set up a Spack environment with the +remaining dependencies, requiring the developer to build Aluminum, +Hydrogen, and LBANN separately, by whatever means they choose. + +1. Establish a Spack environment and install software dependencies. + Note that there are four environments to pick from along two axes: + + 1. developers or users + 2. x86_64 and ppc64le + + For example if you are a developer and want to build the inside of + the git repo use the following instructions: + + .. code-block:: bash + + export LBANN_HOME=/path/to/lbann/git/repo + export LBANN_BUILD_DIR=/path/to/a/build/directory + export LBANN_INSTALL_DIR=/path/to/an/install/directory + cd ${LBANN_BUILD_DIR} + spack env create -d . 
${LBANN_HOME}/spack_environments/developer_release__cuda_spack.yaml # where = x86_64 | ppc64le + spack install + spack env loads # Spack creates a file named loads that has all of the correct modules + source loads + unset LIBRARY_PATH + + + + Note that the environments provided here have a set of external + packages and compilers that are installed on an LLNL LC CZ + system. Please update these for your system environment. + Alternatively, you can create baseline versions of the + user-level Spack configuration files and remove the externals + and compilers from the :code:`spack.yaml` file. More details are + provided `here `_. + + + Note that the initial build of all of the standard packages in Spack + will take a while. + + + Note that the Spack module files set the :bash:`LIBRARY_PATH` environment + variable. This behavior allows autotools-based builds to pickup the + correct libraries but interferes with the way that CMake sets up + RPATHs. To correctly establish the RPATH, please unset the variable + as noted above, or you can explicitly pass the RPATH fields to CMake + using a command such as: + + .. code-block:: bash + + cmake -DCMAKE_INSTALL_RPATH=$(sed 's/:/;/g' <<< "${LIBRARY_PATH}") \ + -DCMAKE_BUILD_RPATH=$(sed 's/:/;/g' <<< "${LIBRARY_PATH}") \ + ... + +2. Build LBANN locally from source and build Hydrogen and Aluminum + using the superbuild. See + `here `_ + for a list and descriptions of all CMake flags known to LBANN's + "Superbuild" build system. A representative CMake command line + that expects :bash:`LBANN_HOME`, :bash:`LBANN_BUILD_DIR`, + :bash:`LBANN_INSTALL_DIR` environment variables might be: + + .. 
code-block:: console + + cd ${LBANN_BUILD_DIR} + cmake \ + -G Ninja \ + -D CMAKE_BUILD_TYPE:STRING=Release \ + -D CMAKE_INSTALL_PREFIX:PATH=${LBANN_INSTALL_DIR} \ + \ + -D LBANN_SB_BUILD_ALUMINUM=ON \ + -D ALUMINUM_ENABLE_MPI_CUDA=OFF \ + -D ALUMINUM_ENABLE_NCCL=ON \ + \ + -D LBANN_SB_BUILD_HYDROGEN=ON \ + -D Hydrogen_ENABLE_ALUMINUM=ON \ + -D Hydrogen_ENABLE_CUB=ON \ + -D Hydrogen_ENABLE_CUDA=ON \ + \ + -D LBANN_SB_BUILD_LBANN=ON \ + -D LBANN_DATATYPE:STRING=float \ + -D LBANN_SEQUENTIAL_INITIALIZATION:BOOL=OFF \ + -D LBANN_WITH_ALUMINUM:BOOL=ON \ + -D LBANN_WITH_CONDUIT:BOOL=ON \ + -D LBANN_WITH_CUDA:BOOL=ON \ + -D LBANN_WITH_CUDNN:BOOL=ON \ + -D LBANN_WITH_NCCL:BOOL=ON \ + -D LBANN_WITH_NVPROF:BOOL=ON \ + -D LBANN_WITH_SOFTMAX_CUDA:BOOL=ON \ + -D LBANN_WITH_TOPO_AWARE:BOOL=ON \ + -D LBANN_WITH_TBINF=OFF \ + -D LBANN_WITH_VTUNE:BOOL=OFF \ + ${LBANN_HOME}/superbuild + + ninja + + +The complete documentation for building LBANN directly with CMake can +be found `here `_. + +.. toctree:: + :maxdepth: 1 + :caption: Advanced build methods + + build_with_cmake + build_with_superbuild + build_containers + build_llnl_idiosyncracies From ba52dceefdd5c24828d2aac288af375d203b7562 Mon Sep 17 00:00:00 2001 From: Tim Moon Date: Thu, 14 Mar 2019 11:49:01 -0700 Subject: [PATCH 178/443] Moving executable Python scripts for AlexNet and LeNet to model zoo. These scripts generate monolithic prototext files that contain the model, data reader, and optimizer. 
--- model_zoo/vision/alexnet.py | 97 ++++++++++++++++++++++++++ model_zoo/vision/lenet.py | 81 +++++++++++++++++++++ scripts/proto/lbann/models/__init__.py | 6 ++ scripts/proto/lbann/models/alexnet.py | 57 --------------- scripts/proto/lbann/models/lenet.py | 55 --------------- 5 files changed, 184 insertions(+), 112 deletions(-) create mode 100755 model_zoo/vision/alexnet.py create mode 100755 model_zoo/vision/lenet.py diff --git a/model_zoo/vision/alexnet.py b/model_zoo/vision/alexnet.py new file mode 100755 index 00000000000..f27b2e83092 --- /dev/null +++ b/model_zoo/vision/alexnet.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +import argparse +from os.path import abspath, dirname, join +import google.protobuf.text_format as txtf +import lbann.proto as lp +from lbann.models import AlexNet +from lbann.proto import lbann_pb2 + +# Command-line arguments +data_reader_prototext = join(dirname(dirname(abspath(__file__))), + 'data_readers', + 'data_reader_imagenet.prototext') +parser = argparse.ArgumentParser() +parser.add_argument( + 'file', nargs='?', default='model.prototext', type=str, + help='exported prototext file') +parser.add_argument( + '--mini-batch-size', action='store', default=256, type=int, + help='mini-batch size (default: 256)', metavar='NUM') +parser.add_argument( + '--num-epochs', action='store', default=100, type=int, + help='number of epochs (default: 100)', metavar='NUM') +parser.add_argument( + '--num-labels', action='store', default=1000, type=int, + help='number of data classes (default: 1000)', metavar='NUM') +parser.add_argument( + '--optimizer', action='store', default='momentum', type=str, + choices=('momentum', 'sgd', 'adam', 'adagrad', 'rmsprop'), + help='optimizer (default: momentum)') +parser.add_argument( + '--optimizer-learning-rate', + action='store', default=0.01, type=float, + help='optimizer learning rate (default: 0.01)', metavar='VAL') +parser.add_argument( + '--data-reader', action='store', + default=data_reader_prototext, 
type=str, + help='data reader prototext file (default: ' + data_reader_prototext + ')', + metavar='FILE') +args = parser.parse_args() + +# Construct layer graph +input = lp.Input() +images = lp.Identity(input) +labels = lp.Identity(input) +preds = AlexNet(args.num_labels)(images) +softmax = lp.Softmax(preds) +ce = lp.CrossEntropy([softmax, labels]) +top1 = lp.CategoricalAccuracy([softmax, labels]) +top5 = lp.TopKCategoricalAccuracy([softmax, labels], k=5) +layers = list(lp.traverse_layer_graph(input)) + +# Setup objective function +weights = set() +for l in layers: + weights.update(l.weights) + l2_reg = lp.L2WeightRegularization(weights=weights, scale=5e-4) + obj = lp.ObjectiveFunction([ce, l2_reg]) + +# Setup model +metrics = [lp.Metric(top1, name='top-1 accuracy', unit='%'), + lp.Metric(top5, name='top-5 accuracy', unit='%')] +callbacks = [lp.CallbackPrint(), + lp.CallbackTimer(), + lp.CallbackDropFixedLearningRate( + drop_epoch=[20,40,60], amt=0.1)] +model = lp.Model(args.mini_batch_size, + args.num_epochs, + layers=layers, + weights=weights, + objective_function=obj, + metrics=metrics, + callbacks=callbacks) + +# Setup optimizer +lr = args.optimizer_learning_rate +opt = lp.Optimizer() +if args.optimizer == 'momentum': + opt = lp.SGD(learn_rate=lr, momentum=0.9) +elif args.optimizer == 'sgd': + opt = lp.SGD(learn_rate=lr) +elif args.optimizer == 'adam': + opt = lp.Adam(learn_rate=lr, beta1=0.9, beta2=0.99, eps=1e-8) +elif args.optimizer == 'adagrad': + opt = lp.AdaGrad(learn_rate=lr, eps=1e-8) +elif args.optimizer == 'rmsprop': + opt = lp.RMSprop(learn_rate=lr, decay_rate=0.99, eps=1e-8) + +# Load data reader from prototext +data_reader_proto = lbann_pb2.LbannPB() +with open(args.data_reader, 'r') as f: + txtf.Merge(f.read(), data_reader_proto) +data_reader_proto = data_reader_proto.data_reader + +# Save to file +lp.save_prototext(args.file, + model=model, optimizer=opt, + data_reader=data_reader_proto) diff --git a/model_zoo/vision/lenet.py 
b/model_zoo/vision/lenet.py new file mode 100755 index 00000000000..e056f5291df --- /dev/null +++ b/model_zoo/vision/lenet.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +import argparse +from os.path import abspath, dirname, join +import google.protobuf.text_format as txtf +import lbann.proto as lp +from lbann.models import LeNet +from lbann.proto import lbann_pb2 + +# Command-line arguments +data_reader_prototext = join(dirname(dirname(abspath(__file__))), + 'data_readers', + 'data_reader_mnist.prototext') +parser = argparse.ArgumentParser() +parser.add_argument( + 'file', nargs='?', default='model.prototext', type=str, + help='exported prototext file') +parser.add_argument( + '--mini-batch-size', action='store', default=64, type=int, + help='mini-batch size (default: 64)', metavar='NUM') +parser.add_argument( + '--num-epochs', action='store', default=100, type=int, + help='number of epochs (default: 100)', metavar='NUM') +parser.add_argument( + '--num-labels', action='store', default=10, type=int, + help='number of data classes (default: 10)', metavar='NUM') +parser.add_argument( + '--optimizer', action='store', default='momentum', type=str, + choices=('momentum', 'sgd', 'adam', 'adagrad', 'rmsprop'), + help='optimizer (default: momentum)') +parser.add_argument( + '--optimizer-learning-rate', + action='store', default=0.01, type=float, + help='optimizer learning rate (default: 0.01)', metavar='VAL') +parser.add_argument( + '--data-reader', action='store', + default=data_reader_prototext, type=str, + help='data reader prototext file (default: ' + data_reader_prototext + ')', + metavar='FILE') +args = parser.parse_args() + +# Construct layer graph +input = lp.Input() +images = lp.Identity(input) +labels = lp.Identity(input) +preds = LeNet(args.num_labels)(images) +softmax = lp.Softmax(preds) +loss = lp.CrossEntropy([softmax, labels]) +acc = lp.CategoricalAccuracy([softmax, labels]) + +# Setup model +model = lp.Model(args.mini_batch_size, + args.num_epochs, + 
layers=lp.traverse_layer_graph(input), + objective_function=loss, + metrics=[lp.Metric(acc, name='accuracy', unit='%')], + callbacks=[lp.CallbackPrint(), lp.CallbackTimer()]) + +# Setup optimizer +lr = args.optimizer_learning_rate +opt = lp.Optimizer() +if args.optimizer == 'momentum': + opt = lp.SGD(learn_rate=lr, momentum=0.9) +elif args.optimizer == 'sgd': + opt = lp.SGD(learn_rate=lr) +elif args.optimizer == 'adam': + opt = lp.Adam(learn_rate=lr, beta1=0.9, beta2=0.99, eps=1e-8) +elif args.optimizer == 'adagrad': + opt = lp.AdaGrad(learn_rate=lr, eps=1e-8) +elif args.optimizer == 'rmsprop': + opt = lp.RMSprop(learn_rate=lr, decay_rate=0.99, eps=1e-8) + +# Load data reader from prototext +data_reader_proto = lbann_pb2.LbannPB() +with open(args.data_reader, 'r') as f: + txtf.Merge(f.read(), data_reader_proto) +data_reader_proto = data_reader_proto.data_reader + +# Save to file +lp.save_prototext(args.file, + model=model, optimizer=opt, + data_reader=data_reader_proto) diff --git a/scripts/proto/lbann/models/__init__.py b/scripts/proto/lbann/models/__init__.py index e69de29bb2d..3c90aa8efdd 100644 --- a/scripts/proto/lbann/models/__init__.py +++ b/scripts/proto/lbann/models/__init__.py @@ -0,0 +1,6 @@ +from .alexnet import AlexNet +from .lenet import LeNet +from .resnet import ResNet, ResNet18, ResNet34, ResNet50, ResNet101, ResNet152 + +__all__ = ('AlexNet', 'LeNet', 'ResNet', + 'ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152') diff --git a/scripts/proto/lbann/models/alexnet.py b/scripts/proto/lbann/models/alexnet.py index f56d7390178..ceef6a2f6c4 100644 --- a/scripts/proto/lbann/models/alexnet.py +++ b/scripts/proto/lbann/models/alexnet.py @@ -1,10 +1,6 @@ import lbann.proto as lp import lbann.modules as lm -# ============================================== -# AlexNet module -# ============================================== - class AlexNet(lm.Module): """AlexNet neural network. 
@@ -94,56 +90,3 @@ def forward(self, x): x = lp.Dropout(x, keep_prob=0.5, name='{0}_drop7_instance{1}'.format(self.name,self.instance)) return self.fc8(x) - -# ============================================== -# Export prototext -# ============================================== - -if __name__ == '__main__': - - # Options - import argparse - parser = argparse.ArgumentParser() - parser.add_argument( - 'file', - nargs='?', default='model.prototext', type=str, - help='exported prototext file') - parser.add_argument( - '--num-labels', action='store', default=1000, type=int, - help='number of data classes (default: 1000)') - args = parser.parse_args() - - # Construct layer graph. - input = lp.Input() - images = lp.Identity(input) - labels = lp.Identity(input) - preds = AlexNet(args.num_labels)(images) - softmax = lp.Softmax(preds) - ce = lp.CrossEntropy([softmax, labels]) - top1 = lp.CategoricalAccuracy([softmax, labels]) - top5 = lp.TopKCategoricalAccuracy([softmax, labels], k=5) - layers = list(lp.traverse_layer_graph(input)) - - # Setup objective function - weights = set() - for l in layers: - weights.update(l.weights) - l2_reg = lp.L2WeightRegularization(weights=weights, scale=5e-4) - obj = lp.ObjectiveFunction([ce, l2_reg]) - - # Setup model - mini_batch_size = 256 - num_epochs = 100 - metrics = [lp.Metric(top1, name='categorical accuracy', unit='%'), - lp.Metric(top5, name='top-5 categorical accuracy', unit='%')] - callbacks = [lp.CallbackPrint(), - lp.CallbackTimer(), - lp.CallbackDropFixedLearningRate( - drop_epoch=[20,40,60], amt=0.1)] - model = lp.Model(mini_batch_size, num_epochs, - layers=layers, weights=weights, - objective_function=obj, - metrics=metrics, callbacks=callbacks) - - # Export model to file - model.save_proto(args.file) diff --git a/scripts/proto/lbann/models/lenet.py b/scripts/proto/lbann/models/lenet.py index d782770853c..73edfe81a8e 100644 --- a/scripts/proto/lbann/models/lenet.py +++ b/scripts/proto/lbann/models/lenet.py @@ -1,10 +1,6 @@ 
import lbann.proto as lp import lbann.modules as lm -# ============================================== -# LeNet module -# ============================================== - class LeNet(lm.Module): """LeNet neural network. @@ -60,54 +56,3 @@ def forward(self, x): pool_mode='max', name='{0}_pool2_instance{1}'.format(self.name,self.instance)) return self.fc3(self.fc2(self.fc1(x))) - -# ============================================== -# Export prototext -# ============================================== - -if __name__ == '__main__': - - # Options - import argparse - parser = argparse.ArgumentParser() - parser.add_argument( - 'file', - nargs='?', default='model.prototext', type=str, - help='exported prototext file') - parser.add_argument( - '--num-labels', action='store', default=10, type=int, - help='number of data classes (default: 10)') - args = parser.parse_args() - - # Construct layer graph. - input = lp.Input() - images = lp.Identity(input) - labels = lp.Identity(input) - preds = LeNet(args.num_labels)(images) - softmax = lp.Softmax(preds) - ce = lp.CrossEntropy([softmax, labels]) - top1 = lp.CategoricalAccuracy([softmax, labels]) - top5 = lp.TopKCategoricalAccuracy([softmax, labels], k=5) - layers = list(lp.traverse_layer_graph(input)) - - # Setup objective function - weights = set() - for l in layers: - weights.update(l.weights) - l2_reg = lp.L2WeightRegularization(weights=weights, scale=1e-4) - obj = lp.ObjectiveFunction([ce, l2_reg]) - - # Setup model - mini_batch_size = 256 - num_epochs = 20 - metrics = [lp.Metric(top1, name='categorical accuracy', unit='%'), - lp.Metric(top5, name='top-5 categorical accuracy', unit='%')] - callbacks = [lp.CallbackPrint(), - lp.CallbackTimer()] - model = lp.Model(mini_batch_size, num_epochs, - layers=layers, weights=weights, - objective_function=obj, - metrics=metrics, callbacks=callbacks) - - # Export model to file - model.save_proto(args.file) From 06fc515fa6e31b8613d383a69243fa2153e182f2 Mon Sep 17 00:00:00 2001 From: "Thomas 
R. Benson" Date: Thu, 14 Mar 2019 12:18:20 -0700 Subject: [PATCH 179/443] Move markdown documentation to sphinx-restructuredtext --- README.md | 6 +- docs/BuildingLBANN.md | 228 ------------ docs/BuildingLBANNWithCMake.md | 351 ------------------ docs/{README.md => README} | 2 + docs/RunningLBANN.md | 76 ---- docs/build_containers.rst | 107 ++++++ docs/build_llnl_idiosyncracies.rst | 23 ++ ...onment.md => build_spack_extra_config.rst} | 15 +- docs/build_with_cmake.rst | 164 ++++++++ docs/build_with_superbuild.rst | 222 +++++++++++ docs/building_lbann.rst | 72 +++- docs/getting_started.rst | 4 - docs/index.rst | 34 +- docs/running_lbann.rst | 96 +++++ 14 files changed, 718 insertions(+), 682 deletions(-) delete mode 100644 docs/BuildingLBANN.md delete mode 100644 docs/BuildingLBANNWithCMake.md rename docs/{README.md => README} (94%) delete mode 100644 docs/RunningLBANN.md create mode 100644 docs/build_containers.rst create mode 100644 docs/build_llnl_idiosyncracies.rst rename docs/{spack_environment.md => build_spack_extra_config.rst} (83%) create mode 100644 docs/build_with_cmake.rst create mode 100644 docs/build_with_superbuild.rst delete mode 100644 docs/getting_started.rst create mode 100644 docs/running_lbann.rst diff --git a/README.md b/README.md index 932abd4d402..df2ba5b3025 100644 --- a/README.md +++ b/README.md @@ -30,10 +30,10 @@ spack install lbann ``` More detailed instructions for installing LBANN with Spack are -included [here](docs/BuildingLBANN.md#top). +included [here](docs/building_lbann.rst). For developers or advanced users, the CMake build system and the -"Superbuild" are documented [here](docs/BuildingLBANNWithCMake.md#top). +"Superbuild" are documented [here](docs/build_with_cmake.rst#top). ## Running LBANN The basic template for running LBANN is @@ -52,7 +52,7 @@ optimized for the case in which one assigns one GPU per MPI the MPI launcher. More details about running LBANN are documented -[here](docs/RunningLBANN.md#top). 
+[here](docs/running_lbann.rst#top). ## Reporting issues diff --git a/docs/BuildingLBANN.md b/docs/BuildingLBANN.md deleted file mode 100644 index 2e55abcb5ad..00000000000 --- a/docs/BuildingLBANN.md +++ /dev/null @@ -1,228 +0,0 @@ -# Building LBANN -## Download - -LBANN source code can be obtained from the [Github -repo](https://github.com/LLNL/lbann). - -## Dependencies - -The following packages and tools are required to build LBANN. All -packages listed below may be installed using -[Spack](https://github.com/llnl/spack). See -below -for more details on using Spack to build a complete LBANN -environment. - -The following basic tools are **required**. - -+ A C++11-compliant compiler. -+ OpenMP, version 3.0 or newer. -+ An MPI-3.0 implementation. -+ [CEREAL](https://github.com/USCiLab/cereal) is used to handle - complex serialization tasks. -+ [CMake](https://cmake.org), version 3.9 or newer. - -The following LLNL-maintained packages are **required**. - -+ [Hydrogen](https://github.com/llnl/elemental) is a fork of the - [Elemental](https://github.com/elemental/elemental) distributed - dense linear-algebra library and it may be installed via - [Spack](https://github.com/llnl/spack) using the package name - "hydrogen". If CUDA support is enabled in Hydrogen, LBANN will - inherit this support. - -The following third-party packages are **required**. - -+ [CNPY](https://github.com/rogersce/cnpy.git) is used to ingest data - in NumPy format. In principle this should be optional, but at time - of writing, LBANN will not build without it. -+ [OpenCV](https://github.com/opencv/opencv) is used to preprocess - image data. For performance reasons, it is recommend to build OpenCV - with [JPEG-turbo](https://github.com/libjpeg-turbo/libjpeg-turbo) - for JPEG format support. -+ [ProtoBuf](https://github.com/protocolbuffers/protobuf) is used to - express models in a portable format. - -The following LLNL-maintained packages are **optional**. 
- -+ [Aluminum](https://github.com/llnl/aluminum) is a - communication library optimized for machine learning and interaction - with GPUs. We cannot recommend its use strongly enough. It can be - built using [Spack](https://github.com/llnl/spack). -+ [CONDUIT](https://github.com/llnl/conduit) is used to ingest - structured data produced by scientific simulations. - -The following third-party packages are **optional**. - -+ [CUDA](https://developer.nvidia.com/cuda-toolkit). The development - team currently uses CUDA version 9.2. Building with CUDA support - requires that Hydrogen has been built with CUDA support (see below). - + [cuDNN](https://developer.nvidia.com/cudnn) is required if - building LBANN with CUDA support. It is freely available as a binary - distribution from NVIDIA. -+ [HWLOC](https://www.open-mpi.org/projects/hwloc/). HWLOC enables - LBANN to make certain optimizations based on the hardware - topology. Its use is strongly recommended. -+ NVTX. LBANN supports some improved annotations for NVPROF using - NVTX. NVTX is provided as part of the CUDA toolkit. -+ VTune. LBANN supports some improved annotations for VTune. - - -## Building with [Spack](https://github.com/llnl/spack) - -### Setup Spack and local base tools - -1. Download and install [Spack](https://github.com/llnl/spack). - Additionally setup shell support as discussed - [here](https://spack.readthedocs.io/en/latest/module_file_support.html#id2). - - ```bash - . ${SPACK_ROOT}/share/spack/setup-env.sh - ``` - -2. Setup your compiler and external software environment. For example, - on LLNL\'s LC machines, one might load the following modules: - ```bash - ml gcc/7.3.0 mvapich2/2.3 cuda/10.0.130 # Pascal - ``` - or - ```bash - ml gcc/7.3.1 cuda/9.2.148 spectrum-mpi/rolling-release # Lassen / Sierra - ``` - - + Note to unload unwanted modules you can execute `ml` with - package names prepended with a dash, e.g.: `ml -intel`. To - unload all currently loaded modules, use `ml purge`. 
- -### Building & Installing LBANN as a user - -Now that spack is setup and installed into your path, it can be used -to install the LBANN executables. This approach is appropriate for -users that just want to train new or existing models using the python -front end. Note that if your model requires custom layers or data -readers you may need to install LBANN as a developer, which would -allow you to modify and recompile the source code. Here are three -easy ways to install LBANN: - -- Building with the latest released versions and GPU support: - ```bash - spack install lbann +gpu +nccl - ml load spack - ``` - -- Building with the head of develop branch for lbann, hydrogen and - aluminum with GPU support: - ```bash - spack install lbann@develop +gpu +nccl ^hydrogen@develop ^aluminum@master - ml load spack - ``` - -- Using the Spack environment method, (e.g. for an x86_64 LLNL LC system with GPU support): - ```bash - cd /spack_environments/users/llnl_lc/x86_64_gpu/ - spack install - spack env loads - source loads - ``` - -Note that there are a number of options for all of these packages and -can be viewed via commands such as `spack info lbann`. To specify the -compiler you can add options such as `%gcc@7.3.0`. For further -information about specifying dependencies like the MPI library please -consult the [Spack documentation](https://spack.readthedocs.io/). - -### Building & Installing LBANN as a developer - -Developers of LBANN will often need to interact with the source code -and/or advanced configuration options for Aluminum, Hydrogen, and -LBANN while the other dependencies remain constant. The Spack -installation instructions below set up a Spack environment with the -remaining dependencies, requiring the developer to build Aluminum, -Hydrogen, and LBANN separately, by whatever means they choose. - -1. Establish a Spack environment and install software dependencies. - Note that there are four environments to pick from along two axes: - - 1. 
developers or users - 2. x86_64 and ppc64le - - For example if you are a developer and want to build the inside of - the git repo use the following instructions: - ```bash - export LBANN_HOME=/path/to/lbann/git/repo - export LBANN_BUILD_DIR=/path/to/a/build/directory - export LBANN_INSTALL_DIR=/path/to/an/install/directory - cd ${LBANN_BUILD_DIR} - spack env create -d . ${LBANN_HOME}/spack_environments/developer_release__cuda_spack.yaml # where = x86_64 | ppc64le - spack install - spack env loads # Spack creates a file named loads that has all of the correct modules - source loads - unset LIBRARY_PATH - ``` - - + Note that the environments provided here have a set of external - packages and compilers that are installed on an LLNL LC CZ - system. Please update these for your system environment. - Alternatively, you can create baseline versions of the - user-level Spack configuration files and remove the externals - and compilers from the `spack.yaml` file. More details are - provided [here](spack_environment.md). - - + Note that the initial build of all of the standard packages in Spack - will take a while. - - + Note that the Spack module files set the `LIBRARY_PATH` environment - variable. This behavior allows autotools-based builds to pickup the - correct libraries but interferes with the way that CMake sets up - RPATHs. To correctly establish the RPATH, please unset the variable - as noted above, or you can explicitly pass the RPATH fields to CMake - using a command such as: - ```bash - cmake -DCMAKE_INSTALL_RPATH=$(sed 's/:/;/g' <<< "${LIBRARY_PATH}") \ - -DCMAKE_BUILD_RPATH=$(sed 's/:/;/g' <<< "${LIBRARY_PATH}") \ - ... - ``` - -2. Build LBANN locally from source and build Hydrogen and Aluminum - using the superbuild. See - [here](BuildingLBANNWithCMake.md#building-an-entire-ecosystem-with-the-superbuild) - for a list and descriptions of all CMake flags known to LBANN's - "Superbuild" build system. 
A representative CMake command line - that expects `LBANN_HOME`, `LBANN_BUILD_DIR`, `LBANN_INSTALL_DIR` - environment variables might be: - ```bash - cd ${LBANN_BUILD_DIR} - cmake \ - -G Ninja \ - -D CMAKE_BUILD_TYPE:STRING=Release \ - -D CMAKE_INSTALL_PREFIX:PATH=${LBANN_INSTALL_DIR} \ - \ - -D LBANN_SB_BUILD_ALUMINUM=ON \ - -D ALUMINUM_ENABLE_MPI_CUDA=OFF \ - -D ALUMINUM_ENABLE_NCCL=ON \ - \ - -D LBANN_SB_BUILD_HYDROGEN=ON \ - -D Hydrogen_ENABLE_ALUMINUM=ON \ - -D Hydrogen_ENABLE_CUB=ON \ - -D Hydrogen_ENABLE_CUDA=ON \ - \ - -D LBANN_SB_BUILD_LBANN=ON \ - -D LBANN_DATATYPE:STRING=float \ - -D LBANN_SEQUENTIAL_INITIALIZATION:BOOL=OFF \ - -D LBANN_WITH_ALUMINUM:BOOL=ON \ - -D LBANN_WITH_CONDUIT:BOOL=ON \ - -D LBANN_WITH_CUDA:BOOL=ON \ - -D LBANN_WITH_CUDNN:BOOL=ON \ - -D LBANN_WITH_NCCL:BOOL=ON \ - -D LBANN_WITH_NVPROF:BOOL=ON \ - -D LBANN_WITH_SOFTMAX_CUDA:BOOL=ON \ - -D LBANN_WITH_TOPO_AWARE:BOOL=ON \ - -D LBANN_WITH_TBINF=OFF \ - -D LBANN_WITH_VTUNE:BOOL=OFF \ - ${LBANN_HOME}/superbuild - - ninja - ``` - -The complete documentation for building LBANN directly with CMake can -be found [here](BuildingLBANNWithCMake.md). diff --git a/docs/BuildingLBANNWithCMake.md b/docs/BuildingLBANNWithCMake.md deleted file mode 100644 index edd6c3a81fc..00000000000 --- a/docs/BuildingLBANNWithCMake.md +++ /dev/null @@ -1,351 +0,0 @@ -# Building LBANN with [CMake](https://cmake.org) - -LBANN uses [CMake](https://cmake.org) for its build system and a -version newer than or equal to 3.9.0 is required. LBANN development is -done primarily on UNIX-based platforms. As such, the build is tested -regularly on Linux-based machines, occasionally on OSX, and never on -Windows machines. - -The CMake build system is available to any users or developers who -need a more fine-grained level of control over dependency resolution -and/or features of LBANN. 
The LBANN team has made an effort to expose -as many knobs as possible through the Spack package but if something -is missing, please [open an -issue](https://github.com/LLNL/lbann/issues/new). - -It is required that LBANN be built out-of-source. That is, CMake must -not be invoked in a directory containing a CMakeLists. - -## LBANN CMake options -The following options are exposed in the CMake build system. - -+ `LBANN_WITH_ALUMINUM` (Default: `OFF`): Use the Aluminum communication - package. This will be set to `ON` automatically if Hydrogen was - built with Aluminum. - -+ `LBANN_WITH_CNPY` (Default: `ON`): Build with support for CNPY for reading - Numpy data. - -+ `LBANN_WITH_CONDUIT` (Default: `OFF`): Build with support for CONDUIT. - -+ `LBANN_WITH_NVPROF` (Default: `OFF`): Build with extra annotations for NVPROF. - -+ `LBANN_WITH_TOPO_AWARE` (Default: `ON`): Use HWLOC for topology-aware choices. - -+ `LBANN_WITH_TBINF` (Default: `ON`): Enable the Tensorboard interace. - -+ `LBANN_WITH_VTUNE` (Default: `OFF`): Build with extra annotations for VTune. - -+ `LBANN_DETERMINISTIC` (Default: `OFF`): Force as much of the code as possible - to be deterministic. This is not a guarantee as certain operations - in third-party libraries cannot be forced into a deterministic mode, - especially for CUDA-enabled builds. - -+ `LBANN_SEQUENTIAL_INITIALIZATION` (Default: `OFF`): Force sequentially - consistent initialization of data structures. - -+ `LBANN_WARNINGS_AS_ERRORS` (Default: `OFF`): Promote compiler - warnings to errors. This should be used by developers - only. Developers are encouraged to build with this `ON` prior to - merging any code into the repository. - -+ `LBANN_USE_PROTOBUF_MODULE` (Default: `OFF`): Search for Protobuf - using CMake's `FindProtobuf.cmake` module instead of the Protobuf - config file. This is useful on platforms with differently - architected compute nodes or when the config method is inexplicably - failing. 
- -The following variables may also be set: - -+ `LBANN_DATATYPE` (Default: `float`): The datatype to use for - training. Currently this must be `float` or `double`. - -The following variable has been deprecated and removed: - -+ `LBANN_WITH_CUDA`. The "CUDA-ness" of LBANN is now tied 1:1 with the - "CUDA-ness" of Hydrogen. At present, it seems like unnecessary - overhead to support the situation in which Hydrogen has CUDA support - but LBANN doesn't want to use it until a compelling use-case reveals - itself. - -## Controlling dependency resolution -The following variables may be set with CMake to identify dependencies -that are not installed into the "typical" locations that CMake -searches by default. They may be either exported into the environment -used by CMake using whatever mechanisms are allowed by the shell or -passed to CMake as a cache variable -(e.g., `cmake -DPKG_DIR=/path/to/pkg`). -The latter option is recommended. - -+ `Aluminum_DIR` or `ALUMINUM_DIR` or `AL_DIR`: The path to _either_ - the Aluminum installation prefix _or_ the AluminumConfig.cmake - file. If Hydrogen has not been built with Aluminum support, set - `LBANN_WITH_ALUMINUM=ON` to enable Aluminum support. -+ `CEREAL_DIR`: The path to _either_ the CEREAL installation prefix - _or_ the cereal-config.cmake file. -+ `CNPY_DIR`: The path to the CNPY installation prefix. Must set - `LBANN_WITH_CNPY=ON` to enable CNPY support. -+ `CONDUIT_DIR` or `CONDUIT_DIR`: The path to _either_ the - CONDUIT installation prefix _or_ the conduit.cmake file. Must set - `LBANN_WITH_CONDUIT=ON` to enable CONDUIT support. - + `HDF5_DIR`: The path to _either_ the HDF5 installation prefix _or_ - the hdf5_config.cmake file. There is a known issue with CONDUIT - that it may link to HDF5 but not properly export that dependency. -+ `HWLOC_DIR`: The path to the HWLOC installation prefix. Must set - `LBANN_WITH_HWLOC=ON` to enable HWLOC support. 
-+ `Hydrogen_DIR` or `HYDROGEN_DIR`: The path to _either_ the Hydrogen - installation prefix _or_ the HydrogenConfig.cmake file. -+ `NVTX_DIR`: The path the the prefix of NVTX. This should not be used - except in circumstances in which one might want to link to a - different NVTX installation than the CUDA toolkit. Under normal - circumstances, if CUDA was found without issue, NVTX should be as - well. -+ `OpenCV_DIR` or `OPENCV_DIR`: The path to _either_ the OpenCV - installation prefix _or_ the OpenCVConfig.cmake file. -+ `Protobuf_DIR` or `PROTOBUF_DIR`: The path to _either_ the Protobuf - installation prefix _or_ the protobuf-config.cmake file. -+ `VTUNE_DIR`: The path to the prefix of the VTune (or Intel compiler - suite) installation. - -Compilers, include CUDA compilers, are found using the default CMake -mechanisms, as are OpenMP and MPI. Thus, the process of finding these -tools can be manipulated using the usual CMake mechanisms and/or cache -variables as [documented by CMake](https://cmake.org/documentation). - -Except where otherwise noted, this list attempts to address the first -level of dependencies of LBANN, that is, those that are one edge away -in the DAG. If deeper dependency issues appear, please consult the -documentation of the packages that are causing the issues as they may -require additional CMake/environment flags to be set before properly -resolving. - -## Example CMake invocation -A sample CMake build for LBANN might look like the following. -```bash -cmake \ - -D LBANN_WITH_CUDA:BOOL=ON \ - -D LBANN_WITH_NVPROF:BOOL=ON \ - -D LBANN_DATATYPE:STRING=float \ - -D Hydrogen_DIR:PATH=/path/to/hydrogen \ - -D HWLOC_DIR:PATH=/path/to/hwloc \ - /path/to/lbann -``` - -# Building an entire ecosystem with the "Superbuild" - -__WARNING__: This is primarily for developer convenience and is not -meant to be robust to all possible use-cases for LBANN. - -LBANN includes CMake `ExternalProject` definitions for a large portion -of its dependency graph. 
The following dependencies are -supported. These are one or two edges from LBANN in the -dependency DAG. - -+ Aluminum -+ CEREAL -+ CNPY -+ CONDUIT -+ [CUB](https://github.com/nvlabs/cub). This is used by Hydrogen for - efficiently managing GPU memory. -+ [HDF5](https://www.hdfgroup.org/solutions/hdf5). This is a - dependency of CONDUIT. -+ Hydrogen -+ [JPEG-turbo](https://github.com/libjpeg-turbo/libjpeg-turbo). This - is a dependency of OpenCV. -+ [OpenBLAS](https://github.com/xianyi/OpenBLAS.git). This is an - optional dependency of Hydrogen. It is recommended if your system - does not have a system-optimized BLAS distribution (e.g., Intel's MKL). -+ OpenCV -+ Protobuf - -The following dependencies are known to exist but for some reason or -another are not supported by the superbuild framework. - -+ cuDNN is a freely available binary package available from NVIDIA. -+ NCCL is a freely available binary package available from - NVIDIA. Inspired users may also build it from source from its - [github repository](https://github.com/nvidia/nccl). -+ HWLOC is often installed by default, especially on large - supercomputers. Certain components may require superuser access to - configure, but these features are not used by LBANN. If it is not - available, ask the system administrators, consult the package - manager, install using Spack, or build from - [source](https://www.open-mpi.org/projects/hwloc). - -The superbuild system is itself a CMake project rooted in -`$LBANN_HOME/superbuild` (distinct from the LBANN CMake project rooted -in `$LBANN_HOME`). Options that control the superbuild system are -prefixed with `LBANN_SB_`; other options that appear in a CMake -invocation for the superbuild are either interpreted on a sub-project -basis or forwarded to certain sub-projects. - -## Choosing packages to build in the Superbuild -The superbuild system is _constructive_ or _additive_; that is, it -will only build the packages that it is asked to build. 
Any required -package that is not requested is assumed to exist on the system by the -time it is needed by whichever package requires it. For example, if -HDF5 is provided by the system administrators on a system, it does not -need to be built and CONDUIT can be built by pointing its build to the -system HDF5. - -Packages are included in a superbuild by passing -`LBANN_SB_BUILD_` options to CMake _for each package_ that it -should build, including LBANN itself. E.g., -```bash -cmake \ - -DLBANN_SB_BUILD_ALUMINUM=ON \ - -DLBANN_SB_BUILD_HYDROGEN=ON \ - -DLBANN_SB_BUILD_LBANN=ON \ - /path/to/lbann/superbuild -``` -will invoke the superbuild to build Aluminum, Hydrogen, and LBANN -_only_. Acceptable values for `` are `ALUMINUM`, `CEREAL`, -`CNPY`, `CONDUIT`, `CUB`, `HDF5`, `HYDROGEN`, `JPEG_TURBO`, `OPENCV`, -`PROTOBUF` and `LBANN`. - -### Forwarding options to sub-projects -The subprojects are largely pre-configured to "do the right thing" for -building LBANN. However, there are some variables that users of the -superbuild system may need to control. These are exposed as regular -CMake options in the individual projects' CMakeLists and can be viewed -by running, e.g., - -```bash -cmake -L superbuild//CMakeLists.txt -``` - -Several significant CMake flags are automatically forwarded from the -superbuild CMake to subprojects. These are generally "typical" CMake -flags (but not all; if something is missing, open please -[an issue](https://github.com/llnl/lbann/issues)). Some examples are - -+ `CMAKE_INSTALL_PREFIX` -+ `CMAKE_BUILD_TYPE` -+ `CMAKE__COMPILER` -+ `CMAKE__FLAGS` - -To accommodate developers working on edge-cases with these -dependencies, any flag may be forwarded to any CMake-built package -using the following syntax: -`LBANN_SB_FWD__

=x{!|L*#VM6P6oRTa7=X%MBvT^0uvGccO3{jk)?vXi8n}hjQ)IA1SmBBpl z7p}4hiSCh5PCk4;1ez=Z%XBHqDs}pcAflq(+jDcQnRUOVw2w`&n)^YtXZr&y9CkQu zeRttX&W2#L{UjXW#(cHqyT+vWmqQfz$y7l8N_;?MJHICrSKk%&uXCP~odI0we7f2( zd}UdPQ{5ydI9)-q0|v7$;VptmA*G)FLDsGjU+}s9J!_7i%GS-(i!+yGNxa5lv%W5A zH6qZX@Ja{rf|y%HZCqchUo_OQ#i?>)hD%Iwd%^?5$|dHQ!p_^_EZmKrQ#5kN$*Jm^ zV38DKzMZN7pe>G~RCSX!HFFpTN9{ zyRm##hIF319ZKhLFRly6-D6lTLyW9GO|yzOx@wP z@ykmqIR#rRYu#aIG$`yedE_o1X54rf&nuQ(azljb`qxMqaCKMhIw{-HxX0*uS?Mlx z&ohR^Orj)5!*43ui$=aKO9HiplK(=7$w+z0~A@PAhiRD)F&k}N7Q(B(hBCvR$?o{9B-pp!naElb`H zd%z>8c8<=(HxW{+E!j!RnP2Kzt*6&VsZzUHQ#zIrXN)VGevsf~0}kX&2hpxDl5gB< z$_GzFY8#P$y(Ymor9Q(VxwuK0&mE37GdD+II~cEzIkAAHDdO;IDh-~__ijEvCHMWd zwLIOh#2wf`3c^c@g7VAu-b5#Zknfv{78VdYU`7v^dhUexw5+B@dGw-EQ>9^6ZEelg ztt*t_tryAbh(?g(C1AKdI{!sYL8DNv@>Zgnc5_@a*nO6Aj zVXvtAA1m7<$iaC=^f(A|$OxGgAP3d&yhiv-mwgJ)E?`x)jP3&hVSU z)GxL9O-DMQmX(J;nE$31O>Y7H-P@-rK)AXB`r~{97TA1TSOP^wVh_)iU3&mrey`x8>)NcH%fGnLq!h6o*u(_7 zQ~LvtLUdy>{I>WeDT-F zSnbfgHeKhk=-hVj%Vtd!0lwmzNe7325!?kM)I#BA4X2!}b15JQYxxFW@~ygI8|-bE z5wjDUMQ_FcqT0lW&%J#MMI|h(DY5M(9d<1Ia)nU4zG8fb#I(arH0Xyz53|b9@xj$^ zcuS2h4WSZCm!y-&8xV;L%(3R04y{$qH$q}@TgAkIG_#e3+S|`m0hi9*f&+69f5)$g zZKp-Em^0**WeH*25+^II_nP$f4;E{sD9?8iL^Ny}pX-q;#e;{`q5?;Iz@Vbe;zw-u zvkyVEN_qqhbG!i>1T=>P-1eid7;OZ6RE^NM)pWdL73Nq|%4U*2_f5Kn-D>_VS?fEMQ1p&x@DwB5^KYpRa76Aba>?UT8&?`RNww z3(7{()FtAMu{;SL7I*?|!!3f>BzBR`EMpY+gXhG-?-ig~3>}?<6*sW4%Q({{^iu#6 z{5So798Vp2I?)WG0is&Fh%@h1gb;TM#U*hJkV5D6@u+n^1J!&dY>)($%)(f_UEq>n zT}Z)^QMXH^$Wws@Z^oLKT-BK=9_=v^Bn3_%=`3E^dvWXQLEeHmq@@t8Dg%_CWmJKd z4zX*LsAj?hkNwIi#~LE7gx!d16SJ}9ydyzME>i!vnHu$Mc$z_6BhguIoVxQ_v?97h z*S)2OwL+p)_JMKLhQMo&m)UGd4rqVeV%P_z+0e`41cvcn-PcA9j1=v5YHK?;_Oh#x zYFIs#{!X3Tkdc`Ka%NC8{aNWTTXnFS1zgj{*>S`qgiR>2Y+mMR#=8x?aXrIch^Qn! 
zI_6>I9#OR67smv!%6W(**;fwwUx zanRTDadNLi*Ry|wuawOpD-eyLUI{X4+)mWehSqjlFp*zVzuW4x!)hegG^t#l1x9ee21#%HoS8EV%>D4U zCYbEVSh3()i$N~hIiu)j(KCUK9qd9i2-X zBS}fnE-MR%2KR~uDUVT?{Rdssp8Nio-d#<}t4@>e32t_}UhXosATz@tT8iC`W_iaw z!zd(rZ0tAXvgsqX(L5G-N<6EadQL9V2HtVx(U(>~*@4t;zVqM!(yD zzWBUs%ZWowFlXbKBeG_P+L$Ms77cMev{muk?FCZD&59uK9a#4ljyGZ%#E{tMdnzkS z60P^HUs-P5El%9M1IlAhXpB>;==xp~OxW5rJ%2+p@cu&iZeuOf&ZD?~wc%vf!i9c3 zRKRu2B~T)6?(x{MEG&#Yy-PD$%e|s(w)pld(ikW3$J5V^z`TuMZQuniPoM|tP=+?? zg^;N_bJklajotpMd1#jM>K*)2D8O()rg3Uv>1^g4K*>-z72lzj^{Cm)I$mqTIyTr*!SQbHAg@dvopN)DOQ&SL|V1Z?aK0mjMbsrCI*G zmAxennW(jscbJOwoy*8|TCM`=%Za_wK{?{rL`*Og5p;#jH@k=<+UGJ`n0NMeMSqOy z58l<#_HU>YFKga`d*T2of4-?1rjA*j#?C+Y9nXN2cvBe#f1)+@+;|)|R4q2kRGsj& z|5Xdy)2EE{seZU0%_1YBUrWEiU)zlaT8MpVYiapZXv<1)-Dj|TF=ENS3}l(f?R4{v z@09eODmbr!zg;T4Smb#iS$zc08U>F0ktH%kzb;W?Hw{Bi0vpL^0JFA@>>Fd$-l&Xk z(>;S3FWg~`oAw){%nOza@q2uOx=JIt|W1S%XdTJ{$~ zxr|TlQgWmM>T!~qnBSErP4;<_%y3&iNPra6Q_Xc6QtV z)1wkqXpk5K1Pp-OkA;E!p&FlWLSX18>?+v~^aK7IGAO84KFng1c0J|<3XEFF8Akb- zGPMgFka#PfBJ+WB4FV&9ce31yPicUhD{?*e$IoF-$JGblfX})RwVyAdbu2e(m$3IY1C=Y|;7*ByYXPnb`TbNmV@YZ(5jF=U zGXE+QkIqN$J&F|~sCFR0Ehz;E>-$UlIAF<+pXx7=QyWvVbV*yXRBW%|fQ#bV`Cy{n zLCO_)=2o4sEcL}n3H*u2PA(RtPEV6HZ(d%A>IHkq4T272%6GA6akz7H~8()7n;6TI#=nrUAs+O2fat&A}uw0$= z!cfZ{)6Pa{IXFC8Hb>4|SC22U&S>8H${%c(Ib~wX4+D{7L|-B-`*fc&z9*QIWIH!q z`X`BivQu>LrBeUwBv_g3h3c0T8PQu@GASrqLCCBwonuU!!&{d}_dW^kh4VgVyK_bR z)2J{bslT3?{^qE+a0^Xcm}679?LpT$E0*v>#^wavsD5l8?}*F9-jD3h^?~f%C#W4T z{i7)iQzYaxmg@M$ZotM~k z@cul3yz#aaw;Y0~pGL8^X54r!0DpK5U8-X&FZ^@2F!kWHWS;*Cf1Od5X?7;hWj`NTlI zS!Na=eM0A~E&@Wz-_whOr}6DieyhRoKy+G+N6o2+vxDwv(a>IlvQ6Tjnp}IKf)*t8 zPxTwwpb8h6@*VkfsqFzh|MOgnSpK=yNKP~}^-`~lTA_HgIT z&tzy|!S5kWHG}=Q9lL(DSztJvz9rcQEj6L5AFbn_698{3;inph_dl?vi`$N`T0bed z!sR&=TCr1xj+jofxvG=z9D#l5l8}XR>n&aTHnMHO2%?g+wCHyWka_OosHU`WTqc2Q zmCScQJXih(&{bvLpgXsb75s6>k(p0kvoW^p;mk7>fzoK-^+7GZbZ--3W#I(#gX#lD zLfq!4|HE}pr)P9><0kwPBsVY_FlXbcvvfiIQrmx#00Yr1ZaRKk9BI=yI2u!m<`6cB zi8}%^3rX((QBv6JQh9Dpwv+Sp;0?vR+HS{{K*PfG*;Ei 
zy+@YkC8%(24I3YMXaFV`ak(uL#mfD{Y$H)FpMdy;fMRLsa9(#tOL;mf@lYfI4v(IO zvT8b%{~rRVfVuSEbO4a3%f`2?dEFdFmvK(ZdbH+tKQC0+qYQst;>RI zv*77gG^)MvUgF5DVo%(D;&L7a0qtP4`0tuwJSLphjKhK$T#j?To|3O7o&hGd670H- z*XNdcY90GE)c~q^$M(vo9=%0!b8@jXDDTwL1IB~dIG|6@EH&Cs`Xtpd6sJ}Lk#pZ_ zf1K^|AcXD(uj-1F*AmYqpVx@>xM32$zzNxerYd!(8*e-BHoV;k3e@SU@ztZG*oe)w zxr}FL_rf%1B)p<6lIBYr;Caxz{yJZ_>W43Tiur5(r-|pq;5)pS=Q(bXjGCIpu>YnB zysEXmYvt@~5meI#eptW2m-t&nXnEn7Go%7f2AKgyKf%Q017h#Pfa#{|DzJXaQbiXS z4hfwXon1RT4F)1PSq0qCP~;}eVe-Vgy0oFoC;QtCKPOLB* zL+7Cz(31cYD2`Vzu;5>Ui2ZJ`y~A{5mJGeYMh@^%K6VlYH&F2GIp#|Xynhi`9$5GT zCYZEzVC^C$X2|4ANuisWIS$rXtCo0bM_Op!n(eO_ba)a881MJt5!yM$kJzUW(fg;C zmF~+0bL*iojrjEgMAAySE5%fuAOUCOJXv%NZqz{6Wk^f4ORpDoV0J{y-NR#o_~w%o zR))uX)2rv*xCNGec$MC|vf779D_YUh_cVbhyivf#I*#?q+!S%q#MQZkr^>ZVr4W6M z%H-py^Y&Gs_09<@QupYA>_`fNZ|RLp4hi9lE?RrW@d=nUX8{&J;E51*K0J!VUefd_ znt+0r1gi~{M61!*?Y_5od=eipS~_BhyR<377!+>f%obbXasXy-eeSYXckw3ntxpkh z!XRT?(7|GF75f)Kl)h*#?HD`TsDWY`*6p_XBlQ)x(=$oG!*+6R{u}oI1ro~SXQHVU z*Q)4l7vFtbbd5l@XOKZ?&;eVGNJQU`o7QyisA_)t;w5gAJ2H@FA-~u=tUyVuF}tcc zv1vzt6{~9?RjEJ52)&+3()EdG$ojEEkELT^3v~P-(^H@q#Xu)3)Gvm+`FO+Y1us9p zm9Vk#AVp5?@;rC&er#8%FC}GG7u!^J_QLYGYS|PpIpSg7iWpP3ZJ1S78BL@WS8u!>5pt8q zb9tYlIA;Xm+{)-y)}L#*kLJ@ft3osmIXh}|Ipw_2q`%8^Gdh$Y7=`^hRtnaHkIP&u zX`PAUm-fzkXpg3IAw@qbw{81IE+|Oqsv&#ynOm+I#i}|jwwxEEE`5CZ@CcHOJEVud znNt@wiGyJYb4lp^I`gnKx20r={ z3O5p6Gp%KGVwMpKi!1?=_dSN)J^#Q9QvDjDBJwECP7eR^}l-kqs6_N$1$e>PlOdapRh;NN`#)Y znHO&f0kIFlpN+%g^^NmD@8UWO45Hk5kEGA&v}3b*|LD}y^TcJ~jwsJp5$A|<3yTV1 zri6;v&?DUh(8C7!29~MMz(vuE*VGBdAnD_E>?yiij8J2=$~RMH3Xus|bPnxrL|SjX z&BOP+I45Jr9O;p}K{?gne-)vae!Jh!{^f(nMEw}nC82JZdREQpkomCiJo=Hzu8E@s z!n6ld+GhrR6x=>dRa>b_7P7JWTF>#7%ja^|Nz(Cx{kwks`n^XMTCcL|g-Y|r|02k< z4s~{{b3U5aPdrmd)Hz!wCR-Yr^ss^H1C6@{P3Z$x?_b=TX9Saidqa1w}^qcCl2sID`OcNeNP;&AH64cNKEG&+PN&E4+F{7+C~K_ z>~V?YeJDQfEtfDkWWekCj^5NOh9Ah9EH(p9EJ7cgi=<1J+pZu%R>e}ik=lgKloifX~OCuq18uw@3;iW5udgSv(m)!LS88gJ|QKE3q zF&@u}-Q!2Qy=LND66$}}3xzT~UL=b#jYb|$M)<{B>AIPh4KR$rz^*QoC}zm0(b`O$ 
ziow*!Ie@P(eS8x>>!A}>T3Wh42}pw(p`Y6!xk!Ka`F((xSu7C4`lS`{!(aAvhRNU{ zefruy*SAv^Xg9b`e&h|jP zyiss);xj3^mb`r07iZv%)aYqGaRjX}8DQ6C??!Agh^ZMivVJ?cr$*gO+jegxsV_)C zXf)3kk3mGI@mM`k;{kq@1KfC_IKa+jvDm1KSSxM=VwT4tRJF)Fe`k3|M2+`*N{m>< ziL|u+TN&9j*a?TPC}q6eNyDN1=;5Hv zbbFwUv`T3(_9m~u=OAz9xt|v-UP(*}5ndfK$`-L8=`Td5QQ53FKUp^^9Q0gxFwf7x zq&MzOl5nLeewy3txL@{l5_0Cd+ZPY;RxXYzIW%G6jiI@%I>w9e;@j(_5)^o`ts`Z% z>ZXbBCs`L$Y~2v9!I68*rW{eeasFg=ASb=B%7!p1|MW^ii(V(9;h$SAy5qccRJfK} zjN0zz%dAjFvl17ysjksTV84WqeAn#xIu5K+rXTczd{pMEf4t5&s^)`kQwpnvuIZKmB9Ve`Q-+-eQy9oC%x=^6 z>W$pKc-c$Q!5oci?O-Q_+oXM*0-RH|UVLHd#Tyo=tM^>s%3L#{-iUT5R(t(IyQPtU zBU|*!>cu{pS8d3GsTQLA`^H@YO>%)wQRu07PhT8jHbR#l<~CFUgVfOJ)mJCjy!?=0 z=F3o`JBMK&i`Wdfecuj9JbmirtwbA^PF5_rJ|GuX!b(^O85vqhyTx`fj@2I8hH}0t zb9~mAzRS}CWN_w_EMsIVVmKWN2yR?y>9TOqD6^neFSB#9g*OjOr+cnQYN@V`;ss!( z)tEcZ0vv`P!)($TpgSi;9X?JKGE~s(q7ACg-W=Hu%#a)w5PoPc{-8uO#gpnw96g5n z;$vYK(>*bdXEYydK+sz;R2W(cHg{e@l%kK`8AGLGrTmLdq4PG&hd+k05r)IlCdElS z6grb7SMSkqx)9~lTNP5esi*HQ^r=BXlqKz{%Jir9T~-Yj4)pjOoJrMXd_CcgAHFLL zdrt%T4#u5z$*_?Z012Cm4O-Q?dT?`U<%h?817#TNg>cxk!BA zRt|kD9T@e;@aicn`(ue=O2fMhY}!U7W11m3(Ncls?|iR z)IwC4HzGPHK>T0cTGR8BUQ0bPP?xFGjWo61F~Ibfohrp<_Qh+i@5sK`o-4KfVT{@x z^;&@(zpiRZ?0vYg07$h#7 zW#ZTMU-r@vE0@$(A|0+wr_>3V-qivDF+z(S_7PS2q*;$`J;=SuD6tufD9h&3jAK#s!i7ZAKaj=Ey?yUT z77S#sM{HS=FSgd3fIiuU&&5$Xp%^{Cv2V%00nvp|bqe7P4_{OPq4NhFd!IK>%1|b5 zATIxE3lZ;$L3(SKQBsGM{6x0~Hxm=cLc$)#>_nY4v;kWhV-#_S_QSl9$TLWvWJw@e zcN&;HTKE2ofVRTijX9CexhoD7M!20gzKO2B=jS~y~TmI89uDd-^6O(ml88TI(M(u6DPxL{2&GB z(y^RkUgEx@4e8!1OwK3O9qdx$ni9>WYCn1NcP8mcQzxGGA%@Ej^bcW#=~EG z=t*TlUa7XG^ zuC%Pd%<=ddy_?zBded5H@)KsW%ou-h0>;pnX72>k$5YaFks3p;CnV1_lf_TrbpT&V zz)2;1KphLXII35Z%C6ij$@$=VA6+H6)bWBv^Ko>OL!x8bNzS97`*W8<9F5KTpriZuYQ{bd+<)c}lN^I2BojIo+Du*Y`7-o(oOW00CRU%6Oa(64`-PXIK-|n>8xXmxn(5mCyP2q-B&!fpxj}}7=C`pPl62@*npFVuJQ<`&k zzMc5q`Nj?0KIwgcy(X*NBCNFDx##O2?s;^O zpjg}DmVVFa1MZ8y(|ZW>dQ6BYd%~^#Wd-rno~;90koXysx118wyJtUbwN2$oO z1Of5$pIC*;gm*eTDLc%QL+#PByzHjU}Fj2lfMf>GB{cH2|=`&;X7{zU-N5N?8fpBfxILPk$*M={#xpLhU$g0EaKz8JNt<}Zzw 
zE2Zow@IseeF#{_$CrbbF)Oqrj>r&hJM+0zib3ynRoE^WK;I#A#is~+xaLsO}-im-u z6i(e)sy8{73&@4OJppP9=+rJJW%*e_X=p0aL5QZYVw9QL`p^}ck!uK(STA`3bfU!L z*bSVC6~g-7bLcq=GT{lo?REIdy3VdIg^cPnzxH7MxZ)NwtJa%l>qo-f`5FH-zBd8; zMAw6r4(UmeN<0UD4$g+jZOG7yHCXaD%Fc_2vDV*rX0OC2m`Y4F65sgf#D-u>^Ci*e zPG!9;Q7o&Yu{VrftHNb1NyVyiBm@CDCShRl-v8Dd6R^Z|OIC?QZrNyKziu<<#zfl& zQ6TWvfY-TnYB9;BD|=gaJ?0NUKF|IVj$fV7w}eo*v*EM!8watsyaa4@^| zA&+EBYyE(1ZXh=y>*f0Yu=kcxQMP;EFbDzyf~1syQo<0@4Fb~AF*G9G-7$)SN(u-_ zgLF#_BRO=3Gz{I{4FkR>`+4^LTwZ%$*Iw&h@4MFXu62Jn@sl5B;5d)p|5tzgJT$m< z=8uX|4&)v7oR9^J?_Ui*_*i!w)bD%}Z=xW1iGL&}64Jat_i_ z#A{PZF&8|^RJg{A@_yYPM^>f76;+?mS|$;Ii_0=oO_vWc37;AlGdd`^PUNU`oJr$g za)5jgoEiEqqFEK=wbWd7#-3^bx6w7VY&1Xoj!M2RPd7&oW?&T;RaS&%`g(x(&bS4d z4`J~euF!9FHA^Z(+E*Rg*^dudw`s3<>@Ennnvr=-fq1vySU!)bX`xq=oRXwmiD`Tb zLFE=`#Ym|qrA2@VP2|!N>Rwmd#-nQ!rJ$S8Z3&b4Hq(s^3L-%vVsP! zn$5tRROM0TkKQT|JBnAj)hf=U8Y5`st1N{*p9%~FK%1?`^O>3`cEI#WFqfb4E3BSfl5z|Dbgj2@HfpoJ1X@txr zyGZA2C-5FJ<*1P7N)Cw-)RkSha0PDEdJIR%_%4l=`x$Je;*IHkWoOM{k#DCoX%{^T z13%2nNy+m{lMmcUPo1gbB#-8IQmtCKy8V3c9A7mltb##zN@(fO6^Ck&nAJ33M(wP` z2)>~~?QrK$qO3=9ffSsYq|mk3e!h*EU&_dbQle!mAJW?;K1%y&&-nHugUNGoKky*h z1A!&lajEX1vOOJKRirF1d#27`p6uGR9MN8HgAdw1xS$kC$?Tat^H>TqtpX(xz1L)J z2Lqjqhn$cr_XZhv=0j=mEwf)J>J7yKZuL)&R1fP0ZE|aqClc3b_ zJPD=&OUzGU&z|&rG?5C-ZUpw=Jl_>qR~B+Fh=)0szNyo-8^mVZW@($5fu5QQId+PG)qvjqj^_roy6GcN>mc)`VPTQU4$92)0 z@{KW1UYj(s4G3=R#cbN99u*Vz*$piaVu`#ilcrFtq$kJ3dDpxqHae`ru0{8dTvY4y z$QB-zW#vjgj1*OQB`UC`D?|#thCaHYggh?VGp{g#dn|tKaicODdl=MLoEGi}Hnka) zwxyZOl6&x`pbhU3)4@40rjfga#p^>)ERE32a5!N7jCyxmlpCbEz+Vqc*Nfn5dAA{z zmGcyTaAo4E2|vO4_eE?uviK(!s8g|i&nKby@51aR@%p-OaE}Ay`DzB zz)og-xP`573av3dAz|@56ZHu>qwba$ol-(}=WtXXKn~Fflua6eA#8eTGnUbdg*$xe_o8@5pb* zFRgbI#~l(tVwzHX%gP&K?Z&`z+W9r%kX6igfNF)k=GLDQ$ zJY6Yf3yW$z>Xg&`^bCh{oy!Mr`4nYLCUy}NTJPddsWz+>e(Z$v9_LvseW=Gw)0;=< zO(cbRo7#y=?_scER=xw=iZbMfk#W)V%}e&&QYBA}jJspG&JQH0m^`Df4AN%n*8g+lZz7fn_nmz2o;f;GGH0?)h+$edjKM?5xEJ zb=`}^Sbn6a9}dI_UfZX8@4ZSfr|*fz8%-F{W<|yJF>~KC3hkjT77O2*N&>u=wYmB5 
z(JS#S<3myPkb~{Xpewmp^mpy|vA)O$(wYXQuLXfN+U!B{mXM`dHXJ*9;s^4McdIbQ zIa5}M-5fBL&vQj8;;jQnff%aryOyca6F0cV;CcyNOO%zXVHd=tV0LD&%IrsEcLb00 z)>RXg?NP+L^J>uwL$OcSX}7p9HVsH?F#Cn=UZpggiKjS^w_Guf!QJ8F8wtu(IYA_= zqr3nrT$jQ1!ks?&s%I4%+ z?n|wZC8RVD0rL5Nc@L{q@uRh`*oiL6AY)c0LGIJKp6E+}|H4a3Wj3cW%78bc!avG4Sviob?4qed6P$6A+q*o}JB?}@*c=obj z;*~f8r)e)FKK^O5hD&|ySkhJg4W-}vbzOeQW=G~2U-cqcrEcZJunnt5=9V2{DtA!X}R;RnPRvBvqYhq{oeH)YuA7xV@(Ds{XM^8?K272!Ri7g}}6H zhPe8d@z*zLrR)7+90%WgVu{s^v#Af7kdn7m4(}mbmym15xb*zrg$Ho6v-s6Aq#4fV zb@AZtN5Xz%DVY%fA1(2TC0TWz*4>Q1L4Bwp?Np)jl3l7n>U)svz*hglfZv!y z5<`&@!G1tGLzwY>6I$z@Rf&gRH#I~(cBVR%zx+ZepNU;5%}tJn?A|;+@#nt*rfa(K zyi&mtLSR2yD@Ac!b-|qOwD;zia|^Lqoj%z5Q<&XFry#b^wUP(vUqSF{n+5-54g1={ zom^$O=+$LHcN$A|v9z-hi@toF^Mbx_yb8N9l(np4p6fyfCG40!YA+gu_qPbVIRBOA zeAY5R-d#=C-YHJ?Y<}Z4n{h~7SULK8?iX_`hk0|1PoFWL@X-mIG)HaDA8sXqFQ2g zCm(&+5c#*LyF!Ivd)zmG(Evz~a3mDt$nDDZMQ`LyEp$Ob*%o3>VCa>>?1?VAa> z#NpeyRQuj^Wd*sh?dp;6OqlX6%haB=@v^|eu&tr^eCd&>Pl2(4p#()Jh|`tZ68q5G z3T3zNU1THC<;ecO$OT%w!q;M>rZ9M&zH#LWohfr9y$3GmyYumQW&V_62we)BoQQaF zic7z@S*5S;PKgo1D&ZZ<#<(qho#2+Lz3;9oqTdXqAt}A zl(VLi9B7;5gQ5|DOA&>Xv}wxA-x`2jKmRwy0Arca6305}MPc$PZz2C)P6?=!M(&uxA@y%EfySgY-7>5REd=isPt#X{2%?`fAoX@(GUJ_ z)DN}_DHRTI^{QY6*89!fad(K;NEeNtaduLtaTr}7w{2B7i6)BleDrI%-?9vVWGF|C|*rO$=W9@R3+e zSL~^o3bxJmHZhK%+-v7|l}l*`izUf|tC`czU=QBBQtqLf4$A_hc;d`bgl+9Z#d!Q9 z1$3NB&b{Dyw4eJd4Kew;DUjwf($=ZxN3%^r-SYADj|#a{i*)V~A9rqz)jy0A)$^yE zg7E<#(^EwQQh53g5<8n@X|e+6G>YXOW*7y*(-`#U9d{)Lu))J22<_LS6H8?kI(`-?FuwK(^T5T|qQ;--O8g3M;0h!@d82+oK657QzY7ssD*bS(R2;As z02$^=7F5Cp^|IPm_M{NCgQF{5RIL4d#)imUUSrUr8hITfG1Ie$Z<;Lg&M~ZJ$!7Od zqS)skTzN1uJlD?q!=ggiTiz~XL!RIb1Ur!OxVnFW6e;m>H=Es(>1ox|j>#8|MRML6(risT{#JI$y)rJuYxu%{>*q%M$Xf?i9 zs6=M2*w^^Iv)4B4+F5CyM5qK`%lf#gOl6?v`FdXW)ZY1{AYw3n-rS`AIpT7zH67<( zv8kbGj^yjTRn!SVdf$2Mg$#)?d-jm)sbk~Fj4zM-Tz`D~VM_vi*BXPcS%*~I4+G^q zl8QxeucE=vyI7n8>`-+)oXWycI7qVzER&i8+9|$sOfwM%oLESxq$C?_buePjDie%XJvK`llxAYf92ea~fi{ zE8$UyanX|Vh@%%ld-`(LZtGb(D_>rsCx166^129?dn6(Db+v|KsZO6axHxAfJ0h2B 
z^Az{x1O1m$7fZYEXb#K{lHCS)Bk%}PG;=9SYH!lg=DV_D;tHk8zNMF4shs-5rueQ4 zyN!h%mCh;4&R26wEQ+=*0I4wRE)bWd0|eavsv=hZ^NOEq{15PHI-t*ld#VX?m+JRi zvki9IpK6l~zp})=MnR)b9X&#-++5J?m-Z#={$n5sl~#|`8D>a*N;hIptt-LVUXgFv zL_2Sp{6Kczno@5l4xt}6vfWmv?yat1X_)s5Wrj%rGpeKB4;U9x$1ER$wvE2!PjA+^ z$8gQqkbdV1`yOYmj^P{=vnji#9I0NGcu=%hU*(%^*oWPn-FN~@UW~6uDYd`T3za5O z@Hc%4WoPH56nJR{`oyG#PNSc6M~<&(G!)7 zUB1(nfBtn9)ibl=mn%vhO47n*^fB1bJ%y^ z;bg}oVLGzS=gX&LWLrC0cLln1C$Aez(oEv?M4|6jt<38!Kro^?C9b9GaTo5r5V%1Q zJY=u97`hI%^}4$nQ^8?J)yDCa>ph;1N9P)eZHv!Jd#=5+ziXlOLv3e9`SS3e8JmiG zuVvA4?z(9;k5?qm?AW_|)+p&-@>sLD&ZMxB^qzaRDlRfBHP%-JQL^0)sYwX)N9z=% z6Mc^I$~4B1*L5tDg=;x=S%ICs7qH~@elD6S#v3|ZS&OrmFf5BA%@9#+?7%KT*_hA< z)_o+W?>#ih8EdlzVY!_r3K%WKCOLCYcX1AsdC=qAR7ewNGckFb5P9CZ`=0+dQTP9c z795uB{`}m@isI3eO59KN?<-hh4c^AYGUEK0G|XEnh)D*zF27#xMo}q&Hmg7sT`Sy4 z?@`3h&uT3GIvApSfgtT1P7qI%uYPVZw?z zEJe3T&AVB0PfJoxEvv8mODBKsSo^IYa(^bzYow3Zm6#`je0#jj-Yl9$Xqf^-jul$R zl59oXDGpbkl)L?-m5L^iLf%AIi1w!+f(u^)g*cT}(wQlR3G*VZl65>YMwNJ+$r4#yHgz-~C`#iX(+l z>_DSh^C%fLHFjbjy6M|Dqjy&S3#E5>vR@D8TK=s%#kloAf2%7~*4x%fo0F|?wC7D( zpi^G@XB?Q37m6ijwwp7gKxLL(;A)MAA5I14S?^EQ%pQMDr6=Ez3Glp~p9?$@b`+8E zYh7-sM$V_c)vx-U$4zmM_l6G&zi5Od8!m1)@j<0=*r*ZBt~^|`6(m!i_Hq)OZ?>w* z&)ST?g}5Z=>@hi4%`Qk2YkmZd1x^*iEw%Eti?p@(4 zO5aXTZ;jUSoP~GLU>S(ek<_2a<f{3%$CCR<@b_IEA!Wyv?-p17;^2tc$L@5SI<`b zLo2m>g|9><`J%gbalU9H`f%SiioJg~mTQ5X{q+q>7ZX%5RJqRvq@&03#iu=i^+19; z)9SGMUIcH7qS!r5G?Dp0aWr+l3^SOm)wsvvN{yr$n=tZ-um)y{&@2 zwS&G!1?hqOiac|QN9HdSx8!+hS?c2pUL`T+x&)45b;B*{_5>*hyB!0|{HDgecPf(u z6?otYn6#FmLffu%xzQBQ^5-Xn`F0U?mLi(=MH@Do;rY5ByhXA#gqGnnPuEK6BAsO` zlGblu0cu(~Bdo=15Bj_vsfskwy)`*mo$83D_eE{wN@x>ScpKUH<+HGUE3$IoaY z5h;gmgShm%<6BNOULMvVK3l$SWz2O6xwjTXh=Tc@ENj$kTsMKoYdgNjAifUwMfmk7 zwv4cgE6`s$094)0k`6&>?YEiWtT=}4hti}&QkE9@^tfgb4Wr{y_(t+VD*ik^Cq7eM z+&YquIw|4VMIIho=t|sA)-bG7wU^i8T;!3PHmj=#lpA8iLiNZ~&mmz{%n4MT6;`K$ zteCm{fzoU%E6?D|pZ$oG?CpAbQoCYerR$$EDvK&&@3Jk`gQOp->l=;l0%d0}oDa~OmSRh#9~27sv8AiqTA;i_-2e_2*6qQ%ebU93 
zA^RV${ei}b4OZx;)=K6dv!_Pc#u@u(SMU9eSAD%W=F?05TtPVt#{5$U6F1x5DZSUCkfV*{;c21@WXXNt-5V^)#JfHpS3z znSLU2>_z!e58yV6X%Y~)q=;TqEw7OFEv)q-Y1mSvRSihsxlTSFc0MfcBhkN-?Ac+n zKkb2tnjYzK@Oq-(9FX_V`W)wt>kiFgtr;rxw$Q$gzBg_g)W3-5-Hzw~%=$|3)TFzo zQj34|cw&g(UuIBS8ZXR3m0U$qaG6F=KU_nll9k z^s7rlTMbKv4XHf326UySh2ydsw}9k)08`{Z@zF_EA9kBNTIIn-+1<}XPn?f9@|Imh ztz8rW=*9Av(93FszCkBWR7fDvQ(%tILxywYZP)fQ&T&L_4}@e+U*C4nW^5m)kIBMl zLNLVmnK>5K_noMIkPi<_m+P=ksi?aLawgl!SLrJmc&tDEv_DEm(WFdvaA9p&D0w%x>dDHGV@o#4EQ)_tCmt{fU*Lck1gM zkzKMSL{;9m>NRx@wXhnihN~O94~#=9>Du;VAFRDyj(YggOC~x)RZ|(}O@qKFZrdP* zkbeNMS4zbpW7-ck4tSjETR?n^metXwX|K2Cs7EbAm=%uY+_;fOVEW(%rYpDXYkiOa#8BKeYH{mH-;nO>=Ze>X5H6U*(LRei>_TnrqAF=7=7}y`85>nFV@PLXViuLb50ASv%!@{)a4Y^sYeB zaGP<88|LcT1D+bgi0rH>Zq<#hrdWzdpus574ye zEtYq>NtCaY-l-iTFW6&x`o#u3S;_l_woOt~E(3U7LcVsj&9e8(Lkph%)!`@B__D(9 zEgW8T259_Q64Jz~s2abwx&u-m^&iU(jb&SC_J9@x9OU3k{4W%eOGwl#K=GYfe*c#? zcdg6xKem_HEN;&B6Y=&QND!WQ8?Z#Sj;~gCJJP5D7J~m(V4^6kLD7&X-9-_eam9i3 z3%G#X`U1o`mXr;Zo3g7uITnY-GdG|UmY5q@vH<(UU84|ndzNCo0P0{<(+n=l;pxHN zt``a)E%VNCLiY|{WY_op37fux?;Y`l9iZ~ zMm+m6ek`B;B+~R{28_?6gnMLu-m@wD-@q&N<@i?>z{kyL=m4<>qEovrJ)3qCUdJf5aaV>ZPwR@)q_z75Sab zld_9gU;ci|SCP-0(Lz_+$+L9+OwAv_@YMcG*;lkzC?Klr2olX`-xI-;(piLRY&fU8w%kl478Th!D==P{0?kZ)>Qf^5J+jzv6 zAAi;@uT-wRd>JZQp)?ZC1wmvrgFnl&`MV{R}gL?197? 
zCeI~^cv~LG*Mriz52;TZuc9_@^p<#xtNR>wjS@79fB5$57tr z<)4t28GP^f8Y1In0tqJ?$@>;^{q!v2&*p49Y?ol8X@;;rdp8Axc3nJw!EJTGx{^b^w<|=Cq$Lre}}5nPhD@{_HlXN@>Png=KhHw}oWFaKj)CovMNj%mq$K#hT4nZY z`a?FJ3aUw1V=e9D5!5El-x$cVqfygRC(C$ zX`4Y=9A+c|YqksSUy)i`Nq@m)=@;@-YtMY>ML+@4Htzudr%?VIP1pa|wOJIz3+1F) z^uxeoKe-rBfit1yf}yN}M(?`IER*|cXsQ)f)vg%JapxYGE03H(Ccq@Zt26>Q8Qb1* z<^?Q?0Mn!2fVSnWwa1qR5u%OTWVdlv^%Y)Ia}< z`4AXbXSlJaB!^De4kBJE)_u-OP3xv7>Vi0gAkwYv8_^=xH*^4WLY_@@PF zh#Jqy)U};cxTO>{`zYQfP15zP@;kNi>Nu<7J+_r-G@veAE6nmb9iJwgx?g{SwTJiq zXr*V|0D3V?rYR${9^?F4usYVp#0aR+x|Fy)C&Oy6IZ&SJW!97Ag--)0^N0TT_1Hz= zoB`(TGP5qCd6nsV6*xQ=(hFNpv*k(sUzR_6MX6D3619o@m2P6J*d;!3wm#cN&(s(( z0S(S@i~A;^=>1c8%|Twkq^TbzkEV^EKY%e_v23I=$&uaosgyTvgWK1uHsuwKekTkc zuxayn{vN-(?d%z#9jI6e+RXHKhpf6oh}s-W8N(l~6ldtP!uY}LciQhYe+zuhKI1)) znoJ!#*mjdI?q#;Lm+Uu-bT$~y6mnLNH~i>q&3G9JlrcS#%%8LVZX;%##xUgV-7&el z@=mbBFpPT`uF_tjrp)E+8Crpk7$s({^tHX$`?JTV7wo{)kZ9ge8O@#{3lkc z1P|znb&4-Q_Bz9x%r99$p@qHzFUPst&=p5loC?H=RPP%rJy{Ny`FS0GBPQ+#2dD%{ zTKqxjz!&<_ArXB(;5A!vdjwv<#@ZFX%-tS=67GMDADufdDcZjdvx4#v2* z6h%~J^AIBCfu`Y>>H~oijROL-k&PqadAe)}1n0B@I?8fXr;>!h;PW z-wUj*P7T#Wo=b}e{z8$wiT#E0#`zaYkc||s_D}j}FQTL=g5(Az3*P=j5yI|{_4-*G zG_$i(p;Ej3(jvnbqBngMO3D11R7z(UnqO==WS?$a&a-anrcNzLfF%->{JleOICw~j zrrpPW$lBA|Uq>wKGY{bFwB8cR z7%K8xuB@(!`uarH5v|+B_jCH^yHV@PZYtF+^WN&gzEJ9F;L#L`D(3@_EH`kh; z(4hYk;1glw*fMe`ZmJ${?rO!i4CHTJ&YWbiS=#z%^G2};bMy47?D0jVVqnOv*6M-G zL!Dz{c`cB8Q^?TMI8WKTcnQw#)(bdZMU2q;p;eX2+80`;FCO6hD52HdNuPl!`Yr111pd$G_dS*YdA4bX9Gk``TO2?Ku!3=GWC2wGy=TL3 z<~)F}`aR=sTuh^2?)BGeC-?jS_xt~88venC4{SK$zo~x>v@6{8%O!!}#TQ=V^K+iR ze${8EzZf{vWzIdt9x>=WXKA6r-Pb=e(eAN!0p`unozv0r;@b7+Lo%lEZW^RlxzfK@ zYQpPv=%D*7P^$i|!G-LM%*AEHHh|Fh%2hziBDVNiw{+xiM@j`A(-go86EfdR^DIi3 z|EOQVoe|Cb8AUX3WftS+jGwA03PAN)mCBKwjvIXnU#VU-~QlEnqHcR$$>nT>E{4pAKGm+Uj2d!4G`7;4?-2El{Cixao_Krk8AN zQMwa%p2MCtdVeJs6;JUbDE~+hNNkw?ekw5Y(KGQ_d1+U0=}sEAHe|ee)isT~x&v4= zDeYg_;w%8`Dp3YuOOWO4CBCHkV-lKsT^vgucPZ`wggJNyAxkgd}>_pvht3VBm z*!bNnJQ*wm{tS~KvQ`Krx0rda-6=fTWBl@Gwi;uiK2@moVn>_ab>B)Dj4v^}QlCNq 
z7w)XUV~MgPLXe8TpcjBYyn9@VwNb^tcFjoUv?Msy_1JYAizdi^P)&n`<@0iY@dZmc zlAlqFDz;wy{vBZ8dJX(Y|Ks!j$88|t2ZRXUfjgUgV`E1KDzwj|TF5<^d{W;~H;x$< z=MK0oZY@}v3Is#&6zw7aX02PFC8+F;^VU#pH)!$c5`zNvJ$p#|+_Lwxg2m!%Hvp~< z@T~7K*6lOj^uoj4ro2sRtnDY!?U&K_Ml9hfV-*oeE@R78@gAOD@SPWw{9Uk_vjf=f zeBy}$$UT8)H}m`gb{V@fb9z%Ux9Nf)){tE)B~}<4?r;=4xgH>Bu z2qkE7KVv`?LN|9T*p;j4Wo zrNWV(kHyz;VO^UV?J}{9`$q#*aI0j!>LZ-2H^CD>Fr&iuy_iCdu|WmiU)KEDJZAR_ z;WH)e5w)HoaLw=kg-5o3!y`1ecqI7-T56pu@4c8tS~J;$X4vGw#?Dw}uDV5O7EDKe zzw%%j>?+OlwX3O37IFi?uqb0+Ohd%j4bb^U_r_lg2&XqD8hwo8Cq+PuK0o#N7!SCw}SOHv%1DEgpCy`9s-j24e^6^0rWb7z}XoHkVKnrbu?gXE(ojkJK~rg_`i z1JX2x!lFibQ1b(WN11Er{tFRY{w&udePE&;+HYgs<5#e~sn-18(5QlO3zU zW@h=mth$1P%;O~JjfrgdfxPV(I=gjY@pv`70^1lk3=P}fuiW~F^l3im%?{g{L8#_7 z_a*2_)hA_fE~38n4r4HR452PKamo5@tQXsuZfYti-XcEnpZVHfN}Yh&@8j*-jDHBL z|NQq)wBP^x0&DGqJbCmtT+hj`K|OXnTX`ZR3p()fJ;Kd7efeDAw={@eqWEm;Ghgg# zy>bY04nM?AL*8gdyqfnim6*i^1Tlp49`2}jr~#s}YdeLcb^MT_k&TiosvKEndmlBH zL&Al!tlGs`+~0xPlADlq-7t7Od=Yqs4_(*p02KtXX_I=>(!TQ;r2KMh_6&-drPhLq~Dn@uNasqc<*=nUw-J%KKzA%2; zywm>7d3>n6jyuz6Bb8y8NCTRv;h6r_EUD8{I;L;>0Us9q;|`2^ijcI@?jh~-I5>mj zxwEgO<*Ii;RMs>+SVWk>P(^8!HGHbciFMiiV}jW7Cs-y37g7@tJ7z`HM^7Ppni_0> z!U*;2p+f6cVFsMS3?v1U^kR$r@+zM#~+1MSj zEU==c8V{3Gm!Hz3>eLvO*;}F?%4XRr56RJ2ryQSwJsr4+!#<;|r|+O(Z>|?T)+YD> z3coR9rY|-P`FnU<*pg#lsOKd)3%XbA=vE!G!#+r&Gx(tJ%|r`^K>NLZ*Db>xlA4YY zXs!L3{#x#dJ@)Cq$k_xny^ws0wl#hyklP*c5JpjGRp48vE-7WpMJ32 zzU?B27Gn5!_;tjp1ePd2a++K-`wQiQJAQSpb9f8y40ht13K9MCLF-^RTxdCaG)&%3 zx^PkQvH4@Km)R{+thtAPLE)b!${1B~g8KR`AzS6QiWw$R}z<JQyY-_CXn;WqI(w!hWT5Oh~tQN689q}qPO4+LLK z0bAb)=Lg#j2Ci zx$pCccC0~)Ex3cE)e)sVNl8ar6C@tleU6)F0*1;XkY-bFu8sI;9q|ulZ{L&d zev_GtBBOo9H~ogW<^_L^-kh$Xo`Ed|y*9RqwOvSmBjMd@8MNNUt_|c?-M8GWftaWc zMUIxznB#|4Lt}K|U!I3O__l-2I88#f@fKMab2A-s1rA>Hr_{4}+l`Px_!We7tQ?Vl zL?IMTtGRc=JE3MUQqca;-sr}{Ia)t6?0|=^4Z$=~?6%eE*w*Z=K0U=_Y>Ak!eB0SX zDFFyS;$Mn$Lup8C&-mlGrV%!AgLYQ)T-R++2b0ml_GP@pgWN~72ipZ&Cfj^#PR6xU zZ_126*%T_q+W$hie6?)a)6)GTBlegR~!gQ*1iJr3mzuI^wi=)0*IjM@mre3{*u 
z+vEOHon!}GgKzKNCOrT7{3*Z@nEQKv!=DBh(x8asIFj`+MuInPWW9k%#!PC{pF$iu za|CR;b^Gj@_V$jvt%vCMs5s+ad*-jBZr`^{1L`r2frZ_!#wT(CTw>#tBP{mDBJ?|p_Ql*7sin27GEs-nh&Aawi<lg^QrDIinyG61^n2IlI3;u#smT@4$@aGxUd=0=OWb%!1FrsIZo`}ZDR z=e@JI;Ja$L;k%wDGBgSz9z5S_BZO8SO!c$9R2N~*f2{fVyIj*6$;b%5cHH>bfW4<9 zNh^N%pu*eH?=D>Un4j;HaPAg3N0-~og+S63U|bc(V`<9@{|BWWPTd#U#J}sM&9PnE zS_2y!vL4YBJC*=N_wp^@&-O04h{Du@Ti7M*hg2?ylUrLU)b7 zCyMuFZwLm+tncL*b-8=z^Y-4QeH}^3uJ(!)ra zWk+~op_pfbLqub7Twm~&?GLuL?AEc~f=8xypIc2ISEUMv1RY-icC|a!K2LO}^=-z= zpJyPRkUk6#-jG_d?hY0qN*SFSR6DsanHOv9uwCZ;+ie031ZkNs0GeU*^_YX0xE}Ce(OvBO? z`Va6*?7v!XS?AaEe&|c>@0a)BczGw2GIC>ls0JGpLGmQGj{%Yp9bJzQykaxzVq@DL zmLA#J?lpg+{oN%49A1=8Sby-tMGV@<5#-2LCrGZycepSvll$mRlNoF#k`*rjp=i1f z^a@Bj;+p16<-j~WJppcKRj;t8tk=4VOFY8y#EUi>huJPO2-(?dVf~J%)*5C12RU86 z+p(I#iA_y7{~*H|0kiAv=I1Ape;h0Gek+bO{(a4aQH~5zJ}9aH6etWu zdi~xVt2GfMDMybQjWV#IE%sZf?7w8YQ*}*p4JGDFvD>cWz2dbm5lTX#va=roGwkO}E~RDeZ(Xaq zSd{3550>6(xrH;XAUPK%LNqgSdY*D<{wJ}y=LR-&7{JY{Qq zPN?s3mio>s_yl%RdXYT)R%A#b3u3))F{f?|?k#H+`9b`QlAl`hd(+enNp8{R2Zgeu z=%Lk3nOC14)!{RD?MendC&`p{Rw%O6Na;`>KjLyx23xK0)W1gb-&>B98%xRh4D90G z*e%(eHe)8IMIDqtppa-Aez3Mat+aX;*tY1pgeH8JSyptQO1 zgDt%++Z*=feh0wSq>cVxm)Jk*5&qx?9KHObZLpXbLMWE!d>-|Cw-I;{YMZo2;1$ts zpQ@bZimPgG@TeHwh3XR7Yts=7S=Jl`>P*^cu2D2k^^~9ynfb<2@ct17dCTcZ$~fLI z)8Q4hTOwBCH!tCD0JZ*r9~qL^aqZRj3niW6_w{av$y!JuqtauM>5hd@?|91`LvTD9 zz6{va2urW|rzD6B@C$pIl24_r7>os3`kR={_f#KG?I-$;=eq<>_C|f;M;GR-rHXjI zSg#Or(P=tu($$k)d@c{!>0QuD)P|i{z%_cH_r-2h?w*%VZ^SqF&z4&TK6dEs50>kH zj&iD9Oun5ZR2q?NJ_ri^V&; zfrx1;$q-!%eS}#>5WT!=AqX8M5MQv*?C$`n%h`(|cjouwmxi$;1+{1c4iQb>0$q;; z#Z$yOeF2KOW(KGJn;FH(a zwc3ZU1;;~?Bi)A-skIOYX?z}~Zk;f1`NK-5m)VC89k^3P#a~jy-RD}Jdbw|p)bmSX zRU^GYMI~J8Mv#?oO_7!Ddeejbr8#ymzNKe=Ll%v1a@g&dMRFgc#`p%LK5i&0ud!mc zE1oA{#c%MAQ=Muy6=!jd1}SPlY4*R)zN|4{j>BJ=q&2x)n->LJmcb&s52MB2&C98{ z_cU+H_T(_+7s{0E&P?$w4K5)1gmQ+>v8be&oj05iboW_hm9*undv~TYQCDA?9pAW5 z%XHJqV5xZo5;PZ@7X#=c0jwsT?odkAZNF0N8LR7aOHF_(7y)uaWm7SRU{e5VZ{fk{ih z)GUySx_~Sf^z2V3Xu4=0BY6PtV)e&=loZ_-75(pr>34MM8Qb)`v9Y%MS`pZe;F^6y 
zb|ZI5LDL-h4f|h|_EI3R_(w5*ev#(vo=<6S2PKl!)bB-A>QQ4&I22kpPSp8;-l^U2 z(`;@!=6BxsLhGaH^O|$riT=lFEzwhSAIa&s;<_{JUQziRmmG$ihX{132L|dJAHXiC zU$Xotgcyo<9`y+cca@4@Ln=6y4Ebht91`_CBq+U6g-^)$hEIx?a;BlxY}qp@vm94y z7=4HKXZ}e0s~?Q@>LD{0O2TS=9<=)2oqJW5`2+|>` zba#kIOG>A73`2Jc2q@i1i^R}9O!947Aj z{$Jng7ou>vX9>STWAR+IN}g3^=&B9@0}_pAEJPF)7qjrfvpujvAZ(-;#;eALdv^T0 z9JIc4DMQwju1_W*u$=#XK1Ds-cXO;nmXwVfTXzpH0X6xRixXVa>;kk`^5SL%%;|ni z)0+h|+{D3=;ts!;BX)9=*KMRK0C2lW6K_yoWrJ(yjI&{s^0$g~Ez$nDS6};OfZ|>x z3%?aNd7WJ}{-5YWi@Gx{)2cx_j+oPaGR5>YQ~(&CQU82p<~{3}EGd{(Y|$mM zsPAQyPIlSxvfka&^#P*J=+`g&t@Hzp5_+gm1#-UG#4G7a_`6bo%q{l^pCJsrU3pW18$FdVs$!2q zhMZby^r-pjQwLUHjrH^7v;#sn5JE-F{Az%kjycJZNZgY4ms+3s!TI~tj# zM8+$k)5S|mp68cb{e-FjrDeg>P`jh@nRg3Z2h+9k2(ujG9KJryWBiy30D`y`x}G@X zctFETEcP`2+3u2`oiz(hXd{(drVBBA>8pUgLxuauck>vB1DJC2C#KIMs3u#8TXgk+ zx)>~ZM?K5ww-74vy=O+{+;nTYpBr$4+6#m{_YPS#rJP@o1(ryF$Gea2|$9Z==rt~`U6B&M3Rtu3v!iHKbVi?2hQr6QD}a>LE%J%sk%0j;-6 zPJF01IJ1+P;rZ6FrKDCb2lsx7-D#h!g+z#-??E(|Ov2h@i`Wb`hW*eck2gO^x@BB zz*36lDhkrJ4fYvNAn)lYU2EwWU48L?(9D(JB3b`H(>p?xt&9M0aKwTcH6dm0C-3(y z5p(95QFmD(Qr}d4sy^aTxFoZw0NfF+oIvAN|JF>XhKl0ly=o#b5Cibr3Fzk8AP?+$ zTl!8=h-4$!{1HZQPCG=KZ%sl}`Rj6R$jObO!^1pT!&mC#1Dt+^Etb49hRR3Ec$Au` z04&`)!CIMOB}Y$X26WtVG{;D+aQ$J)uBM7xi1O8BoV-!M(mCmT_hyJYvG$17mv zAyp>D-`;?}o(D((FXm^<#h7uQ2J9M&(|mazmp5UgjT3}xI(OxG+fzN}2;)oFe6yH2 z8YzO!{%j3bZnJBF52?A6!E@AKR%V)L39Wq9pa6S~(2ws85qHpL3MLbwO%*BHS!rg@ zE4!8S?OzfzRojXASl4hbMR3KUI$)pp$N_iRLtqSkwWQh_q1t3;tA zQ3q{KFsb4{y~Lug0(}KCdBlnZD34lz{l|DWP;t0=1D_=(lr}3Mugbbs0 zHpPA?6tFUDjS3zgEg?P_+mxmHl8WgP&fxaxfc&D20xRO69_Xjt+)HcT90`AomJ(GQ z_6)DbmTIe}G)0^>8tw%Edg$rR6tUe8APkBW574VfyBD$P=)KNWgC7JP|CM-w419@PD++(&rPqT_*x3+eAO z5yvGme<^vnV^D4DTOhji%_%*t;0&I4D$<2ACYbTrTUP8(UarCefndytZyX-flHC_%oEW;XgLxO| zG{5Gf?iL=^$MTK8M#X+k_a&!ZK7nt-fg`-0>;-buL~s#4pTz689!U$63zG>jhniLGz#8BI z8+}o5gW?#LuR@uuT+hfAy0a`8UtD=qdLDjghB{R8m*wtnL0|$UoXF>e{-5I4o#4;7 z(GPKRyf<4cP~G`KFqx30R3Cyhajz$buCWdke89!uMVrp!u2MeU zgqEM^T9EVo&c(1Xhbv^}R*AvPvr)%W#lPkQiT`#4W%By6O|h`#bVRA0Xey}4K8Hp{ 
zO0BW2J;uhbpe<2b%=if-duDLwwLAmbS)ZLff^y13#rNO{T&{B|%AR3&zsASbJiZU? zLI)X458w@vjSm^O-Q~}M|_B%4Yeo>;?QsOa|et+x)8vNS=|yR54;_Y zfC`IGt38){;5wS#OfOo$MGo6PqSKtG04DLtsX#)=d zsg8HXc6M&1hEZ%g>(J+n%_sRrtlS(;L_OlTYgIqQmz!V?h12zdk%lvXS*DC>`cE|< zTG6SObADKmZyWAu<@Uhy5m%*a_qcfGFkv$qH~%mT%jzBKTi~WpD$I7xHFjIuh>C>K8Ib#nA87jasIVvnBbCd==?#rV z+UwusI4?Re1hLl=a?U4b1X<=vYuy`Afj=^$S*2=efbYIm6^P_Iqg~xe<;QxR<(95p zCIU`89W>Yy$Ea}simeVPJ5;qp3ZCH8F!`|0Apco8;4byDoiJJK2UZdc}Z%593QmV%TKh#wfJ1Yzi$#>um zq=!CuQ<}43K0jXL>|2DHg@$0SiLAA=(L=j#vfJjS#OGxp5jssB!SACr*1n_tu7N)l zK6rHV@lJUKPsSult4XIm_Tm7>CY(Gt&k$pIip|l;n6%Fmxt-;l!UhttMFtsGb;ED& z`@pInr+4lH!%w^;N!wn&>pwHDF^{iht?O?Z4dwXU!A%wK)-NoOUhMt*mErrk`ji94 zsr>33a>26hfvo3QL|RmYE{D2~XMX+1B&n_TzoNb);jmR~sxRI{i#?cwufl!Zkovj^ zg4Sk;Xoj*Q*OGQ)yY~opoTZXFor8@q;}*4_4zWd8iyQK4$z}uS^amR5du_43d%n7I zzlrD4cIDYj4u_mvlB13(uVi1vbDLm);>$I1zp*;G3K=HPVTsUWkJd_tavCdAId}7- z9@iwfm?_g+(Yn1lyCI`HW7Z|2L%6MwvANfSPpK@?xMuQ z&~InDz)D*|*ZyZVh%q`*ff`WpyNk6_9C0WgBm^o1ciiz*ac}}m)%MN=nT|iGPgnq% z(~B#n4>Q;2zyIPndAJC4z@HTvp#Rx$T$Jm9pXr)96J=MdL>IoYrXKP!p*m*1E?({} zgT$`NM}>CCu}(y(?1jsdoVsr$>z_s~)$`Hqs4QUB$iE+2zD$7Q6()jDJg#f5w7PR% zsz*7F9ab)pZrI+hY7|X&oOj%z|CqY_XjMqrw7O-x)LgfmIg~rDWVra0`q{uUKwV}HME8^fnV@hFD8d{qVb&4 zRLnHs*>x4{%J=6RW&>ZoD2kF(*s%WeVCElPjNukrzg+wgG)B&8o&B&4zFIoCu5pzU zQZo!nyCZ@{ftKbzjQRMm?*cj2dIk-N=sqFO9ZFc*95iHorkQt@)4wY&E++jAJQ>b6 z?Xx>_F)waNzfiiBQ=KM9C6Yw`6<2w{#|qMJUgp{0FA4hD(W?xgyS+h|$_~s$d2X90 zQ9sWh#sEx$75U#U=8Ok|ES!As4_UoawHCNNs|P2xOl<$tc)Oh;P>@RTGS^0MNf5_& zH2}omT8u@T;xqSu`yBiJKs!H2ZHEhLbg@t&!A;83f!4xH%QYS`ni8anO;gq(K0kxK zS~?$nY8%IoFpqqj4)priIvbxL6wMV$@LhU@hnA19rM`n2z@WuF3UpZ)@4!XkT7$2#^eLW?{kvx9 zENH$=qV9ZsRbPvy&eGWtY)zVCUpdp$F!{jA#S-5`qzt08pj<(X?kd)H$e4x{k{)BR z)OTJ#FiOM3alJ$awhZOS0z`a4GgWMputQMphMf%P9CZEVL7*;{P2@*)drQd0N(FG^ zi`+}aM+~%lA-vi(S?=XAw0*R*M|-FCGU`p>MwnFulKk`~LMlOvXe`U}%xJA-Pu;E? 
zMS|lDA4Z^#<*46@V+}%~B8>&}6|1@=KH9jenh10GqPL80)6v8?{!SP@P*)vL{C)IR zLqC3{UA2S7$zTtq96VGtV4ie*@IBvdlMtnFg&Ih7EN9+WHw&N|xz4jWDn7W_tr9X0 ziba(;`W*1Yiczf1h~V{bIvhQnO>B$`*CSvPu6;%wyzZ7oHDCi zwY^z(NXU)VaCEMwMJLTB1U}VV=*!bm3%{K+J#PN8=H*?%e+TBRfP5^P?c)D}5}pWn z)=1joYnsas{-7cB9LT#E`zEev;|1DF7$LdO%5axQDxQ?rrkqA<+<;BYDmp-cuQaIXp$gQ+%JV6zhy1PS*WIY*q1(!!oP6*oBepKIj z#V^5WZB8Nm^A`d0o;Dlferx2Hl^-xr_vD(uMi(n1t+2K&9oU( z|KZ$@WM|ID{dD1#oxkC$nT+o($2MP_2H+gPgCDIas~H+GWn`?qY$V%?drKMfl5c92 z4VCB2nq#|nrs-Qfj3c}j8EsrcXX8bqS8KK=9U*EiKTV?meuiw`j>yd~AH0AG{*Zw$ zT}GVJYjm+GQg;lbmy)F5qmBKBF*_W&80YBu^+-Y7d`IpABwF@Bb7f>g4;WL(y59bl z(=GsMcMmK`=2yF!PMzuZb;D`KYuh!Pn_wgr!Xw1Sp+6AW5|()KXls|x?oI|*NU-5j z&`amglVA?=>$5_qjkOGQx%}JNa$E6N9&SIq<33C=hVV~2S+&G0d%9*eATda&pT$+! zs^pvQRuu%<3)IF)dNf-H&qYtN#*v|$k1 zl~xHhb)wPV{2@Yh;0))HrfQBFeCVmWWY!A}?<7giBjiC$QNi)Tc&Q9xZo#l0&4Xpr z_DGHTjVNI%>rUWaW&m&`@F8Sf6?ExzMLOS?;ZNF_+E}xoF92?Yj~LNCS{Yv)2~jiQ zek*=fV%W8BFUOyWFwGT%Kkk!%dTF)edsAI0y~xg$WsO z#FjWw_f{hYYurz;&0tJviSlW^&#e8vzh9W1*p zGAB)u5(j_2O1&#C)k*s1dc#aWhv28$rdaRj=uO^vSH33v>^3}|rX$Dd2dbI+@0qH! 
zaG})`=2Pl-c!PZkaF>|xWD_RS6=Y_1u#cK>U%+qWyN=2*>5tJ>0l6z~dH;MdH`~qw8~Tg9iUVTOqi)X-37A=>t&x+VX#i)mLgqrQZI} zFiSWp%o5W%=r3ixl_yP7f5F-_fo2a|x_7*H>ENIh<=-j)6MoM~(MIjB*VeS$Qm-;C zuGBv2{nYo`yvd~_R_koSNy8Y=H4TABnzV;u1Ps~tfgP{i%MPKD2R6TF*hPS^8_3mx zrSKqA=rs+Ar2H|3S348*Y=zIbF}`Nv!o$w^xm7@G?4%yzlzWU!_e@KO zk%_#MlV}g0f#`Mdd>WaRc>&SeK}JHgHocBtTn|HqDxig)`$Drf$@Rg!&_NzccM>=aT7@lyt9FJ8=mTkVas z#i}$9K&w$I0$|LSGrB6E?mA-}=Z;pLAzGhsZ%ywBl3>!WoJ_}UZM{11qFkiMijqLC{Gc8f#iJZ@4 z4CIZyRX^JI4X95v*zLAHrF;}G_tY%<954D7J$gplNbuY8{YMp3-1^$N`iu{0@6;1{ z@RNvK3yRl$pI^<${(*+g5pj2f3Yhrl@dsM}UM7iLW}=>zdsRb+y-vq)uU2GR2?}SU za?{^||BZhq?{U4#BS=( z1w({wg&D>8q&%Iqc~`oUB|#?0Ku=t#q|a@I9J`+F=RD-&s?JCfVI13upG}rd&IImq zb56ry-G7$&36y1%CFP*pPMX#`&%5CZk0=TL`1Aq0}meI%mzu8{v*X6 z_(vYzigD|8k&LjJd+6DyN@?`U>|pUcnB9r?n=((q2|SnxZ2vN~e#;b~G<>joFs$rX z@+y{rC8K!V5s#8}YX(=kn7-qozvlPZ9xP)|Bj*)SY5W9RfsZHCSL$y=5_{Ioro>_< z00W&&>xM+rJV15Q@G;?M;PBw*grRNG&wkc5dAG2V5zE{auj^W~L%%8u+CCb3W2erH zw&}vQ`y~B^fk$@6>W8P=#?G<6=%G0xYXUHps?Hc+4rHYxY!KOGOQImxzY3{le@hq7 zH!~Btd01a|Z_(ph6~R%?&VGQApmb6J9ePod$aUiS^npoa6THt4R91jYn~cJT z@blg@&#_12@>V};d9wgQyFjI$)a317d)hUm+z8-z_$}SVSmup5Nu3a1O{gZW3bX%! 
zv?m%$HS}}*#PatGIZhNA+75WS_t`qFPe)@`x%eq(0dyt4AWb1s!`ZW@--{4m8bvQtZ9JaePiEewC42*yFhl5Pmq^z-0=k2th65D_XxD=jaw;zJe_!7 zuQOJ&Me|T*VmVKM-|s9>RgijsOwzorEBH^%T?zwmZsfnxY8}&5fhyYP{YWA#yMhVY zlx;$Ph47q->_skGfECdio;ZtUZ1RthU0G;PR}Jyn%@EEr0*Ia12hF8v1X&Y1qp9LX zs9=fS;gh<0YmHNoelguqD5r+2uT``+`odYI?^!a)_t|mnVOt8y-z(C8xL{y+P0Zo% z1TyXc8F5sc7%Ytg^P0S;STdW6Kc3nog0L|BLKAeXk5!U&`0O83rlU|k&yW{IiF7^E zy5EZgi!Po(242na_p$OXEL?jp2-p5)AZ7c}KA)jL^rlv%m-Q)q>^tShO+C+>tG#e< zb+wiB`0r<6^Y@u;0YIk{e$)p-YufoMu1_av*X!lWhO|cB`FeV}NNz25NZFQ@7gess zQkE{tg0ZAZgcNM-l3alMBn@M}U%T>l$jW(ycDf@MymDJXX)^8Tf;C%d-Vlk#WWM|A zU8pl0OJpCw^6(tbVyJ(Q`VX|Dgdo7$xWA9j_)_y_hmuC&7gxJB_LfT&EF-ovK}5*g ztcn@tmlVVs&8rlBp5Y$;QLWh;Oo@YzdgB?&D!x#yHVV7&U4)MV45{;-Q2oj`jsjiw-=Yk!tuf#3O7Hq=bI|tqj^|d+?IuxTQ}aWu zgkMN*1$05i`U1^+-Lfnsjy3kHFOoFtY8%=dGVdJ2M({iy%B7bFxz4$sT={OGzO$ce z6Ov}5nUFq=o8{#h(|;n@V@IBGUO&7ICY0N34FJxa_3ZkpdX3HmzKi8G;N(3Oxdq7h zl3!4XfR&FJ(DnbsECBtVW`V5%r;=De)}lS7TEXThz6G@-y$&>>Azum z00GUMT&tMI{ZsWVKK0@*hnWw%+%HRr9;-qjTqI$zB8<=aMh7pC=mz4c2>56Zc|(zk zKV|_p9u*C_J}%}Lsih2xt9YBkQQhI1+#s?cj0b9foji`6@Ey3}^2Dp*8!Du-sASJv zjFxZ*c9E8Z?NlHIk3mg5GD10e1RNrr>Z(svpWvB$xAtiod%N|%yzsn93ETY6aLUa2 z4V*rMM(nDpl?0`YeUs*EHJ)kZQi$3_*hp;FR497c#;~nnL@)&Y()dj4ZFxE%z+6(W zYmk#(>g~gSFNwIg5y(DByin9ax|7A%I%1)3P(`CdNfwn-o@&R)m$rC0{j@(Eb8PU< z=;zkdd}s;k65$s8B zRg8qR*aGx^?6t{i4;1wkEo$@fHh{7DWxrJ6e>hH0S=jOvTn>g-nASjyu@sl&Hl7TPUEmDlQ?vU0ZOg1)I5^Y?E|6Dbrc8o|KMRPtpT>VQXEBoJB3tBxMs z4D0W;vV8UL!RPg`T{P!~m#F8e@OkmlzoF|E{^C-dih?9xAdD6yv>@Cs-hP3)K`b39 zH&Toxw={(RR`x~uVbmEEe0cdL?UM!M#T2fE1Wtgx1Wq53ec($l%nEv-*!2BppJR^M zGfP&=&|mlks0#Jc2Gp8h1#*;J>2R?PtHQWSsolue8oopAG_pL(M4!ZzZPO6fPyR^k zJK)%07w_@ut}adaC$;<|hHY=Go2RM@QZZ_R^#(UWAod80Kyf0!)kfCv#vrF6+qASahbRmU zqQ0rx(@qThW=33Hk~fMEJ8Y+hGwW6_A4sM~)kqB|f{OHuvIB#_^ryhW`&qd-wA9;c zG7KsUVPp;)TX##nC(mxR8sq60XRDhN@a5`3mrEzRFDAx%)-%ThV8GAH6ed_2Xe7Jg zXbAHzQQZ6;uLiki*ulFTVz0+hDCvUdrH%S2@Z|}$!RiAh5s!TACv>?1Te`I-tR0ig za2lRuLA43b%tl?uZ+GKS(#$b_|Axg`C;ydRhpw7FwWl~UDC`Cocr|*~==T=M%~1hZ 
zzAFNo8FS5|b@~u--U#~I8KA*)H|kb{1!;!(IC8P3Pt6$I6RH7aSZX+f<>y{|u;C@< zgAS7dS7>AI-Q(1}wfA8_qVY-HrXQ$jk|OW`}tsGAVVlkhGY zufQeFfLfd$+$wm!n;M03LO{nbOLm;3M(zfY5IfMKOwmFicol9>^z{|$7(4a}h$yL#al2G_6Wj2Up{x$AMi$v<3Qml-u)ul|8{bAuXtCiP1G2|wEr{AY4^r3$VWp(1WEjR28zXmA`>FqPM$Z=CA%BxqxD0V(!f0xyl8@Bopy>c}y<>fw`AqLed*2VK zP9zI&sC!92jD)1ahqY)%rD-dy_KzSi)f-=X$Q`UBSPi% z`S&%(t!rb{*^o<+3h(;P$P@vROs{hFC*e~AFLPc!{t(*KlDd_%Mz&TnE~$txcpvzx)lEM%Yb5P{K+P00Jg9`n*t!kmSa{G1{hvm8)JL~_ znw{%*7??5L?2G=R8Gp<;oz>z{5M{`qT@*#c2fJ1BiC5jmx^)wK`S5=mJs`Nx!;2B--HrEYU; zVVy$hyecE4|A}3fkBo6?W@=`5J8dc$0Oa+0zn~Sb4>p~Bk_!N+_8<{5LAC+8c3p>}39Q&yDh=4Od02?_~zS{uxJQWu{t~b<4|@AkT_K$y3~; zg5w-E{L_!5ChSKsSGTeiZBwMKkdI0{U<*vWF!MHS3BOa7)^HU-uR(q4OE_>E^18u` z9#8@tA;h+Fj`WIsJG53zy{mtgS@mMUHf*BA<` zYVAy-3S-h#5AK)uD*|ur=BE%m(O$^w{7+X!LmWUZtME-R9FSHda@v!6$vq)^n85t) zE?;DrzYU1HHGUpZ*p_u7?dJR&Exwj>EypeN*C(35d;zHyvX17XwyFp&s|J9A0YBi2 z^%A2-wH2jH7>g~&fy9_R))&r7ceUp`8DHY%GAi~PH6ZxoA3Ya;gyiRGoR_DFQ>dk% zA5Pxhz;ApUm?(^c3LHAVnuTw|5S!z>iw9k|LK%eEC|o;e0sgv8Nxbw7;CdC+({NMJ z@F1O@Yrp=yUId-$2G=cCdi6)9T*tJj!ow86io$=-bqu|Zyg>VvZ!w%#Q6fNMmzJO# zL2$H~lTwUrZJQxYIR_sTwp!48IU8Yj-9oF1UB00jm2b;7z6W%_ta9qPfg4$jSasY< z-JzQr!GQI6^Z}|!CV;m~$FRC#Qg9bTGl?QZAJpaN1Ms8nOENSJCV}wwe~c&2xcS=xQUh!_}REPkv*EqP0#NVcQ5uhe7blQYS^6P6~ZPK^Lmab zkxx`{6C2&a7UMD6E}!;+Mf_$xzNmw>Hj-~uH*k#rma(RG~R5n+$N?F zx%t}>Sa~-WGQ29X%FFkC1UNXpEb4hC@UbxLmaBAcFK8$Lbve6$pf^jX($Z*;VzAVs z-5EhA7Yvw}9guE8BT<@_4ZVIjTBw8yhTKdmQ7KUo7dmC+5Xdh#e}|^!YH8%l%^(S$ zF!C1C3&ID$Kd68Rkd((hFMMM2o~)Y}0EPQUac!#X4|V@HpJtm1W3}fu@a>SG2)%<6 zujdozFFu4!|E+YX?EJ6f+HU=S#Mg@dbNy>s(q{#XgjugL+P@I&Vzw3G-1`%$71K;{ z|DLNd=-s%l6zSZMm4(NJSg&$xk0tuou1NJh=d~cJlARpH&vL|7ni}-Wm?z@hfYjV{ zp}TC##etaOCvL1t5wbn~f8p2?55YkfDy%@sO zQzmgApwkh0JIbMU^UyBqxAby{A|kBH1EUENi$HjUTi1J>YBX|k5BDiB2FEMV<@)}f zy3?>LYWVRJzPnR@r31)wB)znsV>slVc&0|aTH^g*NcLD$B+#jsqP#(;#cGsCuE7hy zv`|gdX_eAYf)MG~vnZzCQ*tj>723{3t>;(vif*b-ch|@S?tOx~oWc5|xehJ|HYU@jmUs<;lsb$^GP# z5vN$+(SA(b@ih?!LrmFKnO9?fd-bImx9J11n78B?D$mjkme6w*0e{}f5$j1iUSsTc 
z3caEq6o(V@6Sn2o3{=e87n77=9fry$_EJ77ANQblxqBAPm4TP~wbjQY@X^an!vOvT z2&;6&Fuv=G+vRD(8Ld*?-+4Bz=C5-+UH!Ic4%wzg$Zvygf$W0Vx1)Wbyvc*KLaN|_ zh@bf^$Rwpjcit}!_xE^|{|#d&{u_1{`GI=Ml1zNDWd2nd<#??2&5-lMJMpR^WR8p0 zOgBYiV(*Y8lPMv!2{)5@`Ip@IYzP&tjInz7bBQx#kOI%z@`Y^pj z#p(hOc9O3zz8UviP;teunQYxJvZ_Sic+q%H(aD36r`PgSn<}O&-^%D_<*R_1{ERluBj9 zD-$+mQ|UZe7%wXg|0{K_S&aaU0hFK)uu0`J4o@N<6M90tW_>~;5eY1H0tZ;YMM_Y) zx0CkK1~@D%yEE~PiFS(*y(h1eLybz`gQ_%bFGzf*Bs@lVn4}2Y;$)mASh?74JhoF@ zRB-zMQWWfXEU?|!(A;Xs(6I7D`xPZ+&AYioN zd;F_Q%r!>0q+d7BXV416V@m8uGs@;N^3pukFP!&WJLdUQM5xt7x1yyw>_uf7PzlNo%94l@|+`w;N8=KUt5)| zVIARI`~O33-1z?~H!gAiFDx4BezcIJK7F4nVd30NFltUs16azQ3qO|MdGte7ge|GN571kZ|jdPkExE=dq z6FQos&dNgD(_^`Q^b{*ssrBCw^p?~LAxu2Wu?A(i!X~|F7vCC4%R=7f`khti@k?k@ zyY)8y9&;kN$l_tDkKD*$d2fZ=Sxl`Up{CT5T%GD)b$n`?ALNDDpzws=Op^iy_Ea>_ z?20JMd^YL~!TQ{VKP{p|$`wQgu&^?=Amo@2h099>Mipm!^+rBhqasOO-+$y9QBmZ| zA0g^Mdkww?{oV2^{Bx=p6U|G`PJAw=-)pG0$R_-C8bj;-+R(dUzQ(A~6T@)KJo91K zzShrvzjd^VCld>xt#P@Epz9w=(dO5tPE(VU%Q^E3wIRDYsjIJ_GbLoYcs+Z>csVpC z{~vWDW#=+~(PSGDrU_^*>E3(?`4}Dg^ZgmI7>Alixc@-geq6Q<2ajETg~(e7POzvo zhrbrA#D|55;0FX+3t03>ov_-X6j^HoD7Ng!!3-bJn5xQK+Oj7qs5@|o8KLOs**Ph$ zf~ghI{flE5arK|sIDc&{xmIO;pytxOp>ec0>a~QAIs@zqDFNX*%AQRz{hf)JbR-zn zV$D<*9R~kOtcd|P47;nS&0Q$a4Jf#g?tAU?EXgD{&t3#UcDQ@(83p32d2u)*1L{sfUpm4?f1ZfxGzVLZgT_8024r5LIwZB{bj5rT<;Pi=H)}FRo5s&ifEE2> zpit*uXmxo1u{$+`8WjAr(qt3IQclp*ZGs#wc)uaDlSN|>`kEg^dS!q50s2y*$n$Gz zdqc+JTYSopA(Y*N20Wmec+;Jv6Crrd5xA)l*Wm>o5enq2uWz2_=u0^}vHO~6bgfZI zLiTb2+brja%giqtXCr{1<}RF4mSYdRPc&wqz^zwg92|S3rp^<6#&KS%wu00+^w)NI z2!B!opyG}bX@6H!>%YXJmWi66nHs~-g`LEi*fJ$r1j@Q3nH-vJr+GVAZbeYYNm~s! 
z1qxfdIYH&bDE(^v_|PUb3eNNXsiM{mYZ+|`7!rCxtXBaYef1$s#@YALRF*pnNrxNd z2!P+ES>Bzs68~AEu)xGNg<#bWZ%wFDh%#wYV{#nNNV08o*tg}!lo-c@6TUlu%oqF9 z`l5!$^U%C(Rc)3L@TdeY>`+O+(t~Y>DVY6}4zI|kDvHMMOK})ESH&OuV5dtvsdgaK zT%-bI-Y&@Vd44BT`-9vgEV6Min@A|^5P3F6baH}B_!LR6*OH1Vj`T_&lA6n)N{EKALTcZw7u@Hx6%O^r>W+0)Hk~*DGuc!NT%~^ z;|l8%aHJVjo%jdd4aYev@6shy_Z@fAeYs3=CK#BJj%e(J-OE{W_lX4P;rwO1_0pN?a*;y)F>ENk?Y zSo4k?F$UYpj(Q>iE*P=z#4Qh2Le>d;Bt$OC%MP&?7ZB7cOpT<6rSnVTR^R1onO+kL z@?{aZ#kQ3_6PCX;RDhMepW3vn3ym?MG zuTMXg7I}8Bgy>f$$?@T0@UaNf!qWZ-j+r8!h3z zW3Io|YN=>D+nwYsEnov{s6rFv6dCcsu-e4q0+1j=&PS%BK*^ACjDk`Rt`Bg$Uyfz=Nz@VGd1xplbNQ!6%JL??~Od+ zw&s1U5nLb3v#00z_k&Ki4SeuVx0&eO>>aC_8qRCkM(fKNr>K+qz~k3YJJUaB;$QZG zZ)zuFR+_@Md#pABep*F4sBtfQ&^2mg|5&Zv(9d19nl9IUK|hpth(2mgGFs^MD2D(Y zR4B9R^lVRb4?tKkmTzd>Lj?bpUgtP0{RxfiI!C%6;nSl~i(J}IX4UmN%?iX1@AgLaLsmCkp z9N!4JJ#QMck`N8Ae?oSXJeWpD84nJI{@Q(Hyx|-~5VS6^dU*2?iptOWS$Wc-h633O zJ3--6iIO1^NBP?!kiVnsGL%$I?LVP)lW@d+w@KM4PyZiim{H~S3EM3y{rUVa#)xs+ zU}!j)2hIx19((G{ZH_l1cH!iy8upy9cONYVvYfqaLXid^WvF|JWLU9cWnb_563RWT zd`F9+NL`szYf9SG5Z}jG;Ko!mnWJ`pLV7$@6_H0l6nYUmanrcy)O$krV+LXhIj&4H z!%%bWe`A8#BzU4sq)_Y@OWv=B^;z+*PXb&ad8qe`f-S1&V!JSL>b(nbS@nxe{AP5r zGv>rSWf+IQtRSdh$okm0J7GcEeYELKc6p)ne(&>}DTAVPzx&po06sMbV102V#m++Q zi#9YStvAaS&Ery{aPFR+k!i^`ACuI^0C7}Vh7z?@h14j^aYdGk{upN1`4nj*9>9s6 zp1W27o9sI4d16KMTA`)w!|R8E2-8eWim<+l_BwVc+llWD$9IuGQXSS#NQ>Q1eE2WW2*rA8Rc zEWKc?>^9*N?J~PX&F0!X@~?oq?Dtjpu)d~1=ttarVp1rlmgP+n4ceu^CaiFb?RN}H zOp6#s*_%gMC|mpNKpQ0~z}0NQe(VAk&F_k3;BTjIU6Y6;Wzmj88zcxC928fT#oDBd zV)d%_*P%b9?A21NV#sl@-jT(APMEMq$@ss>>z`a~_DKDyBS8(mLFYc>6MqUx+T2iv zX28VD;i3uaI{SF(bi>y=f*%@(9$+Yw6f#Ae-xeQ^DUGf| z1{A{)*5H6QT(Z5RIMwElQ@3VArxF81Dn@XY8t3bO!dI6ZVrWLbUhu$O;wA8T2@2hA zd8JqN zPR-;FHg~L&XaF}E!5j4?RV^WzFWnbw-(e6-rs)lyvLT6+O!8Ruiy^u*G=$F?_INq* z(_6GUL~U$}Ge28#B_PLeFG0be|H7%=wPLA@-?R$k>(03;9|F|&fQ)%RgJ9(i?R?Ph z#)H5^O~kklU2~J|X9ayymkE)sZEgZ7jZC#wGP7oIV{=_gqhEF4*yfkVa_C~bWJq(- zCw&&uD>!A@FM55g^BA8OmHBZ@Iic4jFE-lJ6>n<=0Nw#9{y@_XV7S$&NVYqz7~#hs 
zwNB6q+s(HfVmv>)EC=DT`bysWS7uyq|AAJdtTlA^reb833x9q$asK*=bwF9X+^=<9 zw{4zr5v)jXB|s4VBta77nFxjmFKxW$80uV3Scn`vy5m?zr4G*R7<^8({#Nk3ExQ~V zZrm3P{OXyTnRm9F5!IW1>=ohU4isRC-Sp6;xfowXaHE$<@yUmH?O79`n8r0d2V23WZLBMUXt3wqP?J$ECGy;6j10y2eh6aV~b_OM|_J48yICZAk-9X}}sA+Ze~Wf&&=cJYHE$)%=kQC1KB@jrfbROAO6< z?{4_P3h)OMjMbF*o9k7SIXb(_-P^RzuH^%Ud680IoA7fnt_WQkns_*>^k9|ABL&id zAQfS8^j_RSbdPp+*UX9CIC&xLv8>pIMoUbkLL8=lnpgzQdtR#u-aIRkeHSA7Z>kqWQkn7Te((S=ra=lL>yicogSA#B5yXLS!ht*N3NYwaXz&0*U zSv67Mt=w}wX3;)=a*o98%-kXppZv5xz?Q=oA5fMLIsT|;+J!luV;&nI?O?&deM;$t zL-!$|+k4#Yu5766bJ??65VgY+&;hx0BvL4+q{2{8`q36{+&2RUK^-=l{y@vAdR&K! zRjEat4~Q!ZD~;`{=liPw?!T03RT|Xu+%_@hhm(S5+5^gdma{yoXR^r$9K9-$-ur>; zPOO`?4R3>m+O0+v^H+iO4FN&~Jo+KLblBNtvCdCi5(bo(RIJBMiJKdt2tfGnz6Ceb z(SpCm>DuC%_mGIlO&~Q1H(#jdv=?+FzA2DI$@=3?L9$HMcoHpiw z9v&TyD=bX5SuhDvk*v!fDYm=dURYhUL`C^_al9HgH`j773!aexy!o^zYh1y<+k%`* zQ&QH6jf{+Q4_|VA%fyx)z3^n_Wp3Hncj?T+vnN4w1114ZyV^H$w+>JQstsabA2hq; z#6OSsp02I7q4}w6yT8(HU^wtqaG32qFlAbWj07I!Lc7 z0s;b3Lq|lU#DJ90AqoOY?;urr2>~ggLqvM-z4zWrsPVjWtv#2SkFLh8Y5FYU)dOZSDuA2dpV zSMhcmF*eIxKCl#k;pP*-B=-+>j$iNp|4XIAFu4>lSmrsCuu83-VBbx#=H!ZE?x2aR zSpzM(*e|CT&1CfLx!qGK?rmvwT5t;Bt6?Z%d3-At7ePQMe2^EFKxn}e4pZbDw_Hbu z?TcaLhWYFwaK;1DP9+rAbI5s2Meq2FEG|K!mFY&RW0MU~rEmFVL%DZb3xu?JFPqk^ z$l%hPfK^QSqHa7@&OPZ*f zN$*nKaoXE~LS4}lVl_I?>MrdJ;@Xe-&-vO_-__V=Kc>;#K1-%&e9JeX9ieyHz*Dp_ zA}kj8&SiiNPe)}zLGjoL9>b&-1}@n@yO*4tJ<42S!~2mtF_!ri1Z{?9>nlLRhmwo>L;v&U&8HP>uw=i#H# zI_srpL)ko{o9BzCxuX1WB=t^+my~i-W-|AX_6uKEMz1}{;6I9|XTSeCR=_xmd7#*l zYI!tDUvm`(+256Rpw+)yX$(RHLJgO6yy8S% zhu2d-2+~|_OC`XN>=c^5GyZpyd;dyT+pFFHVCTU=!~z!G_lB(MVyZxipTVXW<^L zdtjz*9`#d+emqcwI;$JuK7CVSM{eVcsMjE85h9)=)Nw6f?Kb5*Z}qDsmfRrMDPu<4 zf$AGV+Dg6%T9~u5u3_Vx6Mn4qg5kjAf*mPwh-Xh(w5xvX@Y)7De&>xC*aaGrBjlUc z-63y%a4*pGqX`e+IC}T#OVgac^nr#COlJfZ9`hFn$SI}Q>q)p#bbFC0d2jV|yfBl! 
z-@u4Ol&NsSpkkqiadRjOneOiST!7HLU-xTy=`%w1c?R+K42($5iMG`ia17pu^@?a3LNpB=R;cX74|WH z?xED?cYrLEUOwD;E4Pay~c zvjoygbUyLXl8Th%*2e3y3uV;kPWcuJr`sIZrdtGe~3 zugb;iJRENxtLBF@URu(IM>=ajTPV(-qpYFekIU*$4o`?;YET(8GE4TY3;qpr{dX+( zKPbcwW`QGv0P1Q$mCq0?d91+U9O3R$JGnu;5C6D6h4%3Haq`PakG_uU=&GYE7M%ff zyO~4KKxM!dfu(eRGJ@VpTWVrSg@I*cpSILqBo3B#>hy#i?FHdEx(nwx3{MzTeJZ`~ zIwKqNgf@EKw!wx7h$bV2l3apydjYU!jY5 zeEnPPnO>z&#vX?Jp9!gWxw)sNjANAXz{$(*i3@ZsvWn8S<=_t)19ncZs}c>=ak@Dn z(quzSslshfmL zeB)L#mFU>Q7pPUa%=HSH`;aCtk)(PuU6!m}>rVn0 z=d1K8Zr~@r8w#R+!t}R3l|IZ#rLnt7L-%PmDJ#n-Pii6*6<_WA4HO*5eAU3K^qKDn zlkeqFmcfl=mZuVL)WcQu+9x&d|03Y}JAdcCFHz}l{+0>|jPsjnRxakZWhl0_L+~L? z9K7QE(V><>v!GJ(w~ApX2wUCc4l9Iye^wCCC0pry{5^TCd**L-7(Q0HL`&S=$VbU7 zLWu(#qgqv6r<-5VeIfKIu}q9;2tpzFU3Aut`svK?p_0ogh{z-|QkG)^iW4+NJMXob zr{mFbZ*YM2EK%uQhLD7N`?<>X9E=7+ zTXDq$Gv>_|=LBDkpCs_QU6R|JJRd=2gJ70FD6ndMG?}4Tf4NLT7bD8b_P$?C!^(QP z{h39K@NUuaOyElEIt^_{x&Zh>upI8EC5W=Ywwl>pC$VEEf#PbzO}b}D?&lYrAf6SL zGB(oSK7_tBhi-{72Wk+uGuVA{7YdNN?uy8>#Yfe%Rx>3^`A$d{NzDb{iEZUYBt|y~ z#E;4{hjaxw51|rPv^PR$+G=QQpx(;df^ns|_PFDV47fe{@pv`NOc2E0aOl*)d^TI0 zo8eOcHacC#qmk+6>xBgjy?lNmYo+4vOkz7yNZ%Nhxb>mr{cKlFzWQ^A2^{EGLoNz@ z_p$?quK1LA&9p zL7(8k56bq{LEe&+Ojz=xlnO2^+a91YQ3>#_Z*)HUCl*+sh3bd)WCZTa^Q=R+Y?5=V znd-;1E0Q4qHwWya>cMbo*2Gwk!e;*TV35GA#tM-Uw#1-1hbut32Nuy~+#aSLdUOwu zAd}j1>>23j3&41Xl==fRw~NivP2L@UWwbVDSNG!CUbf~xM+%vT4s{pm1d>-+rmpcl zo=z^^#V_laGb)0bd^ui7-xfhQg24pm>f=&P^DjS^jF-<)B@_3LkF_^yW#R-UA*8KE zaNlS279|pBPnxH*iH>SMU^w*tbdMz|@z6m?@9kK?>z7zRM=8-5%DGG8|Wh?qR4kN zocrSWdmh~8kObAR?yC3Ds;|B*S$~t({?RgP{7Jtaq^}u1xqOWEU;6qDM8%v1$V48i zY|?xmKv>`>{veyG->*+eE5$}|P38gFR-7{Sh<+=tJwf7c`l-*m4Xmfe-Q5_z8qfzq zXb&8s^2|6dP6~xAgyFsv*4vKrT-T@l!??jfQ4dE;BSL8Pfk;EoWZTR-8Jdr-g_)~J_qD=or5w$X@{)05P?3PjOQYYUw;t;(yHiy7mL&C0}Kp@Z+EO?aW2XCcNYmjU1?A=<*dK*VwjHph~T}_{F@q4nol>3sN z{N;g*sNea*`taebxXJ`ysrk1Pf6xpe(yQB)7c$~YdxAt;MK!ijx-Z}>CzX_iv=|(Ax z@>mOGxEz=eKPI<40t$}yH53OlyLCOa>WCZD-D#L{tUrU2Dp=M^+IGPGW_47Z;ZTXQ zo+jo}b8aoiz;<0M=)^Nm4;vxV*eK_gI;5AI$TgDNqm5JfT-$@)hP0Wdp)m~GA+oy) 
zSIz4Y9DSL9`3AZpX(G|6;FLaOaSLZX0@WlHu{#4E-D{dj_r8ow*823}c7XfJZLbSU zIO^T|pyPa~zGI+y2! z_m86cg9v^r0F|{?6bqr1Rju}c$7jlic9s&ys)K<2UA3;4a4ZVE=0iWbkAGODF*%jbPC z?V2eGTEBM0h?l<8mc6*Z6Oduw-8^TiL8Yu*)dzk}bg0E7nmqtq{ zKoYNcLWNfiuh6h(NNWU^SBZ1v&F+YKDVFYXty=Ah;9qX4S=MWl;A~$zaJYeE>Zd63 zHW1Hs;ERx1)0bM$w}BRuKCbu0q%1g-1J7M}6iCGu#EppCkfD|t&?CwS3Q(OWQ(CafT*_4T-MZ`gz+)ZMy?g>{`y=p@xa1|t!1bA)m(`(z=V_HsL^Mej_{7||swEbH zakXvPK7_Ci>zN2V+U2f6GcAlP>Qb7EHCbH#@^&-LE^jj~7)Ju&Q{olC%r_|f&aXm~uYmX(u z`5$*4Ko6_56|>H!2|GvwPHh_Fx^1UoKP<*b5AalID5`QFjQW<(UT%Cn_KDzJgZS#_ z%_%9;NV-lHZrYQ`b!@S391E0QiB& z&NtA_gTf8#g9L)A!k8=XVJNxjo>4`x?h_+t#9OrY#q-XK2@#fD_;Oa6lNRUjA}*e-;#O9}Sv;=ZW&a0ue9$|QRZ(p-%e8m;9}>oRTWNx-BEnu{IUiiA zz-OKHM^fx$WF+tI4BD$A7;_$&UW`1LzEkS*L9Vr_mQv!&Yuu7uVSaH^u&9H>ORJ~J zzIT5Qc>rl#wFP9w6|hw%fUNjc!)&U>Q@`V=GiRW8<`Y#<)HcJ45B&$)?*Iu+K;Y37 zE3HV6tu*>Yi`-&xRkr7&!5shzR?`Bu9wmD3g5C`u0E)oI0-d$_@TKz=lh@%*Svl}Q z|Hq@k&@Wjgb=8Ezr^26yK*ecnz1=Cy{n`YLT3&8`&1IXeew@0fL}LFmGGf4pPAy zN;1u3@**TY!qS_$qG)?$S%O8NjPK{f%WZ_$HTIQp;pXO4X(gkYqE$*}^QC3n8}t=y zC=CY!5Gi3sR&KhlN`1el65+t;CfZOOMuK310En)SFs*5g8id4Re&3kJIgYzw^}uaH zJ1C?ma$0UKFV8tvb1QsgO;s8oD!9qp$gawiJN%55{B-~(KFK$%H=q#-Bh_Gs!64$r z-AQ7`O{k@}Bs*7V=><#KiA84Sfy~xq;cfZV&*K;2`P2d* zX2&j%R|?qhc);l`hLS%AXiFEDBVHBXl0ycn65)7lF;a?#ivI_lH*+UDG5?-s2FS;-(GeNq5fQZT;Rt1$e^ z5S4)8wL=Q37G372HOhMjLGgh6#0w6YtxtW5tphAra-G`ScO{#-n!HCHjHF4`-ve&_ zI}zYtA18ia&KQ{(Z!|Ktb`scRy&MA`;kD0fZk*QxfA8gU2cV4TF4Pqv`=i29ITA}D z>W_~pPgnQZ(Pte|hsH+?OJ;C+jYIOza)1D}A8Mw$UuzeZ6f&2hr zsg+T{lRVFR)Qx2P+*NcUTzvFdSN8h4Fm=RzfH<_H^QXOK5vR(@g6}IOZPPfKipi;^ zc$;=EC(OlsZ#Ce$ zYLWK}^M^%;l3N$bvSL@9 zY0R!?6m(d3;#1SHB&5YJ7od1yYEBousV_~I(g-$B$Ltu+NkSzS#ZwL~6bUa+<@XUh zbt_Q#m~<_`KS#1ZlW|R@W-7sFAonmx+VI*ExsI;Vv+G`UZylhlHE$c;)iu+|!s^sv zWSVDln{I+*2~M`QFz&&JrF85;`YuD!$e^JS;&YVLo{{4ffwE8Raw;Y-G&`6gweI9R z&+q4y=-WPvyO;Vd>y^O9gVA0Z=i>uXjao{bt3p-CZaW8k(eUhci;tbmK2}JW0wR{y z1UXe{#ME|Y+z=D{FzTUl%i{}~M*8Pj9%)<*PN8IG-c3s`$kK#ywD&f{pL~>SeGNcd zyx&O+{)(0XxA=e7b8$>h1TU5C$C{RSVDa(4Z@T!UIr6)vi`>)NvyQw&lcReU0Ja{L 
z!d5(!VSX;ZhT$fCn$0!GJC0(KDa%so{(uPJq+fsdNaDn`wdu!`jqFbC_N3qiyL_^{ zY!CL)N>PRgvCADQL$e#M*R92NAD1@>T4ZI*(2ZJy^^7PssKd{_5o)Z`s@f`w60{!) z=v%e5Hnzh&ExLAj5!TB4Rj(R<2UqCgx!y3~7tR|*Eay%r^yV7?3NJzHfaf^AfigTf zf_wutW_vhlJ}MsIxbY1HO+Z>R?k4$Q{1!m88YRXQ6ek&!!2azJ`XF^^&b8VxqXvNz zT3X?g54^1MI9c1Ojyj7DktM!LG8bHffl=;d+y7w*Fr=cHkLm$V77kVl2pDWt990$E zWKPsj)xJI0mA?B1BYo^fyNP^B&nc@j((J6gwfgl-K7(#F@VZ$m0Y9h?4BD~rBz?ZD z>5l^frQHV!R>Y&Lnk6$6cEth(2&qajlM?Hu5!=bcG)om71xg*Ib<9%vW> zf_uLz$O5;d{GCVS`&`HSQQtuK`M;MZc&+o>xZuaRnd(k2xQ|-Ddq!52D=+`|WrrWD z64g0-f!_xJ8b5RStP|e_5qX32Bv~x#iCnrVi!?D|`Hjn1{IcG)qQ?va=b)V0skm#C z>>f`R*($uL0||lt$ta~Q*8?i)>d+Cds{VHmP0TpiG3;;0ggQGh;TwzH<$%^Uk|{8c zV^VjmWU#gUY!;S|fe-F=*edwW){2-^2SHO;9JY0G`^&&@a66gLrCkImam!ksng5yJ z*S|_ebe$7uw76F_>9RSGJh?I=I+@jKkwA#;>ETzfD$a3mj_Yoor9#8+Yr8ab~vJzchJ)PaF zD+!3n$#Nka<)QX>Z}(YE2U8kH-X$4)^_lESsmw`7SH0YF!|Xr_@>#jBOGats?rf-H zSL3|b4j2S&IvJarJMPbByF0_G#7g4K#Wat7RMw2RN2gER=?m8PUSc?VKU}0tHzl(^ zT^Qnj{qRKd1>Kb^Q*y3c*6;;%7^4)jefrW6CoA7-*U#0s39xoQ<9?ytxpxB=@J_vR;6Ur^Tf-Qd4;nSIv{o|p0~ zyU7L+*!~f_NjCb!p3FA6&xT!E*^RHWX68>^wk}*+4q0B+{EprvZ*buVC8ySw0hD$j z8?s&>!UF8OJ0=`0W_Ar?)u4}MOnT{+*+?0AH$+PimQ$Wx&of6Hc@H|wIByN~Gy5n& z`SqbZUMsnmuoTzuRug^+U#)YXCUok1qb67x`JA5ow%Vj(znu15X$y=4_*xE0O-5ZX zf+mue+X=+{X*I!R$6{`fT%@Y#7}oG1OGgTJy>7Q+H27_}|7k(~*;S~D8vB#fzSP`7 zeSoQI#2K&83S8Ey<7g5(`nb5NLg77My!W^&No@S}=Y_P5LKpWMB_mY+#7z%&tLLM7 z%i$jTYmyL&MK056-PY@l`78NySI18b6iEkSJ$)YL26$6aj>x8q_8m)lr*=&RUZK5T zk2}g(7DIe_sHOHy$3v-}h7NDZ4U&``o)1fn37c$&2gd^u2by>ZF++QPRl&UN{Oi(S z&h<1cZlTQ<-`sMtFWWDiS10P~U%5AQy}#TvMpL{#W+P$@7)lia>Bmwb;{59agQvma za}gnp6NhvP72*~({KjN~S=I*`L2J|Tj7$nfB{qZ;(X)dC$By>LgX-mF`2#u=R~kt4 zzC!x}>DHpVc8sT~Z$!Mwy1I45pfAgGd%K;SoVZH!Afj52p1#k>+2=LH9J`}aM7YrnA6C>=q}{_^8vnJ7o%uSz z(Jp(+wrd&`4JQMHF&b;8K2B|6gVL_5-aCtR=h9TOfICVRdNIIh@)!B^4)4r3Erh`Q zE&J8f{l$xCa!bGRj0v&;1WZBBYb<#TtA~Ci?Nt-4h>GR0g@fLSMJ|_Vb56^36m{0X z!c4>RMv~c0;*QlwlXqpVWOIbCFJH*GQv@#tn8lWK`r(`Nd4R|>(wR;8xd~r*kNs%t zMm{HWGpjAb>xBaJdAHTU@X8>GvIqs-vHID$eqDQ>w?XCBF?JKATsiG7IGEvSH?9+& 
z+kv;Kn3`{SZmir})@LHcmg!*7Q?y11 zx1fZw#WY>Nuc5jtf+O+BJWW+J&rM4R3K|(UOBrFK0Z!roRM{zPCO#xgCB1L zKJ-4Bp-g|SRi*TdC}pveh+7-a1JrB#BH||7)mwW7sTMHs}0^Ug2Y+;cD%R zA3MMf1cEd)+X)dvDu!${CT6+ndwcpZ>%-{E-3VX!UhEp+yD?C81PEeyBa`NDYh>qipT8*vJ^06x4cU zE*CT!f+=fm-p}S;3UpO1UlV*lzb4b3?1(upXmvHt|5;+=NHI$egQf~JH+z)hy(+{w z+_T2W6_3kngX^?uNMN{!_T5V<=7)_!%!6qhot~+Ed1)2>{F34ey(3?xea}&h=-N;3 z($Ktd**nG6kcRHBESIz+plc;Adp!rbjW-6r zwH{}z#QTZm09AMV?HSAQb za`^ep#tgW@=bZ`57SVUgE!gCYA@ZjknC_*B!IoOs3k=W8(z5>bjNV*6Mfo1;%mn86 ziLrMCsiyN0K}!}_lnDxS&2J2rm|I^8`#hTcP|Y^^m8Nm&A}AqxaVOW?w{!KnLL?Q} zGOU7JREL953VW?0#MBypr6rFu?<|`_*R`TBks}J;R@P|UrGDT}R+%s8M~5H{9-lRr#c%~!ALcOX!X-$fAQL9MJI6wLDN}$LJS^_PkA03G;_Xu_fUZM zGaw!EDJ)+W*Tb1!r(#B&%vI`>zByF*jS@Kt&Ug{x5-#nLicYwuGOi?v?jE<>P(|=(n~EjSR-^hkb6>$COFXvUMRCW% z;1f`MeLTs#Hwlxx>@Ft}tCKwRe-W415$W5KrrlQ}sLXW6Yr@O{GzH`||7<({t6HkH z+w@*Jl}eG%H)b}**O_>0nR2RiFR`lcdJe?^(_vnw#@|2>Hm?DqnhxJU#`-9-RV6Rh zz+GRfc3Mv2NFNze` z!loJc_y6wx2Ey-X#ClEuJ&<2Eu?-a#EXV2eN91Fk!?guMiHDl@l6!R9L|9z-0U!Kp zVLkx-V&22IM8ARZb4Pq7vV5@8n41T$*8-WbWbr{~xRn6>^>*?sfPO1y4Zb8kLw^c* ziZw`aa!@t^2Zy*diN}dD0CcuLd@CSo`R#XD&rDf~hR+$=5dHO$|9F*=>_1)Q7C>gh zr`JkDCMoMZeRv4x*o}SMVt0?E{BAqMM-obyGD#y_FrwhZkr{h*sY+74H@TmkZbS)h zp85niGvjRsY6aE=Hzo@6lWw97)K6R%o?RHZe(7n8Ewz!wuF zudq$H+%X1rNn7nUXo6Z^8#TzgdSyFo9ZLoPPme$M4U*gEH@OIK*KX4c_H;*HxK8Jw zB$364g`me=$Db5elW3-5Rr_7j&3QLawc9CJ5?WowS#b9pi%2JQmunvAIwu?6x3rp;Q`myrp5g2R)X3?# z2_;JzzKZtiRfC=CFc^LvQNgIC5QPH1@vfK~&9Skf&!A@akiFQF*|Xd)TEn zA_*x+F}wHl#LQI=`ksaPt)X;jhux^IBi6E|briPQbWg>4Hm0dA@8*lHQAN+wo*J%;SP zKef-Rf4Aj&DJsf@~=0w=f82E3Z@0tZAcd!S^I;-$3kX)gzO~M3P?4`X(z9biMpf zQ1t~zK3JWMmdWVV!$EL^zoCgYLu6G-S26aWDrrl8m#K zuj{so7lklp?aF9}zwI;%TY;FoD@WakSu27&9Noy@&Pa0VJqESgKpj$ zpI0VniS;HNpakkX(S6l)=H-zkE(o}}#X$*q8kGsEeCS>FMO=zK6%E&`Gk$)nqE}fL zWgyzH*))mZjHOK*+9X z1U07=T+h%H^iyD(h+G%G9f z3wCqt%20Pp&do`&TNjJltfd4YZO_z*PW?8desRc#f9Q}Qasx2G)yX?2UoqvcMCEoD z%1c6Qm*3foov|ML>q@kxN9&8h7>t-Oz2eP3DzKQ_O7BdKHMr zAS-n?^}>Y4*)(oo@AcuMF$*oEa1s{q+0gSqw%+3K+lwqXg0fro`Tx1PV0f>W5qJ 
z=T!eKL12Ib{^!5t4Stg*EbdME#8}26hb;iarK-O^#NNv?x8xJM7hZ!n#h|{Hvr=J> zp}~>h1bJ1j(TH*)e}AQ4;uD7a#bq!G(%MSRCDN!6s~Y>7?h$VzwecWli_q~v*SlwB z&vSg`m|hsQy`Czpo#%t4V$Ubbb9cFcVvG?+t)3L_-Pqt5y z+uvNwgTG$=$Ak6P%KRI9#(Q(R?%HrWM=mP2S>)~uIW9R(hWszM`?;R~#+k~Te!ZNr zHjBA!DnsHIfVB1QI=ya`vur&cZ)+fj$99_ugJ1i(X}Jr_pYlWK6+7KJ+R8=T4s}UW zPH)V-&iGo9-BQC_$;Q9cv#G(hryTi{C)kZ8~kLyt`a!&8qh&D)4z6xPrpf&6?Afh)CKKf?o zR;17S^esMW>a`VEkTdlc{Z&O0#3t~4e zixFcW1qDTcUmGSmXeZQ(+2u;-)>YmnwiNsujtzNBo)mjIZOKW&1ZaaF^*_@~`UxmsKfHo!8i{Gc{D)zFc@Cu`c<2EDtpo>Bx6; znVWPpz$p5dOSled?r^$s2R0_A8T99IvA?QZrR9XmL%%p(Yo&n(=~b}Iu4=hydY)X_ zc;q5vFZ*BbbGUwk?)|sy`QO5af4#x)nuY(-CGzMOBZp?J#F(TyHxw^Bt_5~FJ8%k| zje$flmjGGR?;z+tut-8$=AqvsY*&}1T^^@#VP^uRSlfDsSjj8n3%MfO6XoMSGq2rX z9B=hyl#)nBw}a^os=I?ZG2Q%Dr#Sx&kqxTz?|~C-bq8$-&bRf#+G0tPt&+eMsV!^+ykxE>ka_^mL(>?0R9;8 z4e1fG^74r{1*FUrd&W9C|2lBgFH0mt6vHideDkTV!{aJxqvd}+VAs8&@-H<8=>8j@ zb3iRgKBaOdcK1=Kj82gapj7(Xsy(1*@I&(Y@4QFf-vOhC|I;Sm1*Dp&cP~0r2$pwE z*^Q@;`>aA(BI8Q$(ZFc}PapK0NR-zm-@b{2S}V-zro&okzz|lbaaj&(o;CFFcpX_RrmpG__r*nlToI;l2ROONA?Aa+BnqTy*rlR>fj-A!rQikORZmrqa?|3*<3sHazdJqZSYmq!Ph zAwycD!?hJH9!vn0Vth9Dy(3Mf0EFJ8qhFoDcE+O^4qt~}58KNohB!-mBeL|&x%DR}EO=83*`KYC<>wgFydYp% zgA~N$M@Zn-xS(?-cS^+2q(B>v+e{C#)8r_%@#t8il^$=oIB4++(&@F*lPFvVNu|UD zi;Q=*-$1PgX>$?ZK(y$4ks7Puo&a&tR^?iF(-gy2QA}J5pKjhYv75W%Y(34Y^ zhT#e|M0WgHm)Wn-Xx@LKdo8K*TC8v-=I)K!Do?U6$G)mxr+chIC2gqEF0EUA*;Ul7 z72*6?&Nz7bK>Q092R-ecVYRHeCy67ASXSWZK;%5U5bRA|K-owW`e1X}H|Wix)nVm1%JCE6|F)+}L0V!f00;AHs6*PR&VsoT#M3F<}Wl=2!L`4d?a4w|1>D z49Q1aJ3Lu_Nzywl@@J8G8#*x}Sd@f+h=|Sy)h7Y-TcpRvWD8X9RP%jv4@{jK=o#A`7JEDIIPl7em5 zmp3;J>=`&=79&;QCTttDx3buf(>R%8kX19~f#Ffcv8*Q6HG`bqdlOddRdmE})_tFd zJ?d46U>BkCf^tte>7g}gCek6-VwHvB_Q+B)wv6#)&{`TMLScfV6wi{j6nj*X)vSzr z(x~~jV;ruT`0>v-qo(11Z1mn=Vv)Z#*k4E4`1>yC-`bzR*sWc~URVW(?ui1pytII` z3)w%ghZCG6)R;oGnwebujsd5Y?1icv+s$NOR`1`NocXhBrYkZ0xd@6~@GAs{Nr@8P zP>|fWS#vilJ-}7M@d3#vJ!S7jBFxmB{}I2Zk~ zSDD!CM8tRdw2FSu<8DdGU zpIUd9EDTW9MvZ?MPgy0uFEnUr9?&YoNn{P=L(zD4@8ys8WAmkmrfy&})nOXL^T%vhgML 
zq>-I{e4H|p7eX|?fj*;F>5p(%18xU~jB~;b;AwO%{?&M+{qEV*`~Dh|Gpgt~fQcH{ zst+K7Gd@wSJ22edPnEXwJVZ`j!~$KmtTv84EdqFM<_<5<>O!`$l4)Go8gQbjrCy?S zc9EVG>u(?u$hNaPvx4+X-9taw^2Vsl2VujbMrYPKEX%jOWi-{sm86YXZGGx;V7*AC z1X<#ZkHhV?z)bS zWfEb=^C<2FB7&;^3f~gF_A#sJcD41bED^Wb=Z=Gi)P3+oEBW1(yzIV%)+so76b9Y7 zDQf-=R3tSl`2o5(k2SRJRZ$Fl6v-bdK;X}tBSNf{_ds+dki+BGX@8G%0JE?QFimJY zacKA$-5lC(Ya#b6BEKu`Z~2-6*rl8S2fnnI%mDA=$AH3*LZ+QS|8QbqNo+*aacW{+9yX7IbcqKgqvY6 zV^EOGll{U+cGI$guzX*spuxm}j|W7gL-5Ap6r45VLsLDTQ~2?o3zh02GiDiU2G^;R zIxI@e(SdshZHYLuMFZoeK^ld#s$HfaSm@XW{oql5tQEyl3GUU`U4odG4<%JXj(Z&C zoAuf9IN-M$d1?h)@7h%NVBn(Eed;$bktN0g7+~uy$9iKm#BGau3q_5Ym>eqrg7n7# zVaPx00RY!7X;vgR=IDT^Tg%^Xc&qd)9|C)6RaHN#cOPd5#pX_{Ox=j_Il>@EY6tgZ ztMOc(PiUrDwOI;{NtQ#hBOZ51w4{&In_`voCSE^h>4?J?KG5GcY(ux(odmS7F+UjS z1s9uyxFVXAhNs1@xyhm+nq+rd;^IuoMV1D<%vggRo&dYe^Zw5UnSbZ{g~V-Wc$oPI zH>>(~zYoEIrL^#1NiT@`VE&4TE4lUA@KXVCBI`pM-+~re`Oi3yYj8_*jcZ?x7$D$D zkCpnYM8*=9T;N;-4sEZz^ub@kuJ=AzeXrmIi)`5K7IhkLF z$Ozt}>u=Q>ZJ&vkhKcswP5(N%)Frr`xJeJtYLc4YHL&erJ1*E2+SdB;AX^<3SG=?9 zuCP56E7P0H51Gzg$?|~D4qWKrUxRlJ_LTdwWFj^CA}0Kj*8;eQ9J_p2`>S+UCV$}! 
zVfgf`{racdbUF6dxyvse8EQVmd8y|ZD}(Addw7GP$P~WQu6MTsj{mA9%~>6l#aN{u zV8QlLg4f#ytkj!lMST|j<@5-vo(S0nZ1b@nHH1{0>g8DN=@}b5TiuX0Wu*t;)B?an z^QZ9IN+9isJ+(hoO30jmh(Nui+t+wLuT&IF1~25Jrz!iaoci`#ebfeoE&-!XL_K-h zhhUk)8eRSfpY10OSf8MtAk&AEq?=14NLc#`~j9yTMQ#8JA5NA^^uYy0V z0PoIW!zbD5ofD6nk$XcyFQ^mg^LEL4Z?jvFk+KH9AvmX$GWu1r*5hau7?Ro|?_24^ z@@t-^Y~8+_v7&xB z-r*~x!zbxOik3uwzqoACWueL>D|cGPeV0EAybOl7N-artqSF*}3>D!HXMP!i9h2MJ zEX!0F`o(448#lO@2h5@8r%7QOjb;3c>Q>}}@{;Ed6CF;gGEVlt5hjl*ek$R-?ga+0|BI!Hy5i18Dye7CJQ~ev&`8gt=Pe zE=%7Ll<$~(a^{`{ec_Y+r6L**1SI>K)xu1>XJl>gA_lJMhpKy;hAssn+Ki8bD+4oA z5IkRUT+n1o15TH=O^i;t1CCB*>5=L|HIbo0VNWY6s=~@bAL1TjQR$IX5&?M8x}&4n z0DAB+Me^fZ4_q@n@2=n>R~5jM&K#J%PLX*$p}e7K_yGtf@aT~6hKOW&^}Wcsb;H)6 zMeUI=i%j&kz(5^Nxy~?Ghg)HTBnd=iA1bS3OBh7#Jt+h!lab`wO-wWFwY#D(Ub)a- z-#wom6$UuqX>3otKKD~O_&xAR9(pPey_VUi2i2AX!U_A6^^*2tn-K@kiQ@|HKsj9!# z1peYgtx6Y7Yx+HXZBq`5MJ6~9BLMWdyP985!EvHzX{+Pz5*dnF&jm0hhmt#|fswvR z>5v;SIZki4Z=P%m5}@2G%=^3^XVr|~i+ml<)XMY=PlEO>MLRiCM0Plj>=sr#pO9`kNh=l51Ew+3F^i=1YB$xeDfGKy`Q>L&+;ED)g5`F1I_$PopNF2qyt!8*yYHbM)cl zrp#5ELx)PLBVfdm$Rt2l-EZITjL|gDmrr&%Q~Jw=!dEM?_)0Q1r#~K?hcw?&q*$<+a;@b$!fV#j@82IMz+zXA2OrI5t|&9Dz^n z0~tb&D^GIbk&o0b|KK|WKa$)f3c?;+Z6^;|?wmv&1s-m6olG8)OQQBBk1y-MCxO0f zDybZE6G2*8_;Srs}sEajdi&hjz8`+Mm$xp04Uw8Ygj`qD_~9MfFm2n{Mab=y+^g zSn{^%=tUqCj`Fnn9^Os%W0Un%JU6Qb*ZhSx*?K5 zP)ke>jv?1w=0eY$)LT)T`SqCVE)%E2zgnf!mtCDp+^CamC};BJJIT*JyL5Mdw9^MG z3%u4BVYf)0O%3>zw#* zW28fslN(TCpHqUIDAaRc?1aE)%Y&!p?xX*Uz4wl4a_iQ`v7(@$B1NhS0um9VNGAf) z1q6gZ=mOFNq<5kyy-AlE={5AwA=0Jy-g_@WN`NHX*L}Wy&*%Q?+2fphf8#gqACQdX z&0E%*^O@~gbIlJ7$QLtcf=}7jSW`D{0!nFvg>{Oiiy|}62*S?@_E`813n-JKso$Lx zoe|7uiw-Q19`6mnb^zgRhR91YXzH~wRGtxhsY^K{SlPrM;H7yF;gq4rHUpu3p+I0J zJs}JgT#J@o1D?xRhyAvP0gMU8@PlD69Z+bKqk=twf-Memj_JO;)<-6Kc(z>aY4aFH z9Cw3F&$f;%UaD&SVcrW>REb^yPB!|ot@>gA@g53S`Xe_GJX26Z-Gp6g@rb!LdgN3BId#5;LPQ(=if`-lXUHisH>&0{9iev)eUeRNr__0bZt z;?Ox(YyTVKis-#-q@`wWt#XeJ(btMQ6PQeVP#M}UuM;(I$-z|$d~dkdsYS!;K_He zC?)M{Go)6xmx(d(@#i3Hi5jVou*B;zeKIu(QU(6OMHbu#Vz_=8EU!_pkDeWgapiuA}zf9zz7@gPO>ZGTlb47U13 
zLO5e8{W3fcrXIU`8v?pk_b|*!%qd-m<0z~c{ESVn6jLiWy0+&*6^e_ioF+#IEF2{U zaw_PJ0d>K1s3p7fl*(V{F?0)di(`8C1fDh@Gi1_NfV~O>Vyx1K2SKRatFW;CKJDHS z-no&+5YLFv-E{oa2y9ggc8Ui#gyy{315_jCIP_F1)C@Mu1vEHJOQ}R{bvf_vF5tOC zchm5r=sTx%3>P-|Q`e(}emgbHv~LxVsP`RMUyJeQXzhGt`t|Z6SYD8QtXq&1xMw3^ zu%Fk$<8`x@KEDW4P?B=(btAxKbWX3^_PugO;OYRtQga-`O*2TzhVt6(w=11}Qm0|H z?H}Qufa*q3s?$b2XxsPLTRQ5S~(*i*bdRph)h;g02~Grc}Y9HF5X-l53dmOzFnVDs1=V^nO)sPVAUBC1oM zsAsd;eFAqQbUyJGxHjuKZ0Zu~b575K!gWFIY9N9qgDFF0Q_XB}U4p%C#~?hpF*{=Z zi~w>`7QcQ@aThD3Ppn;M>>=x807}Z{@SUKp>%fmj3-BG%hdq99oSttP(6E;u z=z$3!68&`CcpPAILf4Vg2TOa(ddXcLsOI1!dp96`;HnRcP+q~*qLu~}lyVvHYyeE2 zG^}00gO?dpVxR-3Bda0Sc+lmoi~`(&>>}nH;4XH@fiDAB!lu9L1qMS zS0NUgcuw%%X+~&03KJ*V1w899egL%teAhJc6B%J043&g~lvperf3e%*JNiA~Un6+% zgl7c)sU|^`c&Hfyu+iSYgC4-QrMD^na;TlIQxF+mr*s6Xgb5Ixm>=3m*F>EbFB~I( z9vOuVI|c0RpmBfRZ(xfnnP)vC7{<6coko!2YhQ0rXC&j2O>#v8?Mx3dNfyKNKJBwy zpp(H`pt7vCQfo@%#+AP;i3ekbBa2=wfT}E(JgN?!G#MY{sM@H-uji*$4Oti+2`r+K z`OPk+8<-&<2^W1Y5xcl(FpX*GQARZQCl zWiJByn&dz?%RwVKzU2JU!z3>`KAU97Nu28C;9GS+GVaG<%Vm$zk8Qm6A z`#F|JR=d_gKfN)o>BnymK&N&SZxcwr6M>7|B`kq0OeswXA%QCl9SjJDSMeCA zXE5PmFfh)yxI**vmh(g60#?4FGlD^u;hL5c{Te;{UFH+!RUXr#J#H620mAZ~``96G z#W$BUfZaVf+mQ1B=F5XOZf8v?5)}VExAu2lNlcpb_MSMq;j6*;HXU9&(fdFXi@?U2 zgRwJ$L%FrwjPl*=6L41omEKVRJ{M^BDkgaw@?`p7Wt0EmJ+u(oI6pzu!s6aa z&#G1-UN1I30KX?9+Td2{xhbPXpIL}iA`hOU5g=2Hcv-Q$eB7C*bmRt8#f+$tPBp02 z$qxa(fiPBYC#UgRndg2jrBZzPygiSyq;1mszZ-BUciCL8+D0(t&AN_UxG72+_s2kJ9&LES!AAt^&eg ztri~-zd2<$^jZi0{ft0Pw}E&*s}2JD>S3E#N5pkJ=4rhzXzn9lZHgM!eh%N^C+`9B z2;?R#e0(E6l1`VX@M^-I#JvNET-0fQ`96=W!SE03K3F`dMNsr$|3S?0^Ii{YxzHVR z_4g+(?J&eMJWHQyek$o)1lIfNTrdT=I#V1r-G+aYsX!*DRGl&I;T{F<|INi_*p)tOJK9E_cj+U{{Qr!9=YZh83Ncv-c)es&1k@}GtD24T#@>K$&$?}r&(lU8x14;kO z>r!cX6T9>3+=L)F5<`r1P9&On124nPdK zS_AhP&j>gc(80i*B?v>3;m3X~xZRmKD4}AJ$lI+RT(RD6Mj}X25LQiLIQEkSw!Q&8h)?m7K+~BZFhWyM^o#4-urGd;1EUHn;sG39 z&JOVDE^1D4cPbqN`>aJb%>alb7b!-zRPW?U;|Vb4$|<48e$7BEcy@f?4@zWb<}OFw zLr+`lUB;N^BUY{|x?cJczG$%Wt2gM3WS}*}*4z6GTa&)6vsZWvD7{KGx^*AE5iOQ; 
z!4^SY?=FXuP7WY?PB*HLmqRVQLV8gL>kN=C7W6B5U@R74sv!~0RSEDPw~KwpS?T<+ zgQ3G;@dHUaxHpL09{60@M^kXf&YEr@U1A=19g(bb^pYqkv_K`aUUA0hnD9{XKCBr5 z!x966Q9kQZ>gbq1F(EcwKr{S?5&Rh;WXD7Gw2oMfF;!+3-Wjl!m6asY;f&|N_929m zCDepmrNHqD3Eu~?FP`08y~#>Ughkw3YH#GlaKxv?L2*3?;>2Y_!4HJc0ch>Np2@SZ(8dW&{? zW@wljoe=&6j8)?R;v?`f&1=ta*r8g#vh*5g0~30Pr{a9GHOE^A)VYQXjSPXa~R) z*PD1=;3`O9T1A1qvJa_{NNa2qEKZ<)A($#j*WsjD%|VGq1Lv0y2l4E*EQ_Bf)r~?T z%)`vezDC}fl-+Bj;L-q;L9MTp$&0UQkWsdP;dd)sAZIa*5UQaqXtr0Fi3%BLVj+FYbg zBR2rw(SVi@lY{$HEs15Vs*E$|h(gzNZfDOutQRMer-`+p{?R_=?I_xR1UQuut3K(li`xh&xqTtgtR7;>ib;q{eG9~bP zVAF^1r49&IhyR^Id4k-!=en*A|MYc`vEk${aBJ>*1(<=6Mpv%=fv;Ey53B^{woANF zEwKYzWOR;^)U65vf9QIK=je^V>wN%kwyd0}HTRZAtz|$xIkWLmf@Z@{a2+5ZjB$Lx z&>uKgEe{A9$Pli<5dq72qStc5$uE^8v04NRd5GHmk_Ja_rI~Y`cy3_1b*NkXIQ??> zsWO$fSlPegt?}nc4TIc<$$d=X|t#JPej7NzPUZ$kcDLoTTd5G z$aHvbd%Px|%Ro_{_zteiFCbXfjE@;(@!!$xJy`dR>?`|TkKrMg7C&UU6%S z5(+b0J3PP!AOam)0>?1`p8%s?7n=t) zFIpyS`ndCak_%7%%0^D*(1G&Nsnfm(?8&Rfiq<63IFVD=eD4$1l&Wt1zf*Ah&oV;* zD?y%zlbjn;zJt3MKeHU{izWq327E*7wz^tYODQc&`9XF%TXFgW{@C%SIdO<2?At_9 zO~Y*jo;+21yTI&etQ!ugU)hRvZxR&z`K$#9%du$*m<3(u^(wwQNA#`Jg^lV_Q`9JG zEClvrv&*7CI0_0Q#xhv%u>iHL!sUNddi>?{OuKZmqu5i)$XjIC7azfJXr&~7Tl4he zrtX_@rBQv|U9*UX6CNnIIW9 zO%C$CzE&0?Yjnz=_Pqd4JhM?6?~7?Ab_MrM>Oq=S@pM|pvU+HB*q7DEmfH~?HR$%b zqC$g{SICueU4~%%J?3~rA&rS(zAc3%qaqle^~I#iLh6X&`1uZb&DVuP-rtPx=|s5? zil#c>h388n>02W4S8*qvg2^5xHO8SA|QE}+8#}drUz{Mq@ zzubdis&eQhkmxp|Ubf!?c2fv+D@P*^4hrveDwvtyQrn*_@*?aV#%*$IYbso?9TOMQh!Ep z082uy^`)S5OQUyyfD_!gi`K|mQc^Ggjf=ozj`U`G^inF$2uQ{@AZVz$t#cjTS^kZ@ zRB0K}I~SD(&e5Z8i|kq-i@=A;n5b8Ntg_xV%)u7&S4NY-)?HToz!ola?4)rP0-2vZyzxqpON)x zFGYVS(3)=KZ4E*aS15mQrtcn77h9EUxq`id=oo!dQ5;9(mRTA*_$iTGGgb5}=Ux2W z-i!b5;r`oZ&aP?%D_bLPoX2rP8eb?YKsV%4nE1iMy zRVuu{qg^qSmk_*c;rz124VEjpFrB>ny2s8TOH0h@L0)jhxPEbj&*iZWdN+cqb9IfR#t3*IlOpaTGSr(rE}H{&wh? 
zQ%owZ&M7@BX{Fo9+Z@?DF(s1LoyN49({<689nVG(I|gwJEl}@C z)^9bWCg!x5p}3cK!wrxubab*45hurmvGJVmB3Cu)89tC# z0`g0tDq(GZ7$NvMA_d^14PL&;&_qq3xy8OM^_YD=(pBaID$!UQBM)p=@bvZ51OEVg zB3tc$U51B0BUBhJ#y!dPNrhlOD-8jVj=ir61-wQ+XifQr6oE<9=Vlsr9mBrOgNt;2 z<1_tff8xF;Htb;3DhGP`D}09zKrZWQJAE)-abs8Di~u9IZOK2xqg6q$!DQ$PFdt8b zHuT6%N1TaW!;0{C#FPfnYH$DX)aVRh!6vzyN+eC?Ns{-h4E^O>Jfy@3(W!7!jsSMR z<^cY!+aqJJS4s>f3Pp~WN|6b_>pZ2SB%XP9AUCiWpccD^vNTZwg}?p11|Wax{+Z39 zT8RoeE`J@l4>=e4DN5~m_BP-3G@PAuVJ631m-{}@_Z1Vk;Q;vG)DZ`vwRojn!F5pA zK2BPraL_<-$M4`vpEiY$1&#IN|1dV`_HjG@?DzXGXzI;vP-xC~p==P90p^B9 z%0EC`{Q@;?YSHNS%j>JQOW?d4 zHt^XMk{#68l1OB$g0fCRWzQNrw?`E@B(rMiIELm`XGB&)qcbF4)V!FRGy*`Mw$iR9MnNsu@ z7f0vIiEd+bxRd;kAHtofHxE!|el}+1)*$w9tk%J!Kc^c>ZcKk#H(Dkq);;M>3mh1z z*C~}jCY!+~=5?@B0gd_#lQ*Gcn2?s0#(C>4@aC!iybzSLSeDHGyUG=Q(Nyw0Q?*E! zREptNVYK}{K1Dm(bdM7{a`NReK62t>ty!pYl5$It`~ zi15f`&w(GCysZ_@3|+R+%K=>DTXJCdWC(2QN)P6S-4D5r9Y;NfZm)HGf18tzcJmcr zqpfeuMa<}=xPqne2hEF#pPs-sG(Tg7Uk%->iO{T!=6Tm+)!U+?yefMUAPi*Z5ezJq z6@E6`UKf|YDD%Q(@Zlw;`RfduyE`29o_UTfgBw&F3Gh`EH`ytqseA^#lGz+Hf1YUR*S?8!{|Ms;65cuhN z)K^q_E^Cs4qd_V&QUjJYiy9Nj49!bkFi~XGfBjOt321T?v~0XeJ;1EHA-{AIv7b`e z@sCAbD+UMwjCr0ODVcF+Uln94k@9$^FNPJ%Qw;x;&QQ{ya@yXr{Hfdg`p-Q*$h|!0 zfUOz{2Rmb1bcS#gn=>o;-4b)h~hcQY!Qtkmg$O}D`u3pEK$X1@_Di43pPfU2IEa6 z5EpUG37_ZCI1sP=J-;-GSsPv%P^=fYgBSnSIL?7km{x&0$`|9@ivQ_Q-=2aseb(ya z>2NFaSynCDHYA#P(}Onna4}RIL}Z_-wpXvcQQ(&@ET&8~j?ArAKD7VCG4nhW%(Ly4?x91XPO2^WGuZUEFeelrSv9B}9Bn znJ#sG5Uiu#Q~S;OS0y(H))kDVo#E2O-@_0246sk!SN7+N;q@icFG8~|_$b+^C=@CP zM{R8|M3W`GrPbGH+*s9or9hP^?k-VnVZMl%40zxf!9BYLbN44Nb**-`p`OeD%Z;zR z#?19WXS=H`$9L4+N#CJ0g&vS|u0|G00A;x@94yg(U-`hV74uGhM6XBbn`k=Vms01! 
zGpkV7VPvs-m+QF+2OWqzS^?I^F8Hz122_2Twr}gnlG$#!P{lM%dpMo^RcNq{>YKhfsosTbQP z9v<$%<_<#d=?#Axe+n0w>Ko?0cY4H7>9-&3=E>WuENS7JOwG9jJ@U}nmNd`LtAw{X z0_9xw7U19CT`q7=k~}_pBRWohbg|>zjN|PkVEv_oc|)Yj@@8PBqS}LwOOB>Od8Rs} zY*}q@_|I1!A50{Qzd`oS{h)d`@|e>faGAj$GMHntEJB!v66!4z5NReCfp)TZyZwBx z>1i4bbt*=U1>JyD@>W+2+;`F!Rti)jf9wld%K6~;QJ3(ru$W9|R~j?VA?M}o-94j- z&!mOGIlbTZNjmf;D+alk<-&9nU879Yeke==<2=~;?+o_;$zwo_s=aq>BetGyTUt{_ z#ffrxZ0H8GBdHxyJCt>(RdWSxmp>K(Ow|}c`wp{w$)$(}?XSlS8+h0`o%4dgPSKkHkwYwp1MMmm3uBaaSC(ab)GbcJj#EoKx!Tl zSQ_1PvU(EVi`bk{fdo#$bRc5iH<-*<>%)A+2)!N-b&djig(A;o=u}!fRJ6>no;xEj z0&aXohgX;vW=W@(R%%g#FC%vq?S=|vmh9jhfRty|wSPusIR8g_Ly53hJ``gu30JEL zak=XqCMQOJ^m;oh8B*Cn1k`5Y5N8DMz_@}J7$E+;|7zyo)bNZT{ax<`CUgy%lwnLm z9izR7TIsA9mJdSaK{qtCKOmN3&MCevC-umfM%tiZBu^RQ)5&Hq@lIZM<*^W3YOw4p zOD!a-ErHaR*HTY9qTPdAIl3zZ%58BMv|Y)NZVjjV$^>iRo>vrk1n?hcQZjD_IV^S0 zN9=K2dat43#`|=k|F9R}Yt0EMolv;T10{9_T-0j|>ZI7*?avskrb2CoV+(rt;A!wA zpa!nnz&>5`IjTR&&b4nJuP`R2%(iD#ahxVd(+VtRX9VIs{;=H(X9UHm zcrrl6lTDgXm~%jJ6Cx%_Kjd2?4F+-p5-Qc&1F9eWI^KUXke)*^|E*6&yP3r2qcJWZ%s$U2MP+OWz8iYFUWsE=rk1Ma z-KQo&9ZHD@l4-y>jXr+pzCv}-9+T^5p8=Db>Mr3Hu1A&o6g z7oNiBaEkrsQ;)2T?+R?o02w0cjePK84(aK6*p};Rt{m0)7qe;Vgo@6xXu-H(1>vCr zOomH~uNz{uTqt>Myk2DW~rqcl|fC%Jx*<>Azb^&jD5T4QfRC>h58UKgq@KhfF*HB+qnRgp(@f z%m08`08ErLciRN%_Hm|rWy+P z+;HS!G@fB3#iPD0uIc@=O-@Ax$4K?aBCbqco-(ci?_hJ4be7cKg?;N=P}8#e)1=p& zBa4=0N#Gsu(~W%u>90tmbBUb0Nzv9e8WE*LO7!xjSMk3z4ys$4o_}j;eiEz3mO{h` zeR+o>A47Ud*wS!fEc=KPwp3{MmUe8PZJ@HFM-OO_q$FDh*)Q}ht2-m;uguNMR~;f+ zn8)~xoqTR1cfB)j|8Cv?KW(6CyHNdJ1eF7?IK<)Kk^?Uc&|4l za;q@yDUJRN6k9DuR`b{Iq^(LvCqMvhR#@QowFNf2;cN~jgfWKJGj!hBKKDwp=avnX(@eOEcmHxni5GZT*RBB=Z#$jtU5!Tkjo2|Wt!bVA)5$G%7 z3bgZUa=O}el%$gAdCan;jm(@;>Xm)Mf=9#g!gF?(H>N9TjFR{P<3(qvg_PFlw?x+R zk529Ey(p4%d`hyqs7VQsSCt_-|Fv!qBMu=?o%FL(ANbD_+3?@t?bPn#@V5>Q&b>X5 zY(3KnUP|6)SefasL{o(th0E=^JqG+hp%T~I9;ueql2!oDeUrT94B(yHWgX_JI^}_G z^R*#yRy6`vDgyiSu@|3AJp0`6fletUzm=Cj~)XjISyL zm=9^+{!x(jFuaTQCQoq{Kqk@*A;{ZO)h-*bq2$)#oAeUZfWzc^0;Ow*A7~|-wKtKd 
z^pOypoDn39&}l;{1R(=qtnvYn%;1?ygab@>nEJF^?NzlOW$b~K66b}N5Z8G=b zg#4m4j5HYyKDXOm7p-h7)#_pcPr}sc?>Ee(UvX3ACrK zeM*#OYz`xjP=u?%I>T0Bk%b=FAJS;u+=J2Kp9GktUx+S$wI%A$&)Lic-Ki7fClvXG zb4haCZfZiGCW|2Y`jCvL`wW$l`(Zz`9_m&lX&Kq(1fn`kFdnXu99TmI`nFN})+$y$ z(_sP|J0zY5Jya8=!~h=rvYE2R33J|&!Dazr42+N@6XSd2{Z@V7LaOxkT=_wAFjOV| zu_myJ9~ujES$GD#`PYB8uv@T2xp+M4Eo?uZ8ipORKK*b;u;psE^ z1c#8LFEmpoQN~EY$s|&{-gRV6*Ix;n)xMr^T~SPoIg8#&s*kzb@XEAGC(!r$;=BW~RuOu9ks6ymOsul>NkY{(lG z88#_kzn99&_l8s8P}pr)&G#enRF4SLD*xFm!_skkd!9lo_Gt^t%L?nMl^~N3JxEHi z?6FnqKnr@>0bagm!7gUx^~$}J&qi?;QpN61GN;T$Df;(2`bgm>X5`gW+Qnr}#Ghpd z6I|7i670a_%lw{kxJK$Tv-beNZN2iD69}(E)Zg0rCc8_&8gmO|K4|U;8}JbL)d=t_ zb`!|!wQUDLd+p+X7c6pK*?dH4{Me>GC8f=`or^4ZD8GstjO>Fpr59F{BS7l~@%%$F z)lU^erMZ6hgFozz(tUih3EFC939apNa&4lFfj+?1C34k!u$TH*Py%}s1=kM95?JER ztqecB=aPO#sQW|()<8B7(z2#lQ-{Qv9KNY$?`oH2g$W8Uzqoz(nq{}>+wjZIyv*sd zwT`gs&Jwa-I)aei0(L8-utn!jfkaV61nP?)2N%}Vk_CM-Ve^)ZtyRoHyiFBlBa9Uc zwIyPQ>i!bZ3KcAj@2L_T1Q4Wz7Sw6e575%S2l6xJPa_^(24(mgzL7P2MD9hv->R*8 z&;v+MOh^rP{*Y1bcQvaq zV-tnb9)4#f6zCxgb%koUPHvc{1bBJ;=jR9b*tD`fYm^YISjwWf&K8N z=ZAtn8Hd*t#TW2$K=rSX1wWD>P&pBL%=Yp3Uu2_cBmhPth^yf*XCxIIt}$bdUzn(+ zgbrr(mL9V&;Ja#MUZ!>@F z`2Oj^J==4yl?drTN-BLw55)y&396{yL4PYb+N30Ntm4WD~=QjQW@7T@= zV4#EgeeyMjm2y1ENPZ3PY!~n4IOblft;Z~ecFbcJiOY#kYp_!;Eg~De5Fx4A3x=Ob zB;|`_psXor27k$l6LO#l@sfZyRj>iXZ~@iOzl{ZTZkIjDu7I$hS@6%ZQ5%Oq$v|!{ zzqk_Y58ivA0BCp__yls@^gG{bO2EQ@uE(k<@S|s~*{!MK!Nk$gTlnZm9bS1JdVcop ztQWx}7k*(9f&10areXGjY+kLa_De-cz|s^**@zw2Rlc{Q9iQ}N#v7A5J)A9UxP#D2 zen$=j` z>*dK7Z>5+U#`ns$Y2ms*h=S;+nXq5b`s-<45f1}5vOi| zj;}S0w1tD7b8*Dpu?rrfpWzhYZ+<;)foHGXh4>eASJOf~>8h&Ae=wE=;qn^zjO#BF zh_&1%)gDnz#=tZNYV77MZRZ{-l;yEw(?aGtGNbN@RX_RAQ?xK$oF`0IFphz=2x@;+ z;I=4ta=KAG*;e*M(7VxdN7zu9#3QTfx?<0>K`o8l<=E`1ixwR+`9vY_t1h>0KzSqP zf<-rEcgtcjbp6~#skPE2f#)SfX_N~5Pv{@!D>n+yDUJ-6Gnk^6a4(k&QWl%Oq=d8 zB&etkA#}b6-s@`!&&4DV&DK!Px4El~>+9W=iA#9Uf=JgEyZx3l`r9TApMjp0fUw85 zMfLFAnyNmIt^KuV;x)@Oc^8e(x5U#xx6UyV5~kZa;Yn&o4r&P@gO)rU#5ln@E0(I_ 
z*r}Cq@X%i1UPMURtj%Oxy2aJWt^r{QU^iigR!I1IaeNmw9uJbme)(|qnn9S%Sm}=h zbds5a(rt^w>Fc}9$(C+cs)n}nEVtuiE?ECiwWGauE?kg65FHSKzX87qop3IfE3O#; zm++2RS*gfM9QUgy@T7R9li-jFiC}R_o#;dHC z=gXPsGa2L%R|k28fTl)f^dfyc2Kf(~%PQ%<;kM#Pi%aEbi!JK~XbGAYvb#*3g*1kk z(HsH7;2d3CWofMb6CECT8ZP-e`gu3bwFu6`^POQDgEbCwiMAJuD8^+feyGU%lHlJO zkn}P#hT8QYg&Oe;L-sSYn0ROv1>l>XDLdb$3a(~?{3UM-~RCKDwS|uvJ2!8K$MNEl z|KUDDkTt*kRkECLC*^Hme5GPgjx{6i*wGe%_4qxF!nd@rBFMDIEl>?xA8(RZ*9a~bN9L$z*q zpnyHzTnj3uQwrZ=#SIfQ*LQK@D<$v>{P7kx*ZIb?yNmP(L((@Di)$Ei)@zl$q1uPdg5O4+%+2znf^RF z6NMzuhupPPN+87#v{Z=lOhVQ}Zzc2ThF8IE?JU=f%sZ+in757&y6$__z+)Dzm0y*1 z$z50Jvbx7FLPz>aG|I919{*rK0^NXs$yIShF7=;SUM&8*ulCPg|6sPkc=K}HH()t* zE%&?<)?XDivK5MtYLvL(82~ze)KbSVu+1@BS!clhUcKshc~~dgx6fTjrH&5?`?Mx^ z$lDClW>Si_v1%!rm$>yFPMKQ69BAbI2(hI?=Ne9v9zF)X8LbUhaEqz)>o6~Rz85?A zc>X1;P(}U1pljb7P&^L=Y`I`fGR^zGe9*lKr61kzXSBCyd-W>Hsu#-;`Pn09Idz_$ zLypU%Y$;VHR$cR=VqcTuliF`=iDJiovmd7eIV-azRRYzE;??2uA@4>CpGU}24D6-e zy1harP`_$kmpw1SB2`^himJY;t?kSC{AO(0IPuf-A0s{tpe|(J)_8*;?}Yo&Q{9-9 z$E7S;&WD(JjdwzsY$`P)nV)@YW3p+n2~*D(Hb~aeVK14XOx%*3m%hd&*K?JTB>DC> z?O_j0Lh?&K&>%2zm>08xfn{MnO4LGL6~?WP+rJcJ4U&=|RrVqkF$a1rK3*9V=17~K ztUC=1WHpW|yu565F@c1TIq*`KAX|QVw&ohO`^brRgVuNhpDg1(?W||oHMe1R{G&cZ z5>wK=X`IDiq1c1G+}O$Ux$WrBjS;P=%YoO%Q;K>5?2Y5q{kO;TWL~+=_EzkveJ~aP zeQ$^eONvtGpvAa4KD@qJEW7xip(Q$kyN%4={$;Gq$OEw!5*y&&LjnSFGuG8zTM{oP zXHK#=r)|^;KvGFtGy8L$0}gkTdO04yWq(syt%1J%Qi`sF{v44^H+OK-BY~e?VnyLP^FA6$t|)!5)1$dsgKxCU1y(Bteq488)Ypinv2BGCP0I7e~;PO8{^uQpm%_)~>OfBzeo`&J1oQlHee5`J69$%nH(zUPmYX}W>!d_+Ey;jxx1W5@#McXcwB$?5ucUr!=Jq=f@24dz8SR0rP-Y*|rF zHBv>Tgi{86Y5V3dd3mV^{aCt|!oW@?02DQR;C9O3CrUAJx1u>ZrToqO{Q%q7LFDx< z((D_fFQe)c)$D+NC3|Y5xDVjNT6?u$d>DKDeUqY*K1uT4*S3@lduoKgA2bPTd&Te{ zbtl;(q+Z9X<<9~pg zar3+go5iUcQ>C@wBDdDI{}2m-(Xo}($O1evNnlN#r%Sy<18EU|wW&9)fcR;Dh5hS1 zfeXn1GE_<$C@s)ko_U#6zvEwB9?qX0z;EOz6(Pip1j1(5Ye5I(H+Rwa!{X-q)wBy_h7tJE&{sAy0h0XN(zkU!kz$+xS-b z)sAz~-@2uoc?p9Z+;H^P*W+nI*{vON-5w666q-&x5s?x*wh3D|Eg?=+yRAlpd)=x~ 
zfh?~|$UBn!bn`xUO}x}{Ll)Dlt-=KkWsSo+)_xA_g`)C`upC$Mh5EWo*GeGrSS}oYi_HPM|C)=U5~nu%@q2= zDGzmS9oRW0bvjrlHEzD+Wzu(@=kXLvhWtMWR=I5X7w!j|*=Xzef%jNZIGDy6!J4)j zfK`8ppr@l-)cA-N@k~HXHHr`CIr{TB-r+w4(1<%gR#g4cPOv*Tb5gI~&b>j40AcTM z&G<#_!@YQOc+3TXRsL=+PO44u&ll#1w=uMq!Rqr%@{b|e%(qmJ{3*JYZhdcyxu!*mLS;ZuZY z4DDQjzCUv~d*+f9)5C5~yN|s?9;yKJ1Up%p!5r^bRFdfXF>=e6QoOxH$Z{dmlTd}V zunP)`1>X?nyjKx(D%n2KOgnH1`K455!M23;#fJ~!G7CWq_YvC8jpAihS|)WILgRr= zAyh_DNw=iv#2-!yY$f4+0-xy6L_&n}EYLl>K#LRfWnb6)Yy;k*($%kC%2Yu<^r;u= zU4b?#Bsx7=&U&)7l^#cw6fZkK`qw+ZU*)GIQJ^4>vscrRS1IY|IW9OL{#1_qWEC#G zD)0>_Lv@sQjMYrl5|$Nd>f;&7*7uUvgm70}f59(@~Xcd44J3C+WVo6LPbxo8;T_NK6ex3W^wh?Hz~nNCX8@uk^U-uX7NRB@v1 zOOfwwLhOPZa>ATf8V8DGtc}B!?med9DUD5%iBC=W)Dcq2M#5=1m_t@%6tGRV=PJpEh1Hu*>!ivAYr=7wdS|0mm#tKnr%LCIi?*(SAcXUqYD($fHVLOsvv zEU52GIvCH>$Z!}}h6pJHtreg0xnUz7sU!vi+)N6f zFHCEs+WP8~e^v$i&+qbrEi|5Lv9nWlq2m2|?aC*I)pEsyXjh8T*=%zkYlH!1=c4v1 zs|3E8j#h@fPS-eOn%NUG_3Cpo+}Qqh*Pp?9sAT`;crtlzm+}a^N0~_;gh=xd5hCsc zT&;Cr3SJFwoqJ59o=w#{+CP!I4RCb}l_kl?ZN2|qA&jxA#e2#tCn#KzfLva6Xp1MS zT=CfMh*4J)Mbs*PUAl>cc`y(oNkJ%uw+-EUIM zgxG~3wCKb3*DNZ`W=@C=8($2d@Tg6=KFG*7~+t>v2Pg4hX)q9-q7TFN}I zaJY5FT2>Y!-=L;kcJ>{*;`{yd0&y-Ch!(9S&t^jaYsTGwmG#M2`s*-X=^qn?zs(c~ zSN0;KhehvoR~aEF3JkANJbDm#lwVkNvjbaGxDf*CUCn>KY0qodk=E<3azY=U77U_T zXw=h>v1~G3b0o83(jPP3&A*=!9r20(yJ)FCW-;4;yJzpYv(K4(erM+HKOjF+KJvZqs?WRDvq0A%x8F++-2c4de56Y7 zZnr~vqpA8@zquKGY?mA?(T26<#>k?0o`d1(oh3G8LTHnI*Bx69gQCE9HaQV&;+eyB zXME2;$E{}Fd20=zaqL2KKEAc1mrGI|;vQ91^cn_SkiQ_AB0Xu#bUibZ1O%ZYbX?X6 z5(huaLkm&A&Q?3VUcR}D)ymAM%AUVq!SHUrwUeJF>US05 zi+A>E1Ztkzp9*RwX&rZvY*0{3Qi#~QAj+*#P~A6O84D>AQE*?p?Xo8Fp3G{AsK}{` zm_?1lrXz=k%54$xj!aVO1s>q3eACHUD52@}dD#<_D`+)ENnL4ZNo1_i88^E|j-QhW zHP5c(q>f=b|6JGzs>n3^ z9j{dC{%SSu$zXpqXW#mJCa)ZJ_*m`(XBiGLn!A$G+xSIEQC zsHSi*OKHi`4RcX5y4MlvQ|QUgBPoSp(D`Y*Wo>a{MKin5-}sPuoE$(z zpEKk4I&wFXjf9PSczbSU0@1NGc8PpCr!2XF1Q}sZf%=$RX`mIp8cuLj3Y=Q#Z0>tx zE#vR>AlG@yR7b~Zx0ZWz!|00P@aLsdA+@niHacG7lr^o6cjBfeEQn4Vnoe#!D5)yz 
zwPXsOldcG6l$r}lny36Qek%QD)2$(+lp1)Fi7aFI3>zD1q{EJ$#nz)5sS`h$)Wja{7WRD15Pu3W%HLYW7$c7+90ck`yZkWFiz z+fA1p40YDyn0GUK*ro=(Y=a^gJ3W-xBW8Ct$C1Zm_aeVACw^qNilj?x2Tq%aV5IiK&d_)~C&h>}Ts9%83AF_YdW~zk=M)p$;=96GGQXMHV?P ztzL1+7uA<2D(wUJWC)uo@N88j4^%JKx5oRj`&BrdXc263iQ*)5047sRQg%m4QWToQ zIbW0>J#l7yCqp0cAbR)N-Oto9j!iiV@)5otwk?Srl2+0HzWy9G_{|)rmJ*B+-g=I~`8{?HqzFlp`s8q$?p^*!28}6+xNx!TYPq{N#yRu657_Up}U$d?~o9Q}oRg z&zA>!xIDC(hESRW0gHrG8GKPu=+{NmbzS7eHT&t8UhZAeD=y}#rcMyUkwJurH_mS`*C&y#2?l&+pHTf`n~4UlNgsHPyP^F5EP7=PBdK9}+SwFDcPv z@_Xys9_rnq7jjhdx$B`$o;X;~I#xCfSPXvZGh2<}q(Lkd1MrADYh+z-3H)QZD1#0&p1< z{on*~4yhpGUj~>sE_8Zi73XbThW9ZT7dhW}mZ!(ATLt&%4$xg#@Y1pZY^ODoetc%E zHlzD&c=zrEy@^^0HTlu$3JW?(ncOHVy0dMPuRm+Ok*DizjERTktS^sqCQ zR52RMHQr5v`9TDko$TnQs6)OlrHtNpmAX(%uDP`B+rh6b)Ar$d-F7`=Pz!+@(PaV3 zy=*BF4jwS`zB2FNdP9`TI;7q3D&cx8!`fsB z&r1yl4N)%Dop^y5ZMC!$m>Wu6(z`w^J7)p~yLT^{HciJ2d8PI)XsfJQ-qN&Umt<6Z zpFV-I09(bn!s2_2WN0n|LkJ4D&a9<`z6`UfTbR_Ah>E!S&|En8rB(LRc_D)u(~huPaS`IdULo zKkm%@7A}3(kLKP5w8{LdiopA>nFCAl(lxMHbE{9Q2i(W^%Y|f^_Sa*GNv&KHxW)zv zp73|PZVNb^-kalga26Z11=#b9hYrZKGR@6r9Cnx0WUh`0mFH<$w#>+j3L+mS1U$>D zb)a={xjrq+`D9CWASKWa2_6QD<+P_;yS8KMDG9W&hDA9@TTZffSv*}NVs~!8*8=#a z^ViSQV;mOL1X#}#q@HH!0B9cF;w`)gW&+QzV4?LyS60HE!s0oiSIuq*8@|FJ&CIeZ zSx@p^yZ5SWqr9S(IccJX?FP*y)V#D89%YHz1;#6~g&YD!Q?$yEy(6H8BH0G2u5FA4 zIxlDt%?WXPxNpx{55cscbsHFBB|HiVd`P1+r=f4$w<%I5F(cK>Gv>tsW1mKGWgINw ziw~C!O@ybs=o!5ga(?eVwXKEqh~)TiMW1Gqjh9?o zd7&v)x>hYjyh;0a1=F1W8@KOGjoEdbK=Jd#o*=Cb z9!;@^^H9UOYl(h4qeX}rfz5E-B_SsKMSx6tHq*%mTB^45v64UQ!EE!MKl$L?lOu_= zwe&}Qjr-xMM^V>VJ=ycQ)|Lq9(Nx=Jl`OL`#03W%F>^|bo}&J$;JLMXUm|a_2OFHV z>3LNL@aChb%f!KkKq6GBFe86u#p+m0Q=3szs3wn8J@)pZx(iEAv*qgMoX-vc z()aUJ9f_JISC=>}Ir{q^Wl~@g9#){6PdsQ@)=nn2x!g!}%tQ_JEykxl0%{`P>K>~Y zIYf3IGzgJqCrEwL?^N{}U3W|{c09`WyZ2I3%Y21kupG;xQ}aA0$_N*iJgV-P%{emj zxUzBONcm!%jZf1*j#>OU$ySDhn7nkbeMZrD%o8Yv=*&WzC=hd5FuSx>a@pdNR0@hJ zyi^#ATw~1yr7y6AbFHjeiua6mxh~8(mh4!sJC1!}iR25SS1e9Uqv68q$T>-f zo-rR{AXm!f)yW6y1ST!JwuM;x{h5zgm1`lnQ3h7a%E+?&HDt@A~A&&@YA6GN9797}T; 
zyIWzaQ-!;@#(h6vP`7CEu?12ygl?Po2obmq`01Q-9z6+>TvdE|^d4(xR#^&w1Y|z} zetkOm@B9-Ksx6)Gsd)Fn&9h-XAoRM6C_`c6hIsUt?6CsPo+X|Joh;N>ug6FlS;*Ll zKI%B2_%;jAX)^83muzJG-SOIdCX{9Tnil7IR?Dc17?)A^Ehq?#$z6+Ojb9N)G#~Q&LIo zQ(Sv@Lv(Da&rcvO0j0Mn9CBgFZrMi_K@RkHqNedi3gTQWipF7T(L#4yb49MoBOd?+ zjr9YC-yo`gSa+4<+&HSTUo@lX%OOY`{H4p1-Gn`gq_V76?837-4^e;U-ykB*q4Jct zlrKl{y&_};K)>A@l{a)#WRE5WWCO9>D!~y(8sRSoeg)k~T*CvvZ@khIz~%t`6LTbe z>ZXxy$*3> z;~Nrn$nR~Y#NLSs-y@3v-AduIlmKP9iFz z1@ZOt)a45BgsVAh(a0!{E9DEkQC?IMEm`pH-$=xmYJ^nweP=YCImBrLzQ|j#dlN?u zxU@{(KlRMWlgh)ESMiUEStfmnj%&iGLt7ozdAa)#Ay?sTVZk63rE)J!ozd|uv{j(- z7%s|I{0FscnJ0EmHao!RS0;ei71cPk%W4vLClJ76Ud16Ob+Zute*ub@H zIhtCCrqy>sNxsj{wH!M-)ZW`Wc0cllGSelUQn{!QL4n&w#}Ce{v0 z@<-zH)-9yGAthJWCPL;n-K+u|D`b9emCFK&Cr3>t1vmX0Ci?ldLL6^8zBqUHIYIh? z0>!CD zjkc^6ta-BEL-`|CdNNPzjzxiu9$RG2Uw*A)zPK7v!EoFrLLI0(|#G9iA+dORK zFnZi)&Y2*D7|~{`yh~!qZ7110lJtXP`@_yr=8aSShsfc7u^(Ul?#lkv1}4zx3OcC$ z-$v}ooM&}OhgXEm{U(X~j@QJXK9|Z{=&T!iDe>sQP+6THHcmVX2(yysAZLFvxYecK zRhBPA|9fZyxBribZT@WpLLkPpuTke#U!Po59TuJ>Yh%gQUY`CwqlxSB%j&0{cKT_n zI_@g$e=tHy3Lcg%DYGcspfG`s4tZFx7tZ_06o^*lCFPr`Zlk5nakvK%@AGE=QPbUb z^`dpS$r^S=o)}Ng(&Py>oKlsg>y+{3!i2S#cvY%3j|AF=R|18{S*^~=) zO*mAYY*TogSaE$^hE|+$PnXO7J!tsmjF+DFCFMUDg0Hl248<82$8v4rW3jein!cP_ z%_wh0!+|FTVN$UVLbn6%p{V4g*Zyb{Cd*XX2h;!8CcEkTRx~32@@(PX&6#Hcd3ia< zyNYrkG1uo7KX*PWxM3bH4srx%u^#*iuwaD{1Ku3T>1FnXGdrg}aw5;_bnB5AK~yv~ zWWX(S;CCREKehLN0zr_8ABOEXG-?|``^?#&lOskHZdG!J0l~CKXU~MvpQOl%% z^Qe>CFW*gow?O%?tQ$YJGX6Yn{^xY)JmZGY{S!bBkgCc`ehM3y^;`#GkEX}zZBb>v?F|L|4iNEPd~w6fSBZQZ@h zZH^ToYy)Jta0x%;mMQ9;-d;F9TEmPt!_Ib-u)`w2N=tzQ|d$*i#S(F)K`%1PId*A zM4y6Jdr^dzJ=M?(>%ene2kV=LM=Edq4QvH3(Ga>Nl!?Z;KsMtc+ zvv<_(HC=`;L>mDkO*!N&Tf9j)gqB5IC-I(ai|`n;^5+!uPG&#cd-LK9;4BuQ(2 zmtdIq=_#>W@0&>;=7Nd^ZyB9}cptA6t%n0JC3r#~fGN>NafH?5dySG*1PiK5p9sS= zbmCGrjxPBF_&NYSvqt6h0p$gd7_xGmxOB54xIX|&1Gqcukc`*RH<$m3zgz0exUI^M^!091abpvwb5<>%-E+_y7rpPrH7;FRyP$9Xma zAmlf6l0h%5kUQvlF4 z#qm28e*Kh}f-49i&fodQCE_6E{N7z0KVsO-95|`4+`bK*R78A6rynm>0nf`Ajb)hw 
zCmUaOW`L88(&o8O#~BmEf8Nl)euoxC%neLzavZ>#6>3HgGAbDi`~>R8IvR=0GUC!m zMxZQnfgc>KEivdE4d5_qEaWT5@YCfte+t&0n(|*)eJi0(z@AofZ(4rW{>t?Wj^7Gf7t3#? zymE^!{=fk8DyOwf7J7iXF2aA~D=18rtT(Rn3{87C!Gr(BVZwh}*z=!uKLXhgW%|~@ zw+=NXYa2($F4)`OdPDN$i=V{#&uE-gQ~!|4?-@8S1hJ)8;VXK@?X3D&(2?w)7ViIM z;j(+gK|d2X{r#gdTvqWCYkwQdIQq8|eADtQausCMUtS%w3r&`{f#WhWj)yqf6{v$Q zk2`$@#TJ@qXdUqDj*g~Hyma)r-g|p2PbU%YcEDdzx z6@jS+TZ$(&F|?v$qG~GhZl(rYw<>?c_5m<9W)b-HJ7Z)1{hvSeB0wm==Ltkms7uA*$`3;ksz^%(dFW@_N=3r>Xq zbA$ZOU?uLQd=)1SXaOoZuDaZDPp0rIXcu`1iRf2Qs1bqQP3kUn7&lO}PiO0v<+Zgc z>?;7|{o|WIP;z;Zh;r1m;Uf?_=qnkkfpOsgw)Oya4y5bVZ&RWK> z3`Sc5k`0Hqx=77^CXbHWX=@F~ABIw3pAta-*$V>iBVhSeSPE}-h{8)k&Hzk-UHD*G zZS8+xp?T)a+Iym9Z`LPtk=Jw`wF6iLbX!gK%8789f#J)CpwAJ0?i;ta^>aUsSAI!TR+hm z{`PYPd*&KctmE9rJCLjOLf=}=eL`qQpGacY@Ev8f^FE4ed7ms@~QTu=J033vQYg8;|JKwd#@~cAy=-2 z5f0ihd6ru32mpn2t8VZPT@ph1AQ)4=CNSnk$80i;T7`_eS9Ee}QwAH0I`bjUu;d}! zVVwQmj{AlR$F6XUgh9m+!=xmOnb*8L0NKf!ESHO>gi_yn((JaI-Nru$T_y31G3xL} z?5~VC9U#1d5|QLGVO~>13u8Ov@A5u9dYX|4*j<)>;0yo9;i*4tz+VPS{pGn6a$iAD z3i=aA;Dpse{1UqHS%`d?5DlLq#nHX5AodqWE21duR4712!C^+vDihxRJ@|L_i8(-JKEV z`s2G_K{U06u$&k*l|UYbZIa2-Xy)aDwp8@d6D4_ zx{mCUjcGRDM>JPm^v9|u3>G`|5C*@rRxWRO%VGGXxHK>g5#(NTwyYwPBIe}G#%=?b z#=Z9a>eyAh0~Q)R$4;^MrV5;yAhDuI0*1g*g1ZYB4 zx_Q-$s;H^N%!Ob?0t5jpNs`aso}^%YJWm~Cd`ixzZlEaUdE-G#-e%Q$^5t%qLyPP= zEv(*zJdJc!(xi^Ay5rEO)~MOoyKTlWK?>?J3B9htR{(F3Q*l9-+9ZY#Ojgw&KS~sL zX`m7o_e3~-ZW&q_)z-GkDRd+gB^zZ${iwHzPJf8GT@c%SW`&JNbr9}LF~!(adrXI}4kU53&>P%H5+di6e8=7&v!bz5_Z zrlPQHXTqV|>QsRtH?vZ$9!h{<9H$wDh zoF}BIK4CT|G#kry>&j!nq0Um3mDTw>$ilM1k&>(Pvh3D;qSk(OaoHf9gxo!IBelG$ zZ8H~6=k$k#G+>HMhavDsd!?pcqzQj`_@EAaKb$X$Z&t)RIY0tvO8#iG_vu2vDs^_& zDk*=pY@o*moY~GDT?e-$NB3P`alopqF>Y^T+d8Xz7yAsyC4+eKnuqsVxi7mFo^bm2 zkY25!`p1fzI z?{*PRu#N5hZ&ArVLGG`IK)I51x zFJ9{U5)~WU+nj41d!Ou6_$a92y^1%DhcmiP8>Y2I^k%Kd+>Lpztjy4%-k9Wxmiet0 zd+2Ns{6Aai|BEi`PyP4}{eZG005s57D^-{dIKDhT4pRl@Os;azT6(+4&)^7G&<}bI zXh#%{P=+1}7oxd7aG;9w1v99x!+;cgtk!WUfmW&x7xQcwP!Pv)OI&JnfNfiBC> 
z-KSCNQPapN$QssuXLii#nBI_k-Rew7B`kLC+GC<+=$}UOPbdD@5LEI($UlgAbsla> z&Pw{baHo%aG?$Aoay4z+7f$w6y`1&%u0zYpO^1p5)Nz|ri<{IjFP$a|hhAVY`IT`; zym0vR)OkFe14^a9SEhKgo!m1bSyS=^OhPT`289x%`pICiU@Hu?X!-xs&`I2Ac=d|z zN@Vg|=~-DJWgo@5H(lh-_YUu76aEfbkzWK93L zLiMCYcI?D!qcdc}2*30&onlJX(>Z*1(?2Ejd}X34j!Pw25EtE@eKjtXRRRdua4!Y%t+v zzR|mn|1Q1zZ=*l|2pRhKdab?RmW}H1Mlj@#Tc2c|5q8i%BF8vxmF!5m9RZLCWGUd? zP#Vk3Ino~mq_uXZ*ej3&W_Y6~_qb$&VUeP7O9v8eG1+P>`Z(%m^c&YV>ozwIRXq$= zgw6ycOzPkEaZ4~RH9Nu%BBFAPQLEuc(%s+Mm$MW$@bZHN*alM}sQ=K8BN775{qOCKQ*$|_^600-{6>9}4hXJe8Y?8QZn&VajIX^5^{ z#aU?#3`iHUNyc7YLZ^S5GtQ6(w1B4roB=Na<#) zt|6(XZ)tRSgY{e({kXMV+UKr?r!anNHDe8rd104fj-*iji~g-iet2c$A#2?w4cGZG zmyxx^(0#v4?VqCEY=MMt%U`n&D}mf1Yp^jDa;<9jwzA0RO?e6KxY|Io`SwsB|H@AH zU?%`D#I@ORKw+7I>CLzr}7z*tB9HNn@u>- z7t}^~O57=jXU#N6*Sc<9^^bPb^`LngXG=2zheS~Ki90m?@*l=+dN}9`;Op~R(9};8 zSFW$SAKgjap&MmJ0^jK^QytCjsr;`Xlvv=Qmo!A;7 zfBV0Qz_gc(i0;d6cpM=(9=rs}UqNt*#`FNf8HH{0OZS20>a1z>?%HVZ3W74O90&_A0)X*iRWDWiD>8Vtul zOU1^k=5%cA4!1l6Zgp>ROCN|i8azU_2ihj&R|4Jev?%~Tf(`(+E}blbz1