Skip to content

Commit

Permalink
Merge branch 'develop' into sr363
Browse files Browse the repository at this point in the history
  • Loading branch information
Amanda Richardson committed Oct 2, 2023
2 parents 79c606f + 53def75 commit a20fa6a
Show file tree
Hide file tree
Showing 26 changed files with 420 additions and 36 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build_wheels.yml
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ jobs:
- uses: actions/setup-python@v2

- name: Install cibuildwheel
run: python -m pip install cibuildwheel==1.10.0
run: python -m pip install cibuildwheel>=2.12.3

- name: Install GFortran Linux
if: contains(matrix.os, 'ubuntu')
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ jobs:
- uses: actions/setup-python@v2

- name: Install cibuildwheel
run: python -m pip install cibuildwheel==1.10.0
run: python -m pip install cibuildwheel>=2.12.3

- name: Install GFortran Linux
if: contains(matrix.os, 'ubuntu')
Expand Down
23 changes: 21 additions & 2 deletions .github/workflows/run_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-20.04] # cannot test on macOS as docker isn't supported on Mac
rai_v: [1.2.4, 1.2.5] # versions of RedisAI
rai_v: [1.2.7] # versions of RedisAI
py_v: ['3.7.x', '3.8.x', '3.9.x', '3.10.x'] # versions of Python
compiler: [intel, 8, 9, 10, 11] # intel compiler, and versions of GNU compiler
env:
Expand Down Expand Up @@ -108,6 +108,25 @@ jobs:
echo "CXX=icpx" >> $GITHUB_ENV &&
echo "FC=ifort" >> $GITHUB_ENV
# Set up perl environment for LCOV
- uses: actions/checkout@v3
- name: Setup perl
uses: shogo82148/actions-setup-perl@v1
with:
perl-version: '5.30'
install-modules: Memory::Process

# Install additional perl Modules
- name: Add perl modules
run: |
sudo apt install libcapture-tiny-perl && \
sudo apt install libdatetime-perl && \
sudo apt install libdevel-cover-perl && \
sudo apt install libdigest-md5-perl && \
sudo apt install libfile-spec-perl && \
sudo apt install libjson-xs-perl && \
sudo apt install libtime-hires-perl
# Install additional dependencies
- name: Install Cmake Linux
if: contains(matrix.os, 'ubuntu')
Expand Down Expand Up @@ -167,7 +186,7 @@ jobs:
# Process and upload code coverage (Python was collected during pytest)
- name: Collect coverage from C/C++/Fortran testers
run: third-party/lcov/install/usr/local/bin/lcov -c -d build/Coverage/CMakeFiles -o coverage.info
run: third-party/lcov/install/bin/lcov --ignore-errors gcov,mismatch --keep-going -c -d build/Coverage/CMakeFiles -o coverage.info

- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
Expand Down
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ endif (POLICY CMP0048)

# Project definition for the SmartRedis project
cmake_minimum_required(VERSION 3.13)
project(SmartRedis VERSION "0.4.0")
project(SmartRedis VERSION "0.4.2")

# Configure options for the SmartRedis project
option(SR_PYTHON "Build the python module" OFF)
Expand Down
3 changes: 3 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
SmartRedis and SmartSim share the same contributor guidelines. Please refer to
[CONTRIBUTING.rst](https://github.com/CrayLabs/SmartSim/blob/develop/CONTRIBUTING.rst)
in the SmartSim repo or at [CrayLabs](https://www.craylabs.org/docs/contributing.html)
12 changes: 7 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
# General settings
MAKEFLAGS += --no-print-directory
SHELL:=/bin/bash
CWD := $(shell pwd)

# Params for third-party software
HIREDIS_URL := https://github.com/redis/hiredis.git
Expand All @@ -42,7 +43,7 @@ REDISAI_URL := https://github.com/RedisAI/RedisAI.git
CATCH2_URL := https://github.com/catchorg/Catch2.git
CATCH2_VER := v2.13.6
LCOV_URL := https://github.com/linux-test-project/lcov.git
LCOV_VER := v1.15
LCOV_VER := v2.0

# Build variables
NPROC := $(shell nproc 2>/dev/null || python -c "import multiprocessing as mp; print (mp.cpu_count())" 2>/dev/null || echo 4)
Expand All @@ -58,7 +59,7 @@ SR_TEST_REDIS_MODE := Clustered
SR_TEST_UDS_FILE := /tmp/redis.sock
SR_TEST_PORT := 6379
SR_TEST_NODES := 3
SR_TEST_REDISAI_VER := v1.2.3
SR_TEST_REDISAI_VER := v1.2.7
SR_TEST_DEVICE := cpu
SR_TEST_PYTEST_FLAGS := -vv -s

Expand Down Expand Up @@ -596,12 +597,13 @@ third-party/catch/single_include/catch2/catch.hpp:

# LCOV (hidden test target)
.PHONY: lcov
lcov: third-party/lcov/install/usr/local/bin/lcov
third-party/lcov/install/usr/local/bin/lcov:
lcov: third-party/lcov/install/bin/lcov
third-party/lcov/install/bin/lcov:
@echo Installing LCOV
@mkdir -p third-party
@cd third-party && \
git clone $(LCOV_URL) lcov --branch $(LCOV_VER) --depth=1
@cd third-party/lcov && \
mkdir -p install && \
CC=gcc CXX=g++ DESTDIR="install/" make install && \
CC=gcc CXX=g++ make PREFIX=$(CWD)/third-party/lcov/install/ install && \
echo "Finished installing LCOV"
27 changes: 26 additions & 1 deletion doc/changelog.rst
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,30 @@ To be released at some future point in time

Description

- Added support for model chunking
- Updated the third-party RedisAI component
- Updated the third-party lcov component
- Add link to contributing guidelines

Detailed Notes

- Models will now be automatically chunked when sent to/received from the backend database. This allows use of models greater than 511MB in size. (PR404_)
- Updated from RedisAI v1.2.3 (test target)/v1.2.4 and v1.2.5 (CI/CD pipeline) to v1.2.7 (PR402_)
- Updated lcov from version 1.15 to 2.0 (PR396_)
- Create CONTRIBUTING.md file that points to the contribution guideline for both SmartSim and SmartRedis (PR395_)

.. _PR404: https://github.com/CrayLabs/SmartRedis/pull/404
.. _PR402: https://github.com/CrayLabs/SmartRedis/pull/402
.. _PR396: https://github.com/CrayLabs/SmartRedis/pull/396
.. _PR395: https://github.com/CrayLabs/SmartRedis/pull/395

0.4.2
-----

Released on September 13, 2023

Description

- Reduced number of suppressed lint errors
- Expanded documentation of aggregation lists
- Updated third-party software dependencies to current versions
Expand All @@ -31,7 +55,7 @@ Detailed Notes
- Fix incorrect link to installation documentation (PR380_)
- Update language support matrix in documentation to reflect updates from the last release (PR379_)
- Fix typo causing startup failure in utility script for unit tests (PR378_)
- Update pylint configuration and version, mitigate most errors, execute in CI/CD pipeline (PR371_)
- Update pylint configuration and version, mitigate most errors, execute in CI/CD pipeline (PR371_, PR382_)
- Deleted obsolete build and testing files that are no longer needed with the new build and test system (PR366_)
- Reuse existing redis connection when mapping the Redis cluster (PR364_)

Expand All @@ -40,6 +64,7 @@ Detailed Notes
.. _PR389: https://github.com/CrayLabs/SmartRedis/pull/389
.. _PR388: https://github.com/CrayLabs/SmartRedis/pull/388
.. _PR386: https://github.com/CrayLabs/SmartRedis/pull/386
.. _PR382: https://github.com/CrayLabs/SmartRedis/pull/382
.. _PR381: https://github.com/CrayLabs/SmartRedis/pull/381
.. _PR380: https://github.com/CrayLabs/SmartRedis/pull/380
.. _PR379: https://github.com/CrayLabs/SmartRedis/pull/379
Expand Down
15 changes: 15 additions & 0 deletions include/client.h
Original file line number Diff line number Diff line change
Expand Up @@ -1269,6 +1269,21 @@ class Client : public SRObject
const int start_index,
const int end_index);

/*!
* \brief Reconfigure the chunking size that Redis uses for model
* serialization, replication, and the model_get command.
* \details This method triggers the AI.CONFIG method in the Redis
* database to change the model chunking size.
*
* NOTE: The default size of 511MB should be fine for most
* applications, so it is expected to be very rare that a
* client calls this method. It is not necessary to call
* this method for a model to be chunked.
* \param chunk_size The new chunk size in bytes
* \throw SmartRedis::Exception if the command fails.
*/
void set_model_chunk_size(int chunk_size);

/*!
* \brief Create a string representation of the client
* \returns A string containing client details
Expand Down
15 changes: 15 additions & 0 deletions include/command.h
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,21 @@ class Command
return *this;
}

/*!
* \brief Add a vector of string_views to the command.
* \details The string values are copied to the command.
* To add a vector of keys, use the add_keys()
* method.
* \param fields The strings to add to the command
* \returns The command object, for chaining.
*/
virtual Command& operator<<(const std::vector<std::string_view>& fields) {
for (size_t i = 0; i < fields.size(); i++) {
add_field_ptr(fields[i]);
}
return *this;
}

/*!
* \brief Add a vector of strings to the command.
* \details The string values are copied to the command.
Expand Down
8 changes: 8 additions & 0 deletions include/commandreply.h
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,14 @@ class CommandReply {
*/
std::string redis_reply_type();

/*!
* \brief Determine whether the response is an array
* \returns true iff the response is of type REDIS_REPLY_ARRAY
*/
bool is_array() {
return _reply->type == REDIS_REPLY_ARRAY;
}

/*!
* \brief Print the reply structure of the CommandReply
*/
Expand Down
15 changes: 15 additions & 0 deletions include/pyclient.h
Original file line number Diff line number Diff line change
Expand Up @@ -925,6 +925,21 @@ class PyClient : public PySRObject
const int start_index,
const int end_index);

/*!
* \brief Reconfigure the chunking size that Redis uses for model
* serialization, replication, and the model_get command.
* \details This method triggers the AI.CONFIG method in the Redis
* database to change the model chunking size.
*
* NOTE: The default size of 511MB should be fine for most
* applications, so it is expected to be very rare that a
* client calls this method. It is not necessary to call
* this method for a model to be chunked.
* \param chunk_size The new chunk size in bytes
* \throw SmartRedis::Exception if the command fails.
*/
void set_model_chunk_size(int chunk_size);

/*!
* \brief Create a string representation of the Client
* \returns A string representation of the Client
Expand Down
29 changes: 25 additions & 4 deletions include/redis.h
Original file line number Diff line number Diff line change
Expand Up @@ -276,7 +276,7 @@ class Redis : public RedisServer
* \brief Set a model from std::string_view buffer in the
* database for future execution
* \param key The key to associate with the model
* \param model The model as a continuous buffer string_view
* \param model The model as a sequence of buffer string_view chunks
* \param backend The name of the backend
* (TF, TFLITE, TORCH, ONNX)
* \param device The name of the device for execution
Expand All @@ -292,7 +292,7 @@ class Redis : public RedisServer
* \throw RuntimeException for all client errors
*/
virtual CommandReply set_model(const std::string& key,
std::string_view model,
const std::vector<std::string_view>& model,
const std::string& backend,
const std::string& device,
int batch_size = 0,
Expand All @@ -307,7 +307,7 @@ class Redis : public RedisServer
* \brief Set a model from std::string_view buffer in the
* database for future execution in a multi-GPU system
* \param name The name to associate with the model
* \param model The model as a continuous buffer string_view
* \param model The model as a sequence of buffer string_view chunks
* \param backend The name of the backend
* (TF, TFLITE, TORCH, ONNX)
* \param first_gpu The first GPU to use with this model
Expand All @@ -322,7 +322,7 @@ class Redis : public RedisServer
* \throw RuntimeException for all client errors
*/
virtual void set_model_multigpu(const std::string& name,
const std::string_view& model,
const std::vector<std::string_view>& model,
const std::string& backend,
int first_gpu,
int num_gpus,
Expand Down Expand Up @@ -505,6 +505,27 @@ class Redis : public RedisServer
const std::string& key,
const bool reset_stat);

/*!
* \brief Retrieve the current model chunk size
* \returns The size in bytes for model chunking
*/
virtual int get_model_chunk_size();

/*!
* \brief Reconfigure the chunking size that Redis uses for model
* serialization, replication, and the model_get command.
* \details This method triggers the AI.CONFIG method in the Redis
* database to change the model chunking size.
*
* NOTE: The default size of 511MB should be fine for most
* applications, so it is expected to be very rare that a
* client calls this method. It is not necessary to call
* this method for a model to be chunked.
* \param chunk_size The new chunk size in bytes
* \throw SmartRedis::Exception if the command fails.
*/
virtual void set_model_chunk_size(int chunk_size);

/*!
* \brief Run a CommandList via a Pipeline
* \param cmdlist The list of commands to run
Expand Down
28 changes: 24 additions & 4 deletions include/rediscluster.h
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,7 @@ class RedisCluster : public RedisServer
* \brief Set a model from std::string_view buffer in the
* database for future execution
* \param key The key to associate with the model
* \param model The model as a continuous buffer string_view
* \param model The model as a sequence of buffer string_view chunks
* \param backend The name of the backend
* (TF, TFLITE, TORCH, ONNX)
* \param device The name of the device for execution
Expand All @@ -312,7 +312,7 @@ class RedisCluster : public RedisServer
* \throw RuntimeException for all client errors
*/
virtual CommandReply set_model(const std::string& key,
std::string_view model,
const std::vector<std::string_view>& model,
const std::string& backend,
const std::string& device,
int batch_size = 0,
Expand All @@ -327,7 +327,7 @@ class RedisCluster : public RedisServer
* \brief Set a model from std::string_view buffer in the
* database for future execution in a multi-GPU system
* \param name The name to associate with the model
* \param model The model as a continuous buffer string_view
* \param model The model as a sequence of buffer string_view chunks
* \param backend The name of the backend
* (TF, TFLITE, TORCH, ONNX)
* \param first_gpu The first GPU to use with this model
Expand All @@ -344,7 +344,7 @@ class RedisCluster : public RedisServer
* \throw RuntimeException for all client errors
*/
virtual void set_model_multigpu(const std::string& name,
const std::string_view& model,
const std::vector<std::string_view>& model,
const std::string& backend,
int first_gpu,
int num_gpus,
Expand Down Expand Up @@ -527,6 +527,11 @@ class RedisCluster : public RedisServer
get_model_script_ai_info(const std::string& address,
const std::string& key,
const bool reset_stat);
/*!
* \brief Retrieve the current model chunk size
* \returns The size in bytes for model chunking
*/
virtual int get_model_chunk_size();

/*!
* \brief Run a CommandList via a Pipeline.
Expand Down Expand Up @@ -741,6 +746,21 @@ class RedisCluster : public RedisServer
std::vector<std::string>& inputs,
std::vector<std::string>& outputs);

/*!
* \brief Reconfigure the chunking size that Redis uses for model
* serialization, replication, and the model_get command.
* \details This method triggers the AI.CONFIG method in the Redis
* database to change the model chunking size.
*
* NOTE: The default size of 511MB should be fine for most
* applications, so it is expected to be very rare that a
* client calls this method. It is not necessary to call
* this method for a model to be chunked.
* \param chunk_size The new chunk size in bytes
* \throw SmartRedis::Exception if the command fails.
*/
virtual void set_model_chunk_size(int chunk_size);

/*!
* \brief Execute a pipeline for the provided commands.
* The provided commands MUST be executable on a single
Expand Down
Loading

0 comments on commit a20fa6a

Please sign in to comment.