fix for snapshot variables missing/null (#3328)
* testing adding startup

* testing adding default

* testing newer docker

* adding default values for model

* testing specific docker

* testing specific docker image

* fixed format

* merge ready

* adding source of truth

* adding source of truth

* fixing tests

* format

* removing defaults

---------

Co-authored-by: Ankith Gunapal <[email protected]>
udaij12 and agunapal authored Sep 23, 2024
1 parent 6881ec5 commit c161926
Showing 2 changed files with 15 additions and 4 deletions.
5 changes: 5 additions & 2 deletions ModelConfig.java
@@ -11,6 +11,9 @@
 public class ModelConfig {
     private static final Logger logger = LoggerFactory.getLogger(ModelConfig.class);
 
+    public static final int defaultStartupTimeout = 120; // unit: sec
+    public static final int defaultResponseTimeout = 120; // unit: sec
+
     /** the minimum number of workers of a model */
     private int minWorkers;
     /** the maximum number of workers of a model */
@@ -20,9 +23,9 @@ public class ModelConfig {
     /** the maximum delay in msec of a batch of a model */
     private int maxBatchDelay;
     /** the timeout in sec of a specific model's response. */
-    private int responseTimeout = 120; // unit: sec
+    private int responseTimeout = defaultResponseTimeout;
     /** the timeout in sec of a specific model's startup. */
-    private int startupTimeout = 120; // unit: sec
+    private int startupTimeout = defaultStartupTimeout;
     /**
      * the device type where the model is loaded. It can be gpu, cpu. The model is loaded on CPU if
      * deviceType: "cpu" is set on a GPU host.
12 changes: 10 additions & 2 deletions frontend/server/src/main/java/org/pytorch/serve/wlm/Model.java
@@ -193,9 +193,17 @@ public void setModelState(JsonObject modelInfo) {
         minWorkers = modelInfo.get(MIN_WORKERS).getAsInt();
         maxWorkers = modelInfo.get(MAX_WORKERS).getAsInt();
         maxBatchDelay = modelInfo.get(MAX_BATCH_DELAY).getAsInt();
-        responseTimeout = modelInfo.get(RESPONSE_TIMEOUT).getAsInt();
-        startupTimeout = modelInfo.get(STARTUP_TIMEOUT).getAsInt();
         batchSize = modelInfo.get(BATCH_SIZE).getAsInt();
+        responseTimeout =
+                modelInfo.has(RESPONSE_TIMEOUT) && !modelInfo.get(RESPONSE_TIMEOUT).isJsonNull()
+                        ? modelInfo.get(RESPONSE_TIMEOUT).getAsInt()
+                        : modelArchive.getModelConfig()
+                                .defaultResponseTimeout; // default value for responseTimeout
+        startupTimeout =
+                modelInfo.has(STARTUP_TIMEOUT) && !modelInfo.get(STARTUP_TIMEOUT).isJsonNull()
+                        ? modelInfo.get(STARTUP_TIMEOUT).getAsInt()
+                        : modelArchive.getModelConfig()
+                                .defaultStartupTimeout; // default value for startupTimeout
 
         JsonElement runtime = modelInfo.get(RUNTIME_TYPE);
         String runtime_str = Manifest.RuntimeType.PYTHON.getValue();
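The guard added in setModelState() is a standard Gson missing/null check. Below is a minimal standalone sketch, not part of this commit, that illustrates why the guard is needed when restoring snapshots written before these fields existed. The class name, the readStartupTimeout helper, and the literal "startupTimeout" key are illustrative assumptions; the real code reads the STARTUP_TIMEOUT / RESPONSE_TIMEOUT constants and falls back to ModelConfig.defaultStartupTimeout / defaultResponseTimeout shown above.

// Standalone sketch (illustrative only): key names and values are assumptions.
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

public class SnapshotTimeoutFallbackSketch {

    // Mirrors ModelConfig.defaultStartupTimeout added in this commit.
    static final int DEFAULT_STARTUP_TIMEOUT = 120; // unit: sec

    // Same guard as the new setModelState() code: use the snapshot value only when
    // the key is present and not JSON null, otherwise fall back to the default.
    static int readStartupTimeout(JsonObject modelInfo) {
        return modelInfo.has("startupTimeout") && !modelInfo.get("startupTimeout").isJsonNull()
                ? modelInfo.get("startupTimeout").getAsInt()
                : DEFAULT_STARTUP_TIMEOUT;
    }

    public static void main(String[] args) {
        // Snapshot written before the field existed: key is absent entirely.
        // The pre-fix modelInfo.get(STARTUP_TIMEOUT).getAsInt() pattern throws a
        // NullPointerException here, because JsonObject.get() returns null for a missing key.
        JsonObject oldSnapshot = JsonParser.parseString("{\"minWorkers\":1}").getAsJsonObject();
        System.out.println(readStartupTimeout(oldSnapshot)); // 120

        // Snapshot that serialized the field as an explicit JSON null also falls back.
        JsonObject nullSnapshot =
                JsonParser.parseString("{\"startupTimeout\":null}").getAsJsonObject();
        System.out.println(readStartupTimeout(nullSnapshot)); // 120

        // Snapshot that carries a real value keeps it.
        JsonObject newSnapshot =
                JsonParser.parseString("{\"startupTimeout\":300}").getAsJsonObject();
        System.out.println(readStartupTimeout(newSnapshot)); // 300
    }
}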
