You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I'm trying to build OpenVINO as a static library along with any other dependencies due to the nature of my project. Unfortunately, TBB cannot be built statically, so I hope to use THREADING=OMP instead.
However, when doing so, every time I get to the cmake config step I get an error which brings this process to a standstill.
I've cloned the OpenVINO/2024.0.0 recipe from conan-center-index and altered it to support this. I believe I've done it mostly correctly, but I'm not an expert.
Here in the description I've put the recipe conanfile for OpenVINO. In the "Steps to Reproduce" section I've included all of the other files I've been using. In the logs, you can see the config error.
Click to expand altered OpenVino recipe conanfile.py
from conan import ConanFile
from conan.errors import ConanException, ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout
from conan.tools.env import VirtualBuildEnv, VirtualRunEnv
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir
import functools
import os
import yaml
# Minimum Conan client versions this recipe supports:
# Conan 1.x >= 1.60.0, or Conan 2.x >= 2.0.8.
required_conan_version = ">=1.60.0 <2.0 || >=2.0.8"
class OpenvinoConan(ConanFile):
    """Conan recipe for the OpenVINO toolkit.

    Clone of the conan-center-index recipe, locally altered to expose a
    'threading' option (TBB vs OpenMP) so fully static builds are possible.
    """

    name = "openvino"

    # Optional metadata
    license = "Apache-2.0"
    homepage = "https://github.com/openvinotoolkit/openvino"
    url = "https://github.com/conan-io/conan-center-index"
    description = "Open Visual Inference And Optimization toolkit for AI inference"
    topics = ("nlp", "natural-language-processing", "ai", "computer-vision", "deep-learning", "transformers", "inference",
              "speech-recognition", "yolo", "performance-boost", "diffusion-models", "recommendation-system", "stable-diffusion",
              "generative-ai", "llm-inference", "optimize-ai", "deploy-ai")
    # Non-embed dependencies only affect the package id at patch granularity.
    package_id_non_embed_mode = "patch_mode"
    package_type = "library"
    # Shorten cache paths (Windows MAX_PATH) and build from the cached source.
    short_paths = True
    no_copy_source = True

    # Binary configuration
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        # HW plugins
        "enable_cpu": [True, False],
        "enable_gpu": [True, False],
        # SW plugins
        "enable_auto": [True, False],
        "enable_hetero": [True, False],
        "enable_auto_batch": [True, False],
        # Frontends
        "enable_ir_frontend": [True, False],
        "enable_onnx_frontend": [True, False],
        "enable_tf_frontend": [True, False],
        "enable_tf_lite_frontend": [True, False],
        "enable_paddle_frontend": [True, False],
        "enable_pytorch_frontend": [True, False],
        # Threading backend.
        # NOTE(review): this option is the local addition relative to the
        # upstream CCI recipe -- confirm upstream CMake still supports OMP.
        "threading": ["tbb", "omp"]
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        # HW plugins
        "enable_cpu": True,
        "enable_gpu": True,
        # SW plugins
        "enable_auto": True,
        "enable_hetero": True,
        "enable_auto_batch": True,
        # Frontends
        "enable_ir_frontend": True,
        "enable_onnx_frontend": True,
        "enable_tf_frontend": True,
        "enable_tf_lite_frontend": True,
        "enable_paddle_frontend": True,
        "enable_pytorch_frontend": True,
        # Default to OpenMP so the whole dependency tree can be static
        # (upstream CCI defaults to TBB, which cannot be built statically).
        "threading": "omp"
    }
@property
def _dependencies_filename(self):
    """Name of the YAML file that pins third-party dependency versions."""
    return "dependencies-{}.yml".format(self.version)
@property
@functools.lru_cache(1)
def _dependencies_versions(self):
    """Load and cache the dependency-version pins shipped with the recipe.

    Returns the mapping parsed from dependencies/dependencies-<version>.yml
    that sits next to the recipe.

    Raises:
        ConanException: if the YAML file cannot be found.
    """
    dependencies_filepath = os.path.join(self.recipe_folder, "dependencies", self._dependencies_filename)
    if not os.path.isfile(dependencies_filepath):
        raise ConanException(f"Cannot find {dependencies_filepath}")
    # Fix: close the file deterministically -- the original passed the bare
    # open() handle to yaml.safe_load and left closing to the GC.
    with open(dependencies_filepath, encoding='UTF-8') as dependencies_file:
        return yaml.safe_load(dependencies_file)
def _require(self, dependency):
    """Translate a dependency name into a 'name/version' Conan reference,
    using the version pinned in the recipe's dependencies YAML."""
    try:
        pinned_version = self._dependencies_versions[dependency]
    except KeyError:
        raise ConanException(f"{dependency} is missing in {self._dependencies_filename}")
    return f"{dependency}/{pinned_version}"
@property
def _protobuf_required(self):
    """True when an enabled frontend (TF, ONNX or Paddle) needs protobuf."""
    opts = self.options
    return opts.enable_tf_frontend or opts.enable_onnx_frontend or opts.enable_paddle_frontend
@property
def _target_arm(self):
    # Substring match covers every ARM flavour in Conan's arch values
    # (armv7, armv8, arm64, ...).
    return "arm" in self.settings.arch

@property
def _target_x86_64(self):
    # Exact match: only 64-bit x86 targets.
    return self.settings.arch == "x86_64"
@property
def _gna_option_available(self):
    # GNA plugin existed only for x86_64 Linux/Windows and was dropped
    # upstream starting with the 2024.0.0 release.
    return self.settings.os in ["Linux", "Windows"] and self._target_x86_64 and Version(self.version) < "2024.0.0"

@property
def _gpu_option_available(self):
    # Intel GPU plugin is x86_64-only and not available on macOS.
    return self.settings.os != "Macos" and self._target_x86_64

@property
def _preprocessing_available(self):
    # G-API preprocessing needs 'ade'; its presence in the pin file marks
    # recipe versions where preprocessing is still shipped.
    return "ade" in self._dependencies_versions
@property
def _compilers_minimum_version(self):
    # Minimum compiler versions known to build OpenVINO; checked in
    # validate_build(). Compilers absent from this table are not rejected.
    return {
        "gcc": "7",
        "clang": "9",
        "apple-clang": "11",
        "Visual Studio": "16",
        "msvc": "192",
    }
@property
def _is_legacy_one_profile(self):
    """True for legacy single-profile setups: Conan exposes 'settings_build'
    only when separate build/host profiles are in use."""
    has_build_profile = hasattr(self, "settings_build")
    return not has_build_profile
def source(self):
    """Fetch OpenVINO plus its vendored third-party archives and patch them."""
    # The main OpenVINO tree is unpacked into the source folder root first.
    get(self, **self.conan_data["sources"][self.version]["openvino"], strip_root=True)
    # Third-party components upstream expects as in-tree subdirectories.
    thirdparty_dirs = {
        "onednn_cpu": "src/plugins/intel_cpu/thirdparty/onednn",
        "mlas": "src/plugins/intel_cpu/thirdparty/mlas",
        "arm_compute": "src/plugins/intel_cpu/thirdparty/ComputeLibrary",
        "onednn_gpu": "src/plugins/intel_gpu/thirdparty/onednn_gpu",
    }
    for component, subdir in thirdparty_dirs.items():
        get(self, **self.conan_data["sources"][self.version][component], strip_root=True,
            destination=f"{self.source_folder}/{subdir}")
    # Drop vendored rapidjson; the Conan package is used instead.
    rmdir(self, f"{self.source_folder}/src/plugins/intel_gpu/thirdparty/rapidjson")
    apply_conandata_patches(self)
def export(self):
    # Ship the per-version dependency-pin YAML alongside the recipe so
    # _dependencies_versions can read it after export.
    copy(self, f"dependencies/{self._dependencies_filename}", self.recipe_folder, self.export_folder)

def export_sources(self):
    # Export the patch files referenced from conandata.yml.
    export_conandata_patches(self)
def config_options(self):
    """Remove options that do not apply to the target platform."""
    # fPIC has no meaning for Windows toolchains.
    if self.settings.os == "Windows":
        del self.options.fPIC
    # The GPU plugin only exists for x86_64 non-macOS targets.
    if not self._gpu_option_available:
        del self.options.enable_gpu
def configure(self):
    """Normalize option combinations before the graph is computed."""
    # fPIC is implied for shared builds.
    if self.options.shared:
        self.options.rm_safe("fPIC")
    # Even though OpenVINO can work with dynamic protobuf, static is the
    # upstream-recommended configuration.
    if self._protobuf_required:
        self.options["protobuf"].shared = False
def build_requirements(self):
    """Declare build-context (tool) requirements."""
    # ComputeLibrary for ARM CPUs is built via scons.
    if self._target_arm:
        self.tool_requires("scons/4.3.0")
    # protoc/flatc executables are only pulled from the build context when
    # separate build/host profiles are in use.
    two_profiles = not self._is_legacy_one_profile
    if two_profiles and self._protobuf_required:
        self.tool_requires("protobuf/<host_version>")
    if two_profiles and self.options.enable_tf_lite_frontend:
        self.tool_requires("flatbuffers/<host_version>")
    # Static builds need a newer CMake than the baseline.
    if not self.options.shared:
        self.tool_requires("cmake/[>=3.18 <4]")
def requirements(self):
    """Declare host requirements; each frontend option pulls in its parser."""
    add = self.requires
    # onetbb is only needed for the TBB threading backend; with OMP the
    # recipe deliberately avoids it so everything can be static.
    if self.options.threading == "tbb":
        add("onetbb/2021.10.0")
    add("pugixml/1.14")
    if self._target_x86_64:
        add("xbyak/6.73")
    if self.options.get_safe("enable_gpu"):
        add("opencl-icd-loader/2023.04.17")
        add("rapidjson/cci.20220822")
    if self._protobuf_required:
        add("protobuf/3.21.12")
    if self.options.enable_tf_frontend:
        add("snappy/1.1.10")
    if self.options.enable_onnx_frontend:
        add(self._require("onnx"))
    if self.options.enable_tf_lite_frontend:
        add("flatbuffers/23.5.26")
    if self._preprocessing_available:
        add(self._require("ade"))
def layout(self):
    # Standard CMake layout; sources live under 'src' (see source()).
    cmake_layout(self, src_folder="src")
def generate(self):
    """Generate CMake toolchain/deps files and build environments.

    Maps recipe options onto OpenVINO's CMake cache variables and prefers
    Conan-provided ("system") dependencies over the vendored copies.
    """
    env = VirtualBuildEnv(self)
    env.generate()
    if self._is_legacy_one_profile:
        # Legacy single-profile setups also need the host run-env in build scope.
        env = VirtualRunEnv(self)
        env.generate(scope="build")
    deps = CMakeDeps(self)
    deps.generate()
    toolchain = CMakeToolchain(self)
    # HW plugins
    toolchain.cache_variables["ENABLE_INTEL_CPU"] = self.options.enable_cpu
    if self._gpu_option_available:
        toolchain.cache_variables["ENABLE_INTEL_GPU"] = self.options.enable_gpu
        # oneDNN for GPU is disabled for static builds with the CPU plugin on,
        # because the two plugins pin different oneDNN versions (see validate()).
        toolchain.cache_variables["ENABLE_ONEDNN_FOR_GPU"] = self.options.shared or not self.options.enable_cpu
    if self._gna_option_available:
        toolchain.cache_variables["ENABLE_INTEL_GNA"] = False
    # SW plugins
    toolchain.cache_variables["ENABLE_AUTO"] = self.options.enable_auto
    toolchain.cache_variables["ENABLE_MULTI"] = self.options.enable_auto
    toolchain.cache_variables["ENABLE_AUTO_BATCH"] = self.options.enable_auto_batch
    toolchain.cache_variables["ENABLE_HETERO"] = self.options.enable_hetero
    # Frontends
    toolchain.cache_variables["ENABLE_OV_IR_FRONTEND"] = self.options.enable_ir_frontend
    toolchain.cache_variables["ENABLE_OV_PADDLE_FRONTEND"] = self.options.enable_paddle_frontend
    toolchain.cache_variables["ENABLE_OV_TF_FRONTEND"] = self.options.enable_tf_frontend
    toolchain.cache_variables["ENABLE_OV_TF_LITE_FRONTEND"] = self.options.enable_tf_lite_frontend
    toolchain.cache_variables["ENABLE_OV_ONNX_FRONTEND"] = self.options.enable_onnx_frontend
    toolchain.cache_variables["ENABLE_OV_PYTORCH_FRONTEND"] = self.options.enable_pytorch_frontend
    # Dependencies
    if self.options.threading == "tbb":
        toolchain.cache_variables["ENABLE_SYSTEM_TBB"] = True
        toolchain.cache_variables["ENABLE_TBBBIND_2_5"] = False
        toolchain.cache_variables["THREADING"] = "TBB"
    else:
        # NOTE(review): the THREADING=OMP branch is the local modification to
        # this recipe; upstream OpenVINO has been TBB-centric for a while and
        # the OMP path may be untested in 2024.x -- confirm against upstream.
        toolchain.cache_variables["ENABLE_SYSTEM_TBB"] = False
        toolchain.cache_variables["ENABLE_TBBBIND_2_5"] = False
        toolchain.cache_variables["THREADING"] = "OMP"
    toolchain.cache_variables["ENABLE_SYSTEM_PUGIXML"] = True
    if self._protobuf_required:
        toolchain.cache_variables["ENABLE_SYSTEM_PROTOBUF"] = True
    if self.options.enable_tf_frontend:
        toolchain.cache_variables["ENABLE_SYSTEM_SNAPPY"] = True
    if self.options.enable_tf_lite_frontend:
        toolchain.cache_variables["ENABLE_SYSTEM_FLATBUFFERS"] = True
    if self.options.get_safe("enable_gpu"):
        toolchain.cache_variables["ENABLE_SYSTEM_OPENCL"] = True
    # misc: disable everything not needed for a consumable package
    # (python bindings, samples, linters, ...).
    if self._preprocessing_available:
        toolchain.cache_variables["ENABLE_GAPI_PREPROCESSING"] = True
    toolchain.cache_variables["BUILD_SHARED_LIBS"] = self.options.shared
    toolchain.cache_variables["CPACK_GENERATOR"] = "CONAN"
    toolchain.cache_variables["ENABLE_PROFILING_ITT"] = False
    toolchain.cache_variables["ENABLE_PYTHON"] = False
    toolchain.cache_variables["ENABLE_PROXY"] = False
    toolchain.cache_variables["ENABLE_WHEEL"] = False
    toolchain.cache_variables["ENABLE_CPPLINT"] = False
    toolchain.cache_variables["ENABLE_NCC_STYLE"] = False
    toolchain.cache_variables["ENABLE_SAMPLES"] = False
    toolchain.cache_variables["ENABLE_TEMPLATE"] = False
    toolchain.generate()
def validate_build(self):
    """Reject configurations that are known not to build.

    Raises ConanInvalidConfiguration for: too-old compilers, clang+libc++,
    Emscripten, and Debug builds.
    """
    if self.settings.compiler.get_safe("cppstd"):
        check_min_cppstd(self, "11")
    required_min = self._compilers_minimum_version.get(str(self.settings.compiler), False)
    actual_version = Version(self.settings.compiler.version)
    if required_min and actual_version < required_min:
        raise ConanInvalidConfiguration(
            f"{self.ref} requires {self.settings.compiler} ver. {required_min}, provided ver. {actual_version}.",
        )
    # OpenVINO has unresolved symbols when clang is paired with libc++.
    # Keep the compiler check first: 'libcxx' does not exist for msvc.
    if self.settings.compiler == "clang" and self.settings.compiler.libcxx == "libc++":
        raise ConanInvalidConfiguration(
            f"{self.ref} cannot be built with clang and libc++ due to unresolved symbols. "
            f"Please, use libstdc++ instead."
        )
    if self.settings.os == "Emscripten":
        raise ConanInvalidConfiguration(f"{self.ref} does not support Emscripten")
    # TODO: resolve later, not critical for now -- Conan Center CI runs out
    # of memory when building OpenVINO in Debug.
    if self.settings.build_type == "Debug":
        raise ConanInvalidConfiguration(f"{self.ref} does not support Debug build type")
def validate(self):
    """Warn (non-fatal) about the static GPU+CPU oneDNN limitation."""
    static_build = not self.options.shared
    # The GPU and CPU plugins pin different oneDNN versions, so a static
    # build links the GPU plugin without oneDNN to avoid ODR violations.
    if self.options.get_safe("enable_gpu") and static_build and self.options.enable_cpu:
        self.output.warning(f"{self.name} recipe builds GPU plugin without oneDNN (dGPU) support during static build, "
                            "because CPU plugin compiled with different oneDNN version may cause ODR violation. "
                            "To enable oneDNN support for GPU plugin, please, either use shared build configuration "
                            "or disable CPU plugin by setting 'enable_cpu' option to False.")
def build(self):
    """Configure CMake and build only the targets this package ships."""
    cmake = CMake(self)
    cmake.configure()
    # Building explicit targets (frontends, plugins, C API) skips tests,
    # samples and other by-products of the default 'all' target.
    for build_target in ("ov_frontends", "ov_plugins", "openvino_c"):
        cmake.build(target=build_target)
def package(self):
    """Install build artifacts and strip upstream packaging metadata."""
    cmake = CMake(self)
    cmake.install()
    # Drop upstream cmake config and .pc files; Conan regenerates
    # equivalents from package_info().
    for parts in (("share",), ("lib", "pkgconfig")):
        rmdir(self, os.path.join(self.package_folder, *parts))
def package_info(self):
    """Describe the packaged libraries/components to consumers.

    NOTE(review): for static builds the order of 'libs' matters for
    single-pass linkers (plugins before frontends before common utils) --
    keep the ordering when editing.
    """
    self.cpp_info.set_property("cmake_find_mode", "config")
    self.cpp_info.set_property("cmake_file_name", "OpenVINO")
    self.cpp_info.set_property("pkg_config_name", "openvino")
    # Core runtime component (openvino::runtime).
    openvino_runtime = self.cpp_info.components["Runtime"]
    openvino_runtime.set_property("cmake_target_name", "openvino::runtime")
    # onetbb is only a dependency for the TBB threading backend; the OMP
    # build relies on the compiler's OpenMP runtime instead.
    if self.options.threading == "tbb":
        openvino_runtime.requires = ["onetbb::libtbb", "pugixml::pugixml"]
    else:
        openvino_runtime.requires = ["pugixml::pugixml"]
    openvino_runtime.libs = ["openvino"]
    if self._preprocessing_available:
        openvino_runtime.requires.append("ade::ade")
    if self._target_x86_64:
        openvino_runtime.requires.append("xbyak::xbyak")
    if self.settings.os in ["Linux", "Android", "FreeBSD", "SunOS", "AIX"]:
        openvino_runtime.system_libs = ["m", "dl", "pthread"]
    if self.settings.os == "Windows":
        openvino_runtime.system_libs.append("shlwapi")
        if self._preprocessing_available:
            openvino_runtime.system_libs.extend(["wsock32", "ws2_32"])
    # Pre-2024 releases still shipped the legacy Inference Engine headers.
    if Version(self.version) < "2024.0.0":
        openvino_runtime.includedirs.append(os.path.join("include", "ie"))
    # Have to expose all internal libraries for static libraries case
    if not self.options.shared:
        # HW plugins
        if self.options.enable_cpu:
            openvino_runtime.libs.append("openvino_arm_cpu_plugin" if self._target_arm else \
                                         "openvino_intel_cpu_plugin")
            openvino_runtime.libs.extend(["openvino_onednn_cpu", "openvino_snippets", "mlas"])
            if self._target_arm:
                openvino_runtime.libs.append("arm_compute-static")
        if self.options.get_safe("enable_gpu"):
            openvino_runtime.libs.extend(["openvino_intel_gpu_plugin", "openvino_intel_gpu_graph",
                                          "openvino_intel_gpu_runtime", "openvino_intel_gpu_kernels"])
            # The GPU build only carries its own oneDNN when the CPU plugin
            # (with its differently-pinned oneDNN) is off.
            if not self.options.enable_cpu:
                openvino_runtime.libs.append("openvino_onednn_gpu")
        # SW plugins
        if self.options.enable_auto:
            openvino_runtime.libs.append("openvino_auto_plugin")
        if self.options.enable_hetero:
            openvino_runtime.libs.append("openvino_hetero_plugin")
        if self.options.enable_auto_batch:
            openvino_runtime.libs.append("openvino_auto_batch_plugin")
        # Preprocessing should come after plugins, because plugins depend on it
        if self._preprocessing_available:
            openvino_runtime.libs.extend(["openvino_gapi_preproc", "fluid"])
        # Frontends
        if self.options.enable_ir_frontend:
            openvino_runtime.libs.append("openvino_ir_frontend")
        if self.options.enable_onnx_frontend:
            openvino_runtime.libs.extend(["openvino_onnx_frontend", "openvino_onnx_common"])
            openvino_runtime.requires.extend(["protobuf::libprotobuf", "onnx::onnx"])
        if self.options.enable_tf_frontend:
            openvino_runtime.libs.extend(["openvino_tensorflow_frontend", "openvino_tensorflow_common"])
            openvino_runtime.requires.extend(["protobuf::libprotobuf", "snappy::snappy"])
        if self.options.enable_tf_lite_frontend:
            openvino_runtime.libs.extend(["openvino_tensorflow_lite_frontend", "openvino_tensorflow_common"])
            openvino_runtime.requires.extend(["flatbuffers::flatbuffers"])
        if self.options.enable_paddle_frontend:
            openvino_runtime.libs.append("openvino_paddle_frontend")
            openvino_runtime.requires.append("protobuf::libprotobuf")
        if self.options.enable_pytorch_frontend:
            openvino_runtime.libs.append("openvino_pytorch_frontend")
        # Common private dependencies should go last, because they satisfy dependencies for all other libraries
        if Version(self.version) < "2024.0.0":
            openvino_runtime.libs.append("openvino_builders")
        openvino_runtime.libs.extend(["openvino_reference", "openvino_shape_inference", "openvino_itt",
                                      # utils goes last since all others depend on it
                                      "openvino_util"])
        # set 'openvino' once again for transformations objects files (cyclic dependency)
        # openvino_runtime.libs.append("openvino")
        # HACK: inject the absolute path of the core archive via system_libs
        # so it is repeated on the link line (cyclic static dependency).
        full_openvino_lib_path = os.path.join(self.package_folder, "lib", "openvino.lib").replace("\\", "/") if self.settings.os == "Windows" else \
            os.path.join(self.package_folder, "lib", "libopenvino.a")
        openvino_runtime.system_libs.insert(0, full_openvino_lib_path)
        # Add definition to prevent symbols importing
        openvino_runtime.defines = ["OPENVINO_STATIC_LIBRARY"]
    if self.options.get_safe("enable_gpu"):
        openvino_runtime.requires.extend(["opencl-icd-loader::opencl-icd-loader", "rapidjson::rapidjson"])
        if self.settings.os == "Windows":
            openvino_runtime.system_libs.append("setupapi")
    # C API component (openvino::runtime::c).
    openvino_runtime_c = self.cpp_info.components["Runtime_C"]
    openvino_runtime_c.set_property("cmake_target_name", "openvino::runtime::c")
    openvino_runtime_c.libs = ["openvino_c"]
    openvino_runtime_c.requires = ["Runtime"]
    # One component per enabled frontend (openvino::frontend::*).
    # NOTE(review): these list the frontend libs unconditionally; the
    # reported 'openvino_tensorflow_lite_frontend not found' config error
    # suggests the static OMP build may not install them -- verify the
    # package contents against these names.
    if self.options.enable_onnx_frontend:
        openvino_onnx = self.cpp_info.components["ONNX"]
        openvino_onnx.set_property("cmake_target_name", "openvino::frontend::onnx")
        openvino_onnx.libs = ["openvino_onnx_frontend"]
        openvino_onnx.requires = ["Runtime", "onnx::onnx", "protobuf::libprotobuf"]
    if self.options.enable_paddle_frontend:
        openvino_paddle = self.cpp_info.components["Paddle"]
        openvino_paddle.set_property("cmake_target_name", "openvino::frontend::paddle")
        openvino_paddle.libs = ["openvino_paddle_frontend"]
        openvino_paddle.requires = ["Runtime", "protobuf::libprotobuf"]
    if self.options.enable_tf_frontend:
        openvino_tensorflow = self.cpp_info.components["TensorFlow"]
        openvino_tensorflow.set_property("cmake_target_name", "openvino::frontend::tensorflow")
        openvino_tensorflow.libs = ["openvino_tensorflow_frontend"]
        openvino_tensorflow.requires = ["Runtime", "protobuf::libprotobuf", "snappy::snappy"]
    if self.options.enable_pytorch_frontend:
        openvino_pytorch = self.cpp_info.components["PyTorch"]
        openvino_pytorch.set_property("cmake_target_name", "openvino::frontend::pytorch")
        openvino_pytorch.libs = ["openvino_pytorch_frontend"]
        openvino_pytorch.requires = ["Runtime"]
    if self.options.enable_tf_lite_frontend:
        openvino_tensorflow_lite = self.cpp_info.components["TensorFlowLite"]
        openvino_tensorflow_lite.set_property("cmake_target_name", "openvino::frontend::tensorflow_lite")
        openvino_tensorflow_lite.libs = ["openvino_tensorflow_lite_frontend"]
        openvino_tensorflow_lite.requires = ["Runtime", "flatbuffers::flatbuffers"]
I set up the conan-center clone of OpenVino's recipe with:
PS C:\Users\pmcelroy\conan-center-index\recipes\openvino\all> conan source --version=2024.0.0 .
conanfile.py (openvino/2024.0.0): Calling source() in C:\Users\pmcelroy\conan-center-index\recipes\openvino\all\src
conanfile.py (openvino/2024.0.0): Downloading 50.6MB 2024.0.0.tar.gz
conanfile.py (openvino/2024.0.0): Downloading 11.7MB f82148befdbdc9576ec721c9d500155ee4de8060.tar.gz
conanfile.py (openvino/2024.0.0): Downloading 13.9MB v23.08.tar.gz
conanfile.py (openvino/2024.0.0): Downloading 12.9MB 494af5f9921bdae98f1a0e2955fa7d76ff386c4f.tar.gz
conanfile.py (openvino/2024.0.0): Apply patch (portability): Include mutex for std::call_once
conanfile.py (openvino/2024.0.0): Apply patch (portability): Include tensor for dev api
conanfile.py (openvino/2024.0.0): Apply patch (portability): Add support to OpenVINO to build it with standards newer than cpp11
PS C:\Users\pmcelroy\conan-center-index\recipes\openvino\all> conan editable add --version=2024.0.0 .
Reference 'openvino/2024.0.0' in editable mode
Test Project directory:
c:\Users\pmcelroy\git\conan-openvino-static-test\build>conan install . --output-folder=. --build=openvino/2024.0.0 --build="missing"
<conan install and openvino build success>
c:\Users\pmcelroy\git\conan-openvino-static-test\build>cmake .. -G "Visual Studio 17 2022" -DCMAKE_TOOLCHAIN_FILE=c:\Users\pmcelroy\git\conan-openvino-static-test\build\generators\conan_toolchain.cmake -DCMAKE_POLICY_DEFAULT_CMP0091=NEW
-- Using Conan toolchain: C:/Users/pmcelroy/git/conan-openvino-static-test/build/generators/conan_toolchain.cmake
-- Conan toolchain: C++ Standard 17 with extensions OFF
-- Selecting Windows SDK version 10.0.20348.0 to target Windows 10.0.22621.
-- The C compiler identification is MSVC 19.32.31342.0
-- The CXX compiler identification is MSVC 19.32.31342.0
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: C:/Program Files/Microsoft Visual Studio/2022/Enterprise/VC/Tools/MSVC/14.32.31326/bin/Hostx64/x64/cl.exe - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: C:/Program Files/Microsoft Visual Studio/2022/Enterprise/VC/Tools/MSVC/14.32.31326/bin/Hostx64/x64/cl.exe - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Conan: Component target declared 'openvino::runtime'
-- Conan: Component target declared 'openvino::runtime::c'
-- Conan: Component target declared 'openvino::frontend::onnx'
-- Conan: Component target declared 'openvino::frontend::paddle'
-- Conan: Component target declared 'openvino::frontend::tensorflow'
-- Conan: Component target declared 'openvino::frontend::pytorch'
-- Conan: Component target declared 'openvino::frontend::tensorflow_lite'
-- Conan: Target declared 'openvino::openvino'
CMake Error at build/generators/cmakedeps_macros.cmake:67 (message):
Library 'openvino_tensorflow_lite_frontend' not found in package. If
'openvino_tensorflow_lite_frontend' is a system library, declare it with
'cpp_info.system_libs' property
Call Stack (most recent call first):
build/generators/OpenVINO-Target-release.cmake:24 (conan_package_library_targets)
build/generators/OpenVINOTargets.cmake:26 (include)
build/generators/OpenVINOConfig.cmake:16 (include)
CMakeLists.txt:5 (find_package)
-- Configuring incomplete, errors occurred!
The text was updated successfully, but these errors were encountered:
I know you just added support for 2024.0.0, but I was hoping support could be added for OpenVINO's THREADING CMake flag so that everything can be compiled statically.
@Greendogo
Could you please maybe send a PR to CCI with your changes and we can see issues on CI?
OpenVINO has not advocated the use of OMP for a while, and it can be broken.
@Greendogo Could you please maybe send a PR to CCI with your changes and we can see issues on CI? OpenVINO does not advocate for OMP for a while and it can be broken
Description
I'm trying to build OpenVINO as a static library along with any other dependencies due to the nature of my project. Unfortunately, TBB cannot be built statically, so I hope to use THREADING=OMP instead.
However, when doing so, every time I get to the cmake config step I get an error which brings this process to a standstill.
I've cloned the OpenVINO/2024.0.0 recipe from conan-center-index and altered it to support this. I believe I've done it mostly correctly, But I'm not an expert.
Here in the description I've put the recipe conanfile for OpenVino. In the "Steps to Reproduce" I've included all of the other files I've been using. In logs, you can see the config error.
Click to expand altered OpenVino recipe conanfile.py
Test Project Files
Click to expand src/main.cpp
Click to expand cmakelists.txt
Click to expand conanfile.py
Package and Environment Details
Conan profile
[settings]
arch=x86_64
build_type=Release
compiler=msvc
compiler.cppstd=17
compiler.runtime=static
compiler.version=193
os=Windows
Steps to reproduce
I set up the conan-center clone of OpenVino's recipe with:
Test Project directory:
The text was updated successfully, but these errors were encountered: