Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
696dd44
Add support for clang as the host compiler
ptheywood Aug 10, 2021
2596a9d
clang-ci-fixup
ptheywood Oct 19, 2022
e99b062
Add clang to readme
ptheywood Oct 19, 2022
46519c9
Add clang-9 ci for testing purposes. Not sure which will be kept
ptheywood Oct 19, 2022
ae69bcc
Try some other clang/cuda combos
ptheywood Oct 19, 2022
547630b
Clang 13+ check for newer warning
ptheywood Oct 19, 2022
34027ec
Address additional clang warnings (when combined with vis hash)
ptheywood Oct 20, 2022
aa5a5d4
Add extra clang CI tests for now, to check compat
ptheywood Oct 21, 2022
1ee0ca5
Do not manually build and install swig if not using gcc as the host c…
ptheywood Oct 21, 2022
71e0f4b
Better swig error if not found and using clang
ptheywood Jan 5, 2024
fd782eb
Fix clang pessimizing move warnings
ptheywood Jan 5, 2024
fe0da5d
another std::move warning
ptheywood Jan 8, 2024
36549d0
Adjust ubuntu clang matrix to be the major version of the max support…
ptheywood Jan 8, 2024
cd77eec
missing <cstdint> for clang
ptheywood Jan 8, 2024
769d8b4
fixup
ptheywood Jan 8, 2024
bf0aafd
Clang: Fix implicit-const-int-float-conversion warnings in test suite…
ptheywood Jan 8, 2024
36d7cbe
Try downgrading clang on ci for cu 117 and 118
ptheywood Jan 9, 2024
0215827
Try more clangs with cuda 12.0
ptheywood Jan 9, 2024
5da64c8
refine clang ci matrix
ptheywood Jan 9, 2024
49f1bc5
ci typo fix
ptheywood Jan 9, 2024
279c443
Try older clang with cuda 12.0 via ubuntu 20.04 CI - this would only …
ptheywood Jan 11, 2024
15f2e7d
Switch old-clang ci to clang 8 on 20.04. Clang 7 + nvcc doesn't know …
ptheywood Jan 11, 2024
c23bc25
try cuda 11.8 + clang 8 again just to check
ptheywood Jan 11, 2024
0d583d3
try cuda 11.x with clang8 on 20.04 again just to double check its not…
ptheywood Jan 11, 2024
4553387
output log on configure errors.
ptheywood Jan 11, 2024
5cf5b86
fixup
ptheywood Jan 11, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
80 changes: 79 additions & 1 deletion .github/workflows/Ubuntu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,52 @@ jobs:
cuda_arch: "35"
hostcxx: gcc-8
os: ubuntu-20.04
# newest cuda with newest clang on ubuntu 22.04
- cuda: "12.3"
cuda_arch: "50"
hostcxx: clang-14 # 7-16
os: ubuntu-22.04
# cuda 12.0 with oldest clang on ubuntu 20.04 which supports c++17
- cuda: "12.0"
cuda_arch: "50"
hostcxx: clang-8 # 7-15
os: ubuntu-20.04
- cuda: "11.8"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
- cuda: "11.7"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
- cuda: "11.6"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
- cuda: "11.5"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
- cuda: "11.4"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
- cuda: "11.3"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
- cuda: "11.2"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
- cuda: "11.1"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
- cuda: "11.0"
cuda_arch: "50"
hostcxx: clang-8
os: ubuntu-20.04
python:
- "3.12"
config:
Expand All @@ -67,13 +113,26 @@ jobs:
cuda: "11.0"
config:
name: "Beltsoff"
# Exclude beltsoff builds with clang's
- cudacxx:
hostcxx: "clang-14"
config:
name: "Beltsoff"
- cudacxx:
hostcxx: "clang-8"
config:
name: "Beltsoff"
# Exclude vis with older clang
- cudacxx:
hostcxx: "clang-8"
VISUALISATION: "ON"
# Exclude beltsoff vis builds to keep the matrix lighter.
- config:
name: "Beltsoff"
VISUALISATION: "ON"

# Name the job based on matrix/env options
name: "build (${{ matrix.cudacxx.cuda }}, ${{matrix.python}}, ${{ matrix.VISUALISATION }}, ${{ matrix.config.name }}, ${{ matrix.cudacxx.os }})"
name: "build (${{ matrix.cudacxx.cuda }}, ${{ matrix.cudacxx.hostcxx }}, ${{matrix.python}}, ${{ matrix.VISUALISATION }}, ${{ matrix.config.name }}, ${{ matrix.cudacxx.os }})"

# Define job-wide env constants, and promote matrix elements to env constants for portable steps.
env:
Expand Down Expand Up @@ -113,6 +172,16 @@ jobs:
echo "CXX=/usr/bin/g++-${gcc_version}" >> $GITHUB_ENV
echo "CUDAHOSTCXX=/usr/bin/g++-${gcc_version}" >> $GITHUB_ENV

- name: Install/Select clang and clang++
if: ${{ startsWith(env.HOSTCXX, 'clang-')}}
run: |
clang=${{ env.HOSTCXX }}
clang_version=${clang/clang-/}
sudo apt-get install -y clang-${clang_version} clang-tools-${clang_version}
echo "CC=/usr/bin/clang-${clang_version}" >> $GITHUB_ENV
echo "CXX=/usr/bin/clang++-${clang_version}" >> $GITHUB_ENV
echo "CUDAHOSTCXX=/usr/bin/clang++-${clang_version}" >> $GITHUB_ENV

- name: Select Python
if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }}
uses: actions/setup-python@v4
Expand Down Expand Up @@ -172,6 +241,7 @@ jobs:
run: git config --global --add safe.directory $GITHUB_WORKSPACE

- name: Configure cmake
id: configure
run: >
cmake . -B "${{ env.BUILD_DIR }}"
-DCMAKE_BUILD_TYPE="${{ env.CONFIG }}"
Expand All @@ -185,6 +255,14 @@ jobs:
-DPYTHON3_EXACT_VERSION="${{ env.PYTHON }}"
-DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}"
-DFLAMEGPU_ENABLE_NVTX="ON"

- name: Log Configure Errors
if: ${{ success() || (failure() && steps.configure.conclusion == 'failure') }}
run: |
echo "${{ env.BUILD_DIR }}/CMakeFiles/CMakeOutput.log:"
cat ${{ env.BUILD_DIR }}/CMakeFiles/CMakeOutput.log || true
echo "${{ env.BUILD_DIR }}/CMakeFiles/CMakeError.log:"
cat ${{ env.BUILD_DIR }}/CMakeFiles/CMakeError.log || true

- name: Build static library
working-directory: ${{ env.BUILD_DIR }}
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ Building FLAME GPU has the following requirements. There are also optional depen
+ C++17 capable C++ compiler (host), compatible with the installed CUDA version
+ [Microsoft Visual Studio 2019 or 2022](https://visualstudio.microsoft.com/) (Windows)
+ *Note:* Visual Studio must be installed before the CUDA toolkit is installed. See the [CUDA installation guide for Windows](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html) for more information.
+ [make](https://www.gnu.org/software/make/) and [GCC](https://gcc.gnu.org/) `>= 8.1` (Linux)
+ [make](https://www.gnu.org/software/make/) and [GCC](https://gcc.gnu.org/) `>= 8.1` or [Clang](https://clang.llvm.org/) `>= 9` (Linux)
+ [git](https://git-scm.com/)

Optionally:
Expand Down
2 changes: 1 addition & 1 deletion cmake/dependencies/flamegpu2-visualiser.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ include(FetchContent)
cmake_policy(SET CMP0079 NEW)

# Set the visualiser repo and tag to use unless overridden by the user.
set(DEFAULT_FLAMEGPU_VISUALISATION_GIT_VERSION "flamegpu-2.0.0-rc.1")
set(DEFAULT_FLAMEGPU_VISUALISATION_GIT_VERSION "b8e875208c1799916e8f2d826a05eb98ef8273ff")
set(DEFAULT_FLAMEGPU_VISUALISATION_REPOSITORY "https://github.com/FLAMEGPU/FLAMEGPU2-visualiser.git")

# Set a VISUALISATION_ROOT cache entry so it is available in the GUI to override the location if required
Expand Down
14 changes: 12 additions & 2 deletions cmake/warnings.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -115,8 +115,18 @@ if(NOT COMMAND flamegpu_suppress_some_compiler_warnings)
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.6.0)
target_compile_definitions(${SSCW_TARGET} PRIVATE "__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING")
endif()
else()
# Linux specific warning suppressions
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
# Suppress unused function warnings raised by clang on some vis headers
target_compile_options(${SSCW_TARGET} PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:SHELL:-Xcompiler -Wno-unused-function>")
target_compile_options(${SSCW_TARGET} PRIVATE "$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-unused-function>")
# Suppress unused-private-field warnings on Clang, which are falsely emitted in some cases where a private member is used in device code (i.e. ArrayMessage)
target_compile_options(${SSCW_TARGET} PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:SHELL:-Xcompiler -Wno-unused-private-field>")
target_compile_options(${SSCW_TARGET} PRIVATE "$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-unused-private-field>")
# Suppress unused-but-set-variable which triggers on some device code, clang 13+
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13.0)
target_compile_options(${SSCW_TARGET} PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:SHELL:-Xcompiler -Wno-unused-but-set-variable>")
target_compile_options(${SSCW_TARGET} PRIVATE "$<$<COMPILE_LANGUAGE:C,CXX>:-Wno-unused-but-set-variable>")
endif()
endif()
# Generic OS/host compiler warning suppressions
# Ensure NVCC outputs warning numbers
Expand Down
1 change: 1 addition & 0 deletions include/flamegpu/detail/cuda.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

#include <cuda_runtime.h>
#include <cuda.h>
#include <cstdint>
#include <limits>
#include "flamegpu/exception/FLAMEGPUException.h"

Expand Down
6 changes: 3 additions & 3 deletions include/flamegpu/simulation/detail/CUDAAgent.h
Original file line number Diff line number Diff line change
Expand Up @@ -315,17 +315,17 @@ class CUDAAgent : public AgentInterface {
* @param state_name Agent state to affect
* @param d_vec The DeviceAgentVector to be stored
*/
void setPopulationVec(const std::string& state_name, const std::shared_ptr<DeviceAgentVector_impl>& d_vec);
void setPopulationVec(const std::string& state_name, const std::shared_ptr<DeviceAgentVector_impl>& d_vec) override;
/**
* Used to allow HostAgentAPI to retrieve a stored DeviceAgentVector
* @param state_name Agent state to affect
*/
std::shared_ptr<DeviceAgentVector_impl> getPopulationVec(const std::string& state_name);
std::shared_ptr<DeviceAgentVector_impl> getPopulationVec(const std::string& state_name) override;
/**
* Used to allow HostAgentAPI to clear the stored DeviceAgentVector
* Any changes will be synchronised first
*/
void resetPopulationVecs();
void resetPopulationVecs() override;

private:
/**
Expand Down
2 changes: 1 addition & 1 deletion src/flamegpu/model/AgentDescription.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ namespace flamegpu {
CAgentDescription::CAgentDescription(std::shared_ptr<AgentData> data)
: agent(std::move(data)) { }
CAgentDescription::CAgentDescription(std::shared_ptr<const AgentData> data)
: agent(std::move(std::const_pointer_cast<AgentData>(data))) { }
: agent(std::const_pointer_cast<AgentData>(data)) { }

bool CAgentDescription::operator==(const CAgentDescription& rhs) const {
return *this->agent == *rhs.agent; // Compare content is functionally the same
Expand Down
2 changes: 1 addition & 1 deletion src/flamegpu/model/AgentFunctionDescription.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ namespace flamegpu {
CAgentFunctionDescription::CAgentFunctionDescription(std::shared_ptr<AgentFunctionData> data)
: function(std::move(data)) { }
CAgentFunctionDescription::CAgentFunctionDescription(std::shared_ptr<const AgentFunctionData> data)
: function(std::move(std::const_pointer_cast<AgentFunctionData>(data))) { }
: function(std::const_pointer_cast<AgentFunctionData>(data)) { }

bool CAgentFunctionDescription::operator==(const CAgentFunctionDescription& rhs) const {
return *this->function == *rhs.function; // Compare content is functionally the same
Expand Down
2 changes: 1 addition & 1 deletion src/flamegpu/model/SubAgentDescription.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ namespace flamegpu {
CSubAgentDescription::CSubAgentDescription(std::shared_ptr<SubAgentData> data)
: subagent(std::move(data)) { }
CSubAgentDescription::CSubAgentDescription(std::shared_ptr<const SubAgentData> data)
: subagent(std::move(std::const_pointer_cast<SubAgentData>(data))) { }
: subagent(std::const_pointer_cast<SubAgentData>(data)) { }

bool CSubAgentDescription::operator==(const CSubAgentDescription& rhs) const {
return *this->subagent == *rhs.subagent; // Compare content is functionally the same
Expand Down
2 changes: 1 addition & 1 deletion src/flamegpu/model/SubEnvironmentDescription.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ namespace flamegpu {
CSubEnvironmentDescription::CSubEnvironmentDescription(std::shared_ptr<SubEnvironmentData> data)
: subenvironment(std::move(data)) { }
CSubEnvironmentDescription::CSubEnvironmentDescription(std::shared_ptr<const SubEnvironmentData> data)
: subenvironment(std::move(std::const_pointer_cast<SubEnvironmentData>(data))) { }
: subenvironment(std::const_pointer_cast<SubEnvironmentData>(data)) { }

bool CSubEnvironmentDescription::operator==(const CSubEnvironmentDescription& rhs) const {
return *this->subenvironment == *rhs.subenvironment; // Compare content is functionally the same
Expand Down
2 changes: 1 addition & 1 deletion src/flamegpu/model/SubModelDescription.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ namespace flamegpu {
CSubModelDescription::CSubModelDescription(std::shared_ptr<SubModelData> data)
: submodel(std::move(data)) { }
CSubModelDescription::CSubModelDescription(std::shared_ptr<const SubModelData> data)
: submodel(std::move(std::const_pointer_cast<SubModelData>(data))) { }
: submodel(std::const_pointer_cast<SubModelData>(data)) { }

bool CSubModelDescription::operator==(const CSubModelDescription& rhs) const {
return *this->submodel == *rhs.submodel; // Compare content is functionally the same
Expand Down
4 changes: 2 additions & 2 deletions src/flamegpu/runtime/messaging/MessageArray.cu
Original file line number Diff line number Diff line change
Expand Up @@ -100,9 +100,9 @@ void MessageArray::CUDAModelHandler::buildIndex(detail::CUDAScatter &scatter, un
/// CDescription
/// </summary>
MessageArray::CDescription::CDescription(std::shared_ptr<Data> data)
: MessageBruteForce::CDescription(std::move(std::static_pointer_cast<MessageBruteForce::Data>(data))) { }
: MessageBruteForce::CDescription(std::static_pointer_cast<MessageBruteForce::Data>(data)) { }
MessageArray::CDescription::CDescription(std::shared_ptr<const Data> data)
: CDescription(std::move(std::const_pointer_cast<Data>(data))) { }
: CDescription(std::const_pointer_cast<Data>(data)) { }

bool MessageArray::CDescription::operator==(const CDescription& rhs) const {
return *this->message == *rhs.message; // Compare content is functionally the same
Expand Down
4 changes: 2 additions & 2 deletions src/flamegpu/runtime/messaging/MessageArray2D.cu
Original file line number Diff line number Diff line change
Expand Up @@ -101,9 +101,9 @@ void MessageArray2D::CUDAModelHandler::buildIndex(detail::CUDAScatter &scatter,
/// CDescription
/// </summary>
MessageArray2D::CDescription::CDescription(std::shared_ptr<Data> data)
: MessageBruteForce::CDescription(std::move(std::static_pointer_cast<MessageBruteForce::Data>(data))) { }
: MessageBruteForce::CDescription(std::static_pointer_cast<MessageBruteForce::Data>(data)) { }
MessageArray2D::CDescription::CDescription(std::shared_ptr<const Data> data)
: CDescription(std::move(std::const_pointer_cast<Data>(data))) { }
: CDescription(std::const_pointer_cast<Data>(data)) { }

bool MessageArray2D::CDescription::operator==(const CDescription& rhs) const {
return *this->message == *rhs.message; // Compare content is functionally the same
Expand Down
4 changes: 2 additions & 2 deletions src/flamegpu/runtime/messaging/MessageArray3D.cu
Original file line number Diff line number Diff line change
Expand Up @@ -101,9 +101,9 @@ void MessageArray3D::CUDAModelHandler::buildIndex(detail::CUDAScatter &scatter,
/// CDescription
/// </summary>
MessageArray3D::CDescription::CDescription(std::shared_ptr<Data> data)
: MessageBruteForce::CDescription(std::move(std::static_pointer_cast<MessageBruteForce::Data>(data))) { }
: MessageBruteForce::CDescription(std::static_pointer_cast<MessageBruteForce::Data>(data)) { }
MessageArray3D::CDescription::CDescription(std::shared_ptr<const Data> data)
: CDescription(std::move(std::const_pointer_cast<Data>(data))) { }
: CDescription(std::const_pointer_cast<Data>(data)) { }

bool MessageArray3D::CDescription::operator==(const CDescription& rhs) const {
return *this->message == *rhs.message; // Compare content is functionally the same
Expand Down
2 changes: 1 addition & 1 deletion src/flamegpu/runtime/messaging/MessageBruteForce.cu
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ std::type_index MessageBruteForce::Data::getType() const { return std::type_inde
MessageBruteForce::CDescription::CDescription(std::shared_ptr<Data> data)
: message(std::move(data)) { }
MessageBruteForce::CDescription::CDescription(std::shared_ptr<const Data> data)
: message(std::move(std::const_pointer_cast<Data>(data))) { }
: message(std::const_pointer_cast<Data>(data)) { }

bool MessageBruteForce::CDescription::operator==(const CDescription& rhs) const {
return *this->message == *rhs.message; // Compare content is functionally the same
Expand Down
4 changes: 2 additions & 2 deletions src/flamegpu/runtime/messaging/MessageBucket.cu
Original file line number Diff line number Diff line change
Expand Up @@ -160,9 +160,9 @@ void MessageBucket::CUDAModelHandler::resizeKeysVals(const unsigned int newSize)
/// CDescription
/// </summary>
MessageBucket::CDescription::CDescription(std::shared_ptr<Data> data)
: MessageBruteForce::CDescription(std::move(std::static_pointer_cast<MessageBruteForce::Data>(data))) { }
: MessageBruteForce::CDescription(std::static_pointer_cast<MessageBruteForce::Data>(data)) { }
MessageBucket::CDescription::CDescription(std::shared_ptr<const Data> data)
: CDescription(std::move(std::const_pointer_cast<Data>(data))) { }
: CDescription(std::const_pointer_cast<Data>(data)) { }

bool MessageBucket::CDescription::operator==(const CDescription& rhs) const {
return *this->message == *rhs.message; // Compare content is functionally the same
Expand Down
4 changes: 2 additions & 2 deletions src/flamegpu/runtime/messaging/MessageSpatial2D.cu
Original file line number Diff line number Diff line change
Expand Up @@ -167,9 +167,9 @@ void MessageSpatial2D::CUDAModelHandler::resizeKeysVals(const unsigned int newSi
/// CDescription
/// </summary>
MessageSpatial2D::CDescription::CDescription(std::shared_ptr<Data> data)
: MessageBruteForce::CDescription(std::move(std::static_pointer_cast<MessageBruteForce::Data>(data))) { }
: MessageBruteForce::CDescription(std::static_pointer_cast<MessageBruteForce::Data>(data)) { }
MessageSpatial2D::CDescription::CDescription(std::shared_ptr<const Data> data)
: CDescription(std::move(std::const_pointer_cast<Data>(data))) { }
: CDescription(std::const_pointer_cast<Data>(data)) { }

bool MessageSpatial2D::CDescription::operator==(const CDescription& rhs) const {
return *this->message == *rhs.message; // Compare content is functionally the same
Expand Down
4 changes: 2 additions & 2 deletions src/flamegpu/runtime/messaging/MessageSpatial3D.cu
Original file line number Diff line number Diff line change
Expand Up @@ -166,9 +166,9 @@ void MessageSpatial3D::CUDAModelHandler::resizeKeysVals(const unsigned int newSi
/// CDescription
/// </summary>
MessageSpatial3D::CDescription::CDescription(std::shared_ptr<Data> data)
: MessageSpatial2D::CDescription(std::move(std::static_pointer_cast<MessageSpatial2D::Data>(data))) { }
: MessageSpatial2D::CDescription(std::static_pointer_cast<MessageSpatial2D::Data>(data)) { }
MessageSpatial3D::CDescription::CDescription(std::shared_ptr<const Data> data)
: CDescription(std::move(std::const_pointer_cast<Data>(data))) { }
: CDescription(std::const_pointer_cast<Data>(data)) { }

bool MessageSpatial3D::CDescription::operator==(const CDescription& rhs) const {
return *this->message == *rhs.message; // Compare content is functionally the same
Expand Down
6 changes: 5 additions & 1 deletion swig/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,8 @@ if(NOT SWIG_FOUND)
set(SWIG_EXECUTABLE "${swig_SOURCE_DIR}/swig.exe")
set(SWIG_EXECUTABLE "${swig_SOURCE_DIR}/swig.exe" CACHE FILEPATH "Path to SWIG executable")
endif()
else()
# If under linux, and using gcc download, extract, build and install swig
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# Under linux, download the .tar.gz, extract, build and install.
# This must be done at configure time, as FindSwig requires the swig executable.
# FetchContent allows download at configure time, but must use execute_process to run commands at configure time.
Expand Down Expand Up @@ -147,6 +148,9 @@ if(NOT SWIG_FOUND)
set(SWIG_EXECUTABLE "${swig_BINARY_DIR}/bin/swig")
set(SWIG_EXECUTABLE "${swig_BINARY_DIR}/bin/swig" CACHE FILEPATH "Path to SWIG executable")
endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
message(FATAL_ERROR " Unable to build swig from source with some Clang versions.\n"
" Please install swig >= ${SWIG_MINIMUM_SUPPORTED_VERSION} manually.")
endif()
# Attempt to find swig again, but as REQUIRED.
find_package(SWIG ${SWIG_MINIMUM_SUPPORTED_VERSION} REQUIRED)
Expand Down
8 changes: 4 additions & 4 deletions tests/test_cases/runtime/random/test_host_random.cu
Original file line number Diff line number Diff line change
Expand Up @@ -125,8 +125,8 @@ FLAMEGPU_STEP_FUNCTION(step_uniform_int_range) {
FLAMEGPU_STEP_FUNCTION(step_uniform_ulonglong_range) {
for (auto &i : unsigned_longlong_out)
ASSERT_NO_THROW(i = FLAMEGPU->random.uniform<uint64_t>(
static_cast<uint64_t>(UINT64_MAX * 0.25),
static_cast<uint64_t>(UINT64_MAX * 0.75)));
static_cast<uint64_t>(static_cast<double>(UINT64_MAX) * 0.25),
static_cast<uint64_t>(static_cast<double>(UINT64_MAX) * 0.75)));
}
FLAMEGPU_STEP_FUNCTION(step_uniform_longlong_range) {
for (auto &i : longlong_out)
Expand Down Expand Up @@ -1048,8 +1048,8 @@ TEST_F(HostRandomTest, UniformULongLongRange) {
ms->model.addStepFunction(step_uniform_ulonglong_range);
ms->run();
for (auto &i : unsigned_longlong_out) {
EXPECT_GE(i, static_cast<uint64_t>(UINT64_MAX*0.25));
EXPECT_LE(i, static_cast<uint64_t>(UINT64_MAX*0.75));
EXPECT_GE(i, static_cast<uint64_t>(static_cast<double>(UINT64_MAX)*0.25));
EXPECT_LE(i, static_cast<uint64_t>(static_cast<double>(UINT64_MAX)*0.75));
}
}
TEST_F(HostRandomTest, UniformLongLongRange) {
Expand Down