Skip to content

Commit

Permalink
Merge branch 'devel'
Browse files Browse the repository at this point in the history
  • Loading branch information
tomdeakin committed Jan 30, 2017
2 parents edd65da + b9c514f commit 7465f96
Show file tree
Hide file tree
Showing 23 changed files with 532 additions and 300 deletions.
2 changes: 0 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@

common.h

gpu-stream-cuda
gpu-stream-ocl
gpu-stream-acc
Expand Down
36 changes: 30 additions & 6 deletions ACCStream.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,19 @@ ACCStream<T>::~ACCStream()
}

template <class T>
void ACCStream<T>::init_arrays(T initA, T initB, T initC)
{
  // Initialise all three arrays directly on the device with an OpenACC
  // kernel, rather than filling on the host and updating the device.
  // Local copies are taken because OpenACC data clauses cannot name class
  // members directly; restrict asserts the arrays do not alias so the
  // compiler can parallelise the loop.
  unsigned int array_size = this->array_size;
  T * restrict a = this->a;
  T * restrict b = this->b;
  T * restrict c = this->c;
  // present: the arrays must already exist on the device; wait makes the
  // initialisation synchronous so timing of later kernels is not skewed.
  #pragma acc kernels present(a[0:array_size], b[0:array_size], c[0:array_size]) wait
  for (unsigned int i = 0; i < array_size; i++)
  {
    a[i] = initA;
    b[i] = initB;
    c[i] = initC;
  }
}

template <class T>
Expand Down Expand Up @@ -112,6 +118,24 @@ void ACCStream<T>::triad()
a[i] = b[i] + scalar * c[i];
}
}

template <class T>
T ACCStream<T>::dot()
{
  // Dot product of a and b, reduced on the device and returned to the host.
  T sum = 0.0;

  unsigned int array_size = this->array_size;
  T * restrict a = this->a;
  T * restrict b = this->b;
  // Use an explicit reduction clause: a plain "kernels" construct relies on
  // the compiler detecting the reduction on "sum", which is not guaranteed
  // and can silently serialise or mis-parallelise the loop.
  #pragma acc parallel loop reduction(+:sum) present(a[0:array_size], b[0:array_size]) wait
  for (unsigned int i = 0; i < array_size; i++)
  {
    sum += a[i] * b[i];
  }

  return sum;
}

void listDevices(void)
{
// Get number of devices
Expand Down
3 changes: 2 additions & 1 deletion ACCStream.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,9 @@ class ACCStream : public Stream<T>
virtual void add() override;
virtual void mul() override;
virtual void triad() override;
virtual T dot() override;

virtual void write_arrays(const std::vector<T>& a, const std::vector<T>& b, const std::vector<T>& c) override;
virtual void init_arrays(T initA, T initB, T initC) override;
virtual void read_arrays(std::vector<T>& a, std::vector<T>& b, std::vector<T>& c) override;


Expand Down
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ include(CheckIncludeFileCXX)
include(CheckCXXCompilerFlag)

set(gpu-stream_VERSION_MAJOR 2)
set(gpu-stream_VERSION_MINOR 1)
set(gpu-stream_VERSION_MINOR 2)

configure_file(common.h.in common.h)
include_directories(${CMAKE_BINARY_DIR})
Expand Down
72 changes: 64 additions & 8 deletions CUDAStream.cu
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,6 @@

#include "CUDAStream.h"

#define TBSIZE 1024

void check_error(void)
{
cudaError_t err = cudaGetLastError();
Expand Down Expand Up @@ -47,6 +45,9 @@ CUDAStream<T>::CUDAStream(const unsigned int ARRAY_SIZE, const int device_index)

array_size = ARRAY_SIZE;

// Allocate the host array for partial sums for dot kernels
sums = (T*)malloc(sizeof(T) * DOT_NUM_BLOCKS);

// Check buffers fit on the device
cudaDeviceProp props;
cudaGetDeviceProperties(&props, 0);
Expand All @@ -60,29 +61,42 @@ CUDAStream<T>::CUDAStream(const unsigned int ARRAY_SIZE, const int device_index)
check_error();
cudaMalloc(&d_c, ARRAY_SIZE*sizeof(T));
check_error();
cudaMalloc(&d_sum, DOT_NUM_BLOCKS*sizeof(T));
check_error();
}


// Release the host-side partial-sum buffer and every device allocation.
template <class T>
CUDAStream<T>::~CUDAStream()
{
  // Host buffer was allocated with malloc in the constructor.
  free(sums);

  // Free each device buffer, checking for errors after every call.
  T* device_buffers[] = { d_a, d_b, d_c, d_sum };
  for (T* buffer : device_buffers)
  {
    cudaFree(buffer);
    check_error();
  }
}


// Initialisation kernel: one thread per array element writes the initial
// value into each of the three arrays. There is no bounds check, so the
// launch configuration must supply exactly one thread per element.
template <typename T>
__global__ void init_kernel(T * a, T * b, T * c, T initA, T initB, T initC)
{
  const int gid = threadIdx.x + blockIdx.x * blockDim.x;
  c[gid] = initC;
  b[gid] = initB;
  a[gid] = initA;
}

template <class T>
void CUDAStream<T>::init_arrays(T initA, T initB, T initC)
{
  // Initialise the arrays on the device with one thread per element.
  // NOTE(review): array_size/TBSIZE is integer division, so if array_size is
  // not a multiple of TBSIZE the tail elements are never initialised —
  // presumably the driver guarantees divisibility; confirm at the call site.
  init_kernel<<<array_size/TBSIZE, TBSIZE>>>(d_a, d_b, d_c, initA, initB, initC);
  check_error();
  // Kernel launches are asynchronous: synchronise so initialisation is
  // complete (and any execution error is surfaced) before returning.
  cudaDeviceSynchronize();
  check_error();
}

Expand Down Expand Up @@ -165,6 +179,48 @@ void CUDAStream<T>::triad()
check_error();
}

// Per-block partial dot product: each block reduces a strided slice of
// a[i]*b[i] into one value and writes it to sum[blockIdx.x]; the host
// finishes the reduction over the gridDim.x partial results.
// Requires blockDim.x * sizeof(T) bytes of dynamic shared memory, and the
// halving loop below requires blockDim.x to be a power of two.
template <class T>
__global__ void dot_kernel(const T * a, const T * b, T * sum, unsigned int array_size)
{

  // Raw dynamic shared-memory buffer, reinterpreted as T. The __align__
  // keeps the cast well-defined for any element type T.
  extern __shared__ __align__(sizeof(T)) unsigned char smem[];
  T *tb_sum = reinterpret_cast<T*>(smem);

  int i = blockDim.x * blockIdx.x + threadIdx.x;
  const size_t local_i = threadIdx.x;

  // Grid-stride loop: each thread accumulates its strided share of the
  // element products, so any grid size covers the whole array.
  tb_sum[local_i] = 0.0;
  for (; i < array_size; i += blockDim.x*gridDim.x)
    tb_sum[local_i] += a[i] * b[i];

  // Tree reduction in shared memory. The barrier sits outside the divergent
  // branch so every thread in the block reaches it each iteration.
  for (int offset = blockDim.x / 2; offset > 0; offset /= 2)
  {
    __syncthreads();
    if (local_i < offset)
    {
      tb_sum[local_i] += tb_sum[local_i+offset];
    }
  }

  // Thread 0 publishes this block's partial result.
  if (local_i == 0)
    sum[blockIdx.x] = tb_sum[local_i];
}

// Host side of the dot product: launch the reduction kernel, copy back the
// per-block partial sums, and finish the reduction serially on the host.
template <class T>
T CUDAStream<T>::dot()
{
  // One partial sum per block; dynamic shared memory holds one T per thread.
  dot_kernel<<<DOT_NUM_BLOCKS, TBSIZE, sizeof(T)*TBSIZE>>>(d_a, d_b, d_sum, array_size);
  check_error();

  // The blocking memcpy also synchronises with the kernel launched above.
  cudaMemcpy(sums, d_sum, DOT_NUM_BLOCKS*sizeof(T), cudaMemcpyDeviceToHost);
  check_error();

  // Accumulate the per-block partial results.
  T result = 0.0;
  for (int block = 0; block < DOT_NUM_BLOCKS; block++)
    result += sums[block];

  return result;
}

void listDevices(void)
{
Expand Down
11 changes: 10 additions & 1 deletion CUDAStream.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,24 @@

#define IMPLEMENTATION_STRING "CUDA"

#define TBSIZE 1024
#define DOT_NUM_BLOCKS 256

template <class T>
class CUDAStream : public Stream<T>
{
protected:
// Size of arrays
unsigned int array_size;

// Host array for partial sums for dot kernel
T *sums;

// Device side pointers to arrays
T *d_a;
T *d_b;
T *d_c;
T *d_sum;


public:
Expand All @@ -36,8 +44,9 @@ class CUDAStream : public Stream<T>
virtual void add() override;
virtual void mul() override;
virtual void triad() override;
virtual T dot() override;

virtual void write_arrays(const std::vector<T>& a, const std::vector<T>& b, const std::vector<T>& c) override;
virtual void init_arrays(T initA, T initB, T initC) override;
virtual void read_arrays(std::vector<T>& a, std::vector<T>& b, std::vector<T>& c) override;

};
18 changes: 12 additions & 6 deletions HIPStream.cu
Original file line number Diff line number Diff line change
Expand Up @@ -74,15 +74,21 @@ HIPStream<T>::~HIPStream()
check_error();
}

// HIP initialisation kernel: one thread per element writes the initial value
// into each of the three arrays. No bounds check is performed, so the launch
// must supply exactly one thread per element.
// hipLaunchParm is the legacy implicit first parameter required by the
// hipLaunchKernel macro used to launch this kernel.
template <typename T>
__global__ void init_kernel(hipLaunchParm lp, T * a, T * b, T * c, T initA, T initB, T initC)
{
  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
  a[i] = initA;
  b[i] = initB;
  c[i] = initC;
}

template <class T>
void HIPStream<T>::init_arrays(T initA, T initB, T initC)
{
  // Initialise the arrays on the device with one thread per element.
  // NOTE(review): array_size/TBSIZE is integer division, so if array_size is
  // not a multiple of TBSIZE the tail elements are never initialised —
  // presumably the driver guarantees divisibility; confirm at the call site.
  hipLaunchKernel(HIP_KERNEL_NAME(init_kernel), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c, initA, initB, initC);
  check_error();
  // Kernel launches are asynchronous: synchronise so initialisation is
  // complete (and any execution error is surfaced) before returning.
  hipDeviceSynchronize();
  check_error();
}

Expand Down
2 changes: 1 addition & 1 deletion HIPStream.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ class HIPStream : public Stream<T>
virtual void mul() override;
virtual void triad() override;

virtual void write_arrays(const std::vector<T>& a, const std::vector<T>& b, const std::vector<T>& c) override;
virtual void init_arrays(T initA, T initB, T initC) override;
virtual void read_arrays(std::vector<T>& a, std::vector<T>& b, std::vector<T>& c) override;

};
37 changes: 27 additions & 10 deletions KOKKOSStream.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,18 +34,18 @@ KOKKOSStream<T>::~KOKKOSStream()
}

template <class T>
void KOKKOSStream<T>::init_arrays(T initA, T initB, T initC)
{
  // Local copies of the device views so the lambda can capture them by value.
  // NOTE(review): the element type is hard-coded to double, as elsewhere in
  // this file, so T=float would still initialise double storage — confirm.
  View<double*, DEVICE> a(*d_a);
  View<double*, DEVICE> b(*d_b);
  View<double*, DEVICE> c(*d_c);
  parallel_for(array_size, KOKKOS_LAMBDA (const int index)
  {
    a[index] = initA;
    // Fixed: this line read "b[index] - initB", a no-op expression that left
    // the b array uninitialised.
    b[index] = initB;
    c[index] = initC;
  });
  // Block until the device-side initialisation has completed.
  Kokkos::fence();
}

template <class T>
Expand Down Expand Up @@ -121,6 +121,23 @@ void KOKKOSStream<T>::triad()
Kokkos::fence();
}

// Dot product of a and b computed on the device with Kokkos::parallel_reduce.
template <class T>
T KOKKOSStream<T>::dot()
{
  // Local copies of the device views so the lambda can capture them by value.
  // NOTE(review): element type is hard-coded to double (as elsewhere in this
  // file), so for T=float the products are accumulated in double — confirm
  // this is intended.
  View<double *, DEVICE> a(*d_a);
  View<double *, DEVICE> b(*d_b);

  T sum = 0.0;

  // parallel_reduce with a scalar result blocks until the reduction has
  // completed, so no explicit fence is needed before returning.
  parallel_reduce(array_size, KOKKOS_LAMBDA (const int index, double &tmp)
  {
    tmp += a[index] * b[index];
  }, sum);

  return sum;

}

void listDevices(void)
{
std::cout << "This is not the device you are looking for.";
Expand Down
4 changes: 2 additions & 2 deletions KOKKOSStream.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,9 +47,9 @@ class KOKKOSStream : public Stream<T>
virtual void add() override;
virtual void mul() override;
virtual void triad() override;
virtual T dot() override;

virtual void write_arrays(
const std::vector<T>& a, const std::vector<T>& b, const std::vector<T>& c) override;
virtual void init_arrays(T initA, T initB, T initC) override;
virtual void read_arrays(
std::vector<T>& a, std::vector<T>& b, std::vector<T>& c) override;
};
Expand Down
Loading

0 comments on commit 7465f96

Please sign in to comment.