format: apply clang-format and autopep
changlan committed Jul 9, 2019
1 parent c6885e7 commit 4b54979
Showing 60 changed files with 2,753 additions and 2,752 deletions.
5 changes: 5 additions & 0 deletions .clang-format
@@ -0,0 +1,5 @@
+---
+BasedOnStyle: Google
+---
+Language: Cpp
+ColumnLimit: 80
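A hedged illustration of what this configuration produces; the file and function names below are invented, only the style rules come from the config above, and the commit was presumably applied with the tools' standard in-place modes (clang-format -i -style=file, autopep8 --in-place):

// demo.cc -- illustration only, not part of the commit. Under Google style
// with an 80-column limit, clang-format collapses short function bodies onto
// one line and puts two spaces before trailing comments -- the same
// transformations visible in the common.cc and common.h hunks below.
#include <iostream>
#include <string>

// Pre-format shape (three lines):
//   std::string greeting() {
//     return "hello";
//   }
// Post-format shape (one line):
std::string greeting() { return "hello"; }

int main() {
  std::cout << greeting() << std::endl;  // two spaces before this comment
  return 0;
}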
2 changes: 1 addition & 1 deletion byteps/_keras/__init__.py
@@ -99,4 +99,4 @@ def load_model(keras, wrap_optimizer, filepath, custom_optimizers, custom_objects
    if custom_objects is not None:
        byteps_objects.update(custom_objects)

-    return keras.models.load_model(filepath, custom_objects=byteps_objects)
+    return keras.models.load_model(filepath, custom_objects=byteps_objects)
4 changes: 2 additions & 2 deletions byteps/_keras/callbacks.py
@@ -112,7 +112,7 @@ def _adjust_learning_rate(self, epoch):
            # See the paper cited above for more information about momentum correction.
            self.restore_momentum = self.backend.get_value(self.model.optimizer.momentum)
            self.backend.set_value(self.model.optimizer.momentum,
-                                   self.restore_momentum * new_lr / old_lr)
+                                   self.restore_momentum * new_lr / old_lr)

    def _restore_momentum_if_needed(self):
        if self.restore_momentum:
@@ -168,4 +168,4 @@ def on_epoch_end(self, epoch, logs=None):
        if epoch == self.end_epoch - 1 and self.verbose > 0:
            new_lr = self.backend.get_value(self.model.optimizer.lr)
            print('\nEpoch %d: finished gradual learning rate warmup to %g.' %
-                  (epoch + 1, new_lr))
+                  (epoch + 1, new_lr))
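An aside on the arithmetic in the first hunk: during learning-rate warmup the callback temporarily rescales the optimizer's momentum by new_lr / old_lr, and _restore_momentum_if_needed() puts it back afterwards, so the accumulated velocity keeps a consistent effective step size across the rate change (the file's own comment points at the paper it follows). A standalone sketch with made-up values:

#include <iostream>

// Illustration only: the momentum-correction rescaling used around a
// learning-rate change. All values here are invented.
int main() {
  double old_lr = 0.10;
  double new_lr = 0.11;   // one gradual-warmup step
  double momentum = 0.9;  // value saved in restore_momentum

  // Applied for the next update, then restored by
  // _restore_momentum_if_needed().
  double corrected = momentum * new_lr / old_lr;

  std::cout << "corrected momentum: " << corrected << std::endl;  // ~0.99
  return 0;
}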
38 changes: 11 additions & 27 deletions byteps/common/common.cc
@@ -14,8 +14,8 @@
// limitations under the License.
// =============================================================================

-#include <sstream>
#include <cassert>
+#include <sstream>

#include "common.h"
#include "logging.h"
@@ -30,9 +30,7 @@ Status::Status(StatusType type, std::string reason) {
  reason_ = reason;
}

-Status Status::OK() {
-  return Status();
-}
+Status Status::OK() { return Status(); }

Status Status::UnknownError(std::string message) {
  return Status(StatusType::UNKNOWN_ERROR, message);
@@ -50,29 +48,17 @@ Status Status::InvalidArgument(std::string message) {
  return Status(StatusType::INVALID_ARGUMENT, message);
}

-Status Status::InProgress() {
-  return Status(StatusType::IN_PROGRESS, "");
-}
+Status Status::InProgress() { return Status(StatusType::IN_PROGRESS, ""); }

-bool Status::ok() const {
-  return type_ == StatusType::OK;
-}
+bool Status::ok() const { return type_ == StatusType::OK; }

-bool Status::in_progress() const {
-  return type_ == StatusType::IN_PROGRESS;
-}
+bool Status::in_progress() const { return type_ == StatusType::IN_PROGRESS; }

-StatusType Status::type() const {
-  return type_;
-}
+StatusType Status::type() const { return type_; }

-const std::string& Status::reason() const {
-  return reason_;
-}
+const std::string& Status::reason() const { return reason_; }

-void TensorShape::AddDim(int64_t dim) {
-  shape_.push_back(dim);
-}
+void TensorShape::AddDim(int64_t dim) { shape_.push_back(dim); }

void TensorShape::AppendShape(TensorShape& other) {
  for (auto dim : other.shape_) {
@@ -93,9 +79,7 @@ const std::string TensorShape::DebugString() const {
  return args.str();
}

-int TensorShape::dims() const {
-  return (int)shape_.size();
-}
+int TensorShape::dims() const { return (int)shape_.size(); }

int64_t TensorShape::dim_size(int idx) const {
  assert(idx >= 0);
@@ -157,5 +141,5 @@ int getDataTypeLength(int dtype) {
  return 4;
}

-} // namespace common
-} // namespace byteps
+}  // namespace common
+}  // namespace byteps
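The hunks above are behavior-preserving: each short accessor body simply moves onto one line. As a sanity reference, here is a minimal, self-contained sketch of the same factory/accessor pattern; these are stand-in definitions for illustration, not the BytePS headers (the real declarations are in byteps/common/common.h below):

#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Stand-ins for illustration; not the BytePS code.
enum class StatusType { OK, UNKNOWN_ERROR };

class Status {
 public:
  Status() = default;
  static Status OK() { return Status(); }
  static Status UnknownError(std::string message) {
    return Status(StatusType::UNKNOWN_ERROR, message);
  }
  bool ok() const { return type_ == StatusType::OK; }
  const std::string& reason() const { return reason_; }

 private:
  Status(StatusType type, std::string reason)
      : type_(type), reason_(std::move(reason)) {}
  StatusType type_ = StatusType::OK;
  std::string reason_;
};

class TensorShape {
 public:
  void AddDim(int64_t dim) { shape_.push_back(dim); }
  int dims() const { return (int)shape_.size(); }
  int64_t dim_size(int idx) const {
    assert(idx >= 0 && idx < (int)shape_.size());
    return shape_[idx];
  }

 private:
  std::vector<int64_t> shape_;
};

int main() {
  Status s = Status::UnknownError("pull timed out");
  if (!s.ok()) std::cout << s.reason() << std::endl;

  TensorShape shape;
  shape.AddDim(32);
  shape.AddDim(256);
  std::cout << shape.dims() << " dims, last = " << shape.dim_size(1)
            << std::endl;  // 2 dims, last = 256
  return 0;
}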
96 changes: 57 additions & 39 deletions byteps/common/common.h
@@ -17,15 +17,15 @@
#ifndef BYTEPS_COMMON_H
#define BYTEPS_COMMON_H

+#include <cuda_runtime.h>
+#include <nccl.h>
+#include <atomic>
#include <functional>
#include <memory>
+#include <mutex>
#include <string>
#include <unordered_map>
-#include <atomic>
#include <vector>
-#include <mutex>
-#include <nccl.h>
-#include <cuda_runtime.h>

namespace byteps {
namespace common {
@@ -54,25 +54,41 @@ enum DataType {
// List of supported frameworks.
enum Framework { TENSORFLOW, PYTORCH, MXNET };

-enum StatusType { OK, UNKNOWN_ERROR, PRECONDITION_ERROR, ABORTED, INVALID_ARGUMENT, IN_PROGRESS };
+enum StatusType {
+  OK,
+  UNKNOWN_ERROR,
+  PRECONDITION_ERROR,
+  ABORTED,
+  INVALID_ARGUMENT,
+  IN_PROGRESS
+};

enum DeviceType { CPU, GPU };

-enum QueueType { COORDINATE_REDUCE, REDUCE, COPYD2H,
-                 PCIE_REDUCE, COORDINATE_PUSH, PUSH, PULL,
-                 COPYH2D, COORDINATE_BROADCAST, BROADCAST,
-                 QUEUE_NUM_AND_NOT_A_REAL_QUEUE_TYPE_AND_MUST_BE_THE_LAST };
+enum QueueType {
+  COORDINATE_REDUCE,
+  REDUCE,
+  COPYD2H,
+  PCIE_REDUCE,
+  COORDINATE_PUSH,
+  PUSH,
+  PULL,
+  COPYH2D,
+  COORDINATE_BROADCAST,
+  BROADCAST,
+  QUEUE_NUM_AND_NOT_A_REAL_QUEUE_TYPE_AND_MUST_BE_THE_LAST
+};

-const int QueueNum = (int)QUEUE_NUM_AND_NOT_A_REAL_QUEUE_TYPE_AND_MUST_BE_THE_LAST;
+const int QueueNum =
+    (int)QUEUE_NUM_AND_NOT_A_REAL_QUEUE_TYPE_AND_MUST_BE_THE_LAST;

const std::vector<std::string> LogStrings = {
-    "COORDINATE_REDUCE", "REDUCE", "COPYD2H",
-    "PCIE_REDUCE", "COORDINATE_PUSH", "PUSH", "PULL",
-    "COPYH2D", "COORDINATE_BROADCAST", "BROADCAST"
-};
+    "COORDINATE_REDUCE", "REDUCE", "COPYD2H", "PCIE_REDUCE",
+    "COORDINATE_PUSH", "PUSH", "PULL", "COPYH2D",
+    "COORDINATE_BROADCAST", "BROADCAST"};

class Status {
-public:
+ public:
  Status();
  static Status OK();
  static Status UnknownError(std::string message);
@@ -85,14 +101,14 @@ class Status {
  StatusType type() const;
  const std::string& reason() const;

-private:
+ private:
  StatusType type_ = StatusType::OK;
  std::string reason_ = "";
  Status(StatusType type, std::string reason);
};

class TensorShape {
-public:
+ public:
  void AddDim(int64_t dim);
  void AppendShape(TensorShape& other);

@@ -109,36 +125,36 @@ class TensorShape {
    return shape_ != rhs.shape_;
  }

-private:
+ private:
  std::vector<int64_t> shape_;
};

class ReadyEvent {
-public:
+ public:
  virtual bool Ready() const = 0;
  virtual ~ReadyEvent() = default;
};

typedef struct BytePSContext {
-    bool initialized;
-    std::mutex init_mutex;
-    // tensor name
-    std::string tensor_name;
-    // using ps::Key = uint64_t
-    uint64_t declared_key;
-    // the actual keys being used
-    std::vector<uint64_t> key_list;
-    // a copy on CPU
-    void* cpubuff;
-    // GPU ptr if the tensor is on CPU
-    void* gpu_ptr;
-    // CPU buffer for cross-PCIe-switch merging
-    std::vector<void*> pcie_cpubuff;
-    size_t buff_len;
+  bool initialized;
+  std::mutex init_mutex;
+  // tensor name
+  std::string tensor_name;
+  // using ps::Key = uint64_t
+  uint64_t declared_key;
+  // the actual keys being used
+  std::vector<uint64_t> key_list;
+  // a copy on CPU
+  void* cpubuff;
+  // GPU ptr if the tensor is on CPU
+  void* gpu_ptr;
+  // CPU buffer for cross-PCIe-switch merging
+  std::vector<void*> pcie_cpubuff;
+  size_t buff_len;
} BPSContext;

class Tensor {
-public:
+ public:
  virtual const DataType dtype() const = 0;
  virtual const TensorShape shape() const = 0;
  virtual const void* data() const = 0;
@@ -194,7 +210,9 @@ struct TensorTableEntry {
using TensorTable = std::unordered_map<std::string, TensorTableEntry>;

enum class RequestType {
-  kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
+  kDefaultPushPull,
+  kRowSparsePushPull,
+  kCompressedPushPull
};

int GetCommandType(RequestType requestType, int d);
@@ -203,7 +221,7 @@ ncclDataType_t getNcclDataType(DataType dtype);

int getDataTypeLength(int dtype);

-} // namespace common
-} // namespace byteps
+}  // namespace common
+}  // namespace byteps

-#endif // BYTEPS_COMMON_H
+#endif  // BYTEPS_COMMON_H
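One detail worth calling out in the reformatted header: QueueType ends with a sentinel enumerator whose only purpose is to evaluate to the number of real queue types, and LogStrings must stay in the same order as the enum so a stage can be mapped to its log label by index. A trimmed, self-contained sketch of that pattern (three stages only, and the sentinel name is shortened; illustration, not the BytePS code):

#include <iostream>
#include <string>
#include <vector>

// Sentinel-terminated enum: the last enumerator counts the real entries.
enum QueueType { REDUCE, PUSH, PULL, QUEUE_NUM_SENTINEL };

const int QueueNum = (int)QUEUE_NUM_SENTINEL;  // == 3

// Must stay in the same order as QueueType for index-based lookup.
const std::vector<std::string> LogStrings = {"REDUCE", "PUSH", "PULL"};

int main() {
  for (int i = 0; i < QueueNum; ++i) {
    std::cout << i << " -> " << LogStrings[i] << std::endl;
  }
  // e.g., logging a specific stage:
  std::cout << "current stage: " << LogStrings[PUSH] << std::endl;
  return 0;
}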