39 changes: 15 additions & 24 deletions src/Native/LdaNative/lda_engine.cpp
@@ -68,11 +68,11 @@ namespace lda {
     printf("using %d thread(s) to do train/test\n", num_threads_);

     bAlphaSumMultiplied = false;
-    atomic_stats_ = new LDAEngineAtomics();
-    model_block_ = new LDAModelBlock();
-    data_block_ = new LDADataBlock(num_threads_);
-    process_barrier_ = new SimpleBarrier(num_threads_);
-    samplerQueue_ = new CBlockedIntQueue();
+    atomic_stats_.reset(new LDAEngineAtomics());
+    model_block_.reset(new LDAModelBlock());
+    data_block_.reset(new LDADataBlock(num_threads_));
+    process_barrier_.reset(new SimpleBarrier(num_threads_));
+    samplerQueue_.reset(new CBlockedIntQueue());
Contributor Author
📝 Using reset instead of std::make_unique because some of the builds only have C++11 (the latter is C++14).
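For reference, a minimal sketch of the two construction styles, using a stubbed-out LDAEngineAtomics type so it compiles on its own (not code from this PR):

```cpp
#include <memory>

// Stand-in for the real type; only here so the sketch is self-contained.
struct LDAEngineAtomics {};

int main() {
    std::unique_ptr<LDAEngineAtomics> atomic_stats;

    // C++14 and later could write:
    //   atomic_stats = std::make_unique<LDAEngineAtomics>();

    // C++11 fallback used in this change: reset() adopts a raw new'd object.
    atomic_stats.reset(new LDAEngineAtomics());
    return 0;
}
```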


     document_buffer_ = new int32_t*[num_threads_];
     for (int i = 0; i < num_threads_; i++)

@@ -105,11 +105,11 @@ namespace lda {
       num_threads_ = std::max(1, (int)(uNumCPU - 2));
     }
     bAlphaSumMultiplied = false;
-    process_barrier_ = new SimpleBarrier(num_threads_);
-    atomic_stats_ = new LDAEngineAtomics();
-    data_block_ = new LDADataBlock(num_threads_);
-    model_block_ = new LDAModelBlock();
-    samplerQueue_ = new CBlockedIntQueue();
+    process_barrier_.reset(new SimpleBarrier(num_threads_));
+    atomic_stats_.reset(new LDAEngineAtomics());
+    data_block_.reset(new LDADataBlock(num_threads_));
+    model_block_.reset(new LDAModelBlock());
+    samplerQueue_.reset(new CBlockedIntQueue());

     document_buffer_ = new int32_t*[num_threads_];
     for (int i = 0; i < num_threads_; i++)
@@ -123,20 +123,11 @@ namespace lda {
   LdaEngine::~LdaEngine()
   {
     //delete memory space
-    delete process_barrier_;
-    process_barrier_ = nullptr;
-
-    delete data_block_;
-    data_block_ = nullptr;
-
-    delete atomic_stats_;
-    atomic_stats_ = nullptr;
-
-    delete model_block_;
-    model_block_ = nullptr;
-
-    delete samplerQueue_;
-    samplerQueue_ = nullptr;
+    process_barrier_.reset(nullptr);
+    data_block_.reset(nullptr);
+    atomic_stats_.reset(nullptr);
+    model_block_.reset(nullptr);
+    samplerQueue_.reset(nullptr);
Contributor Author
📝 These calls to reset are likely not required, but including them ensures the objects are destroyed in the same order as in the old code.
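A minimal sketch of the ordering point, using hypothetical types A and B rather than the engine's real members (not code from this PR):

```cpp
#include <cstdio>
#include <memory>

struct A { ~A() { std::printf("A destroyed\n"); } };
struct B { ~B() { std::printf("B destroyed\n"); } };

struct Engine {
    std::unique_ptr<A> a_;
    std::unique_ptr<B> b_;

    Engine() : a_(new A()), b_(new B()) {}

    ~Engine() {
        // Without these calls, members are destroyed in reverse declaration
        // order (b_ first, then a_). Explicit reset() pins the order so it
        // matches what the old delete statements did.
        a_.reset(nullptr);
        b_.reset(nullptr);
    }
};

int main() { Engine e; }  // prints "A destroyed" then "B destroyed"
```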


     for (int i = 0; i < num_threads_; ++i)
     {
10 changes: 5 additions & 5 deletions src/Native/LdaNative/lda_engine.hpp
@@ -110,11 +110,11 @@ namespace lda {
     bool bAlphaSumMultiplied; //used to check whether alpha_sum_ is real alpha sum but not alpha
     std::vector<int32_t> word_range_for_each_thread_;

-    LDAEngineAtomics* atomic_stats_;
-    SimpleBarrier* process_barrier_; // Local barrier across threads.
+    std::unique_ptr<LDAEngineAtomics> atomic_stats_;
+    std::unique_ptr<SimpleBarrier> process_barrier_; // Local barrier across threads.

-    LDADataBlock* data_block_;
-    LDAModelBlock* model_block_;
+    std::unique_ptr<LDADataBlock> data_block_;
+    std::unique_ptr<LDAModelBlock> model_block_;

     std::vector<lda::hybrid_map> global_word_topic_table_;
     std::vector<lda::hybrid_alias_map> global_alias_k_v_;
@@ -133,6 +133,6 @@ namespace lda {
     int32_t **document_buffer_;

     wood::xorshift_rng rng_;
-    CBlockedIntQueue *samplerQueue_;
+    std::unique_ptr<CBlockedIntQueue> samplerQueue_;
   };
 } // namespace lda