Here is the optimized code:
#include <atomic>
#include <cstddef>
#include <memory>

template <typename T>
class LockFreeQueue
{
public:
    // The queue always keeps one dummy node; head_ and tail_ both start at it.
    LockFreeQueue() : head_(new Node), tail_(head_.load()) {}
    ~LockFreeQueue()
    {
        // Detach the whole list atomically, then walk it and free every node.
        Node *node = head_.exchange(nullptr);
        while (node)
        {
            Node *next = node->next_.load(std::memory_order_relaxed);
            delete node;
            node = next;
        }
    }
    void push(const T &value)
    {
        // The unique_ptr owns the node until it has been linked into the list.
        auto new_node = std::make_unique<Node>(value);
        auto tail = tail_.load(std::memory_order_relaxed);
        Node *next = nullptr;
        while (true)
        {
            next = tail->next_.load(std::memory_order_acquire);
            if (!next)
            {
                // tail really is the last node: try to link the new node after it.
                if (tail->next_.compare_exchange_weak(next, new_node.get(),
                                                      std::memory_order_release,
                                                      std::memory_order_relaxed))
                {
                    break;
                }
                tail = tail_.load(std::memory_order_relaxed);
            }
            else
            {
                // tail_ is lagging behind: help advance it, then retry.
                tail_.compare_exchange_weak(tail, next,
                                            std::memory_order_release,
                                            std::memory_order_relaxed);
                tail = tail_.load(std::memory_order_relaxed);
            }
        }
        // Swing tail_ to the new node; if this fails, another thread already helped.
        tail_.compare_exchange_weak(tail, new_node.get(), std::memory_order_release);
        size_.fetch_add(1, std::memory_order_release);
        new_node.release();  // Ownership now belongs to the queue.
    }
    bool try_pop(T &value)
    {
        auto old_head = head_.load(std::memory_order_relaxed);
        auto new_head = old_head->next_.load(std::memory_order_acquire);
        if (!new_head)
        {
            return false;  // Only the dummy node is left: the queue is empty.
        }
        value = new_head->value_;
        if (head_.compare_exchange_weak(old_head, new_head,
                                        std::memory_order_release,
                                        std::memory_order_relaxed))
        {
            size_.fetch_sub(1, std::memory_order_release);
            // Deleting immediately is only safe when no other consumer can still
            // hold a pointer to this node (e.g. a single consumer); otherwise a
            // reclamation scheme such as hazard pointers is needed.
            delete old_head;
            return true;
        }
        return false;  // Lost the race to another consumer; the caller may retry.
    }

    bool empty() const
    {
        return size_.load(std::memory_order_acquire) == 0;
    }
private:
    struct Node
    {
        T value_;
        std::atomic<Node *> next_;
        Node() : value_(T()), next_(nullptr) {}
        explicit Node(const T &value) : value_(value), next_(nullptr) {}
    };

    // Each hot atomic sits on its own cache line to reduce false sharing.
    alignas(64) std::atomic<Node *> head_{nullptr};
    alignas(64) std::atomic<Node *> tail_{nullptr};
    alignas(64) std::atomic<size_t> size_{0};
};
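
Before the summary, here is a minimal usage sketch (the header name lock_free_queue.h and the item count are made up for illustration): one producer pushes integers while one consumer drains them, which also sidesteps the multi-consumer reclamation caveat noted in try_pop.

// Hypothetical driver; assumes the class above is saved as "lock_free_queue.h".
#include "lock_free_queue.h"
#include <cstdio>
#include <thread>

int main()
{
    LockFreeQueue<int> queue;

    // Producer: push 1000 integers.
    std::thread producer([&] {
        for (int i = 0; i < 1000; ++i)
        {
            queue.push(i);
        }
    });

    // Consumer: spin on try_pop until every item has been seen.
    long long sum = 0;
    int popped = 0;
    std::thread consumer([&] {
        while (popped < 1000)
        {
            int value = 0;
            if (queue.try_pop(value))
            {
                sum += value;
                ++popped;
            }
        }
    });

    producer.join();
    consumer.join();
    std::printf("popped %d items, sum = %lld\n", popped, sum);  // Expect sum == 499500.
    return 0;
}

Note that try_pop returning false only means "nothing obtained on this attempt" (the queue may be empty, or the CAS may have lost a race), which is why the consumer loops on the popped count rather than on empty().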
The main optimization points are as follows:
1. std::make_unique is used instead of a raw new, so the node cannot be leaked if push exits early (for example via an exception).
2. The destructor detaches the whole list with a single atomic exchange(nullptr) and then frees the nodes; note that this by itself does not address the ABA problem of the CAS loops.
3. head_, tail_, and size_ are each placed on their own cache line with alignas(64) to reduce the impact of false sharing (see the short sketch after this list).
4. The loads of next_ use std::memory_order_acquire rather than std::memory_order_relaxed, so they pair with the release CAS in push and a consumer always sees a fully constructed node.
5. In push, the newly allocated node is owned by a unique_ptr until it has been linked into the list, so a failed CAS (or an exception during the retry loop) cannot leak it; release() hands ownership to the queue only after linking succeeds.
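
As a rough illustration of point 3 (the Counters struct and iteration count below are made up, not part of the queue), forcing two atomics that are written by different threads onto separate cache lines keeps one thread's writes from invalidating the line the other thread is using:

#include <atomic>
#include <cstdio>
#include <thread>

struct Counters
{
    // Without alignas(64), a and b would typically share one cache line,
    // and the two threads below would keep invalidating it for each other.
    alignas(64) std::atomic<long> a{0};
    alignas(64) std::atomic<long> b{0};
};

int main()
{
    Counters c;
    auto worker = [](std::atomic<long> &counter) {
        for (int i = 0; i < 1000000; ++i)
        {
            counter.fetch_add(1, std::memory_order_relaxed);
        }
    };
    std::thread t1(worker, std::ref(c.a));
    std::thread t2(worker, std::ref(c.b));
    t1.join();
    t2.join();
    std::printf("a = %ld, b = %ld\n", c.a.load(), c.b.load());
    return 0;
}

The hard-coded 64 matches the cache-line size of most current x86 and ARM cores; where the compiler supports it, std::hardware_destructive_interference_size from <new> can be used instead.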