# Async API
The async module provides non-blocking asynchronous logging with configurable buffering, background processing, and high-throughput capabilities.
## Overview
```zig
const logly = @import("logly");
const AsyncLogger = logly.AsyncLogger;
const AsyncFileWriter = logly.AsyncFileWriter;
```

## Centralized Configuration
Async logging can be enabled through the central `Config` struct:
```zig
var config = logly.Config.default();
config.async_config = .{
    .enabled = true,
    .buffer_size = 16384,
    .batch_size = 128,
    .flush_interval_ms = 50,
};

const logger = try logly.Logger.initWithConfig(allocator, config);
```

Or use the fluent API:

```zig
const config = logly.Config.default().withAsync();
```

## Types
### AsyncLogger
The main async logging struct with ring buffer and background worker.
```zig
pub const AsyncLogger = struct {
    allocator: std.mem.Allocator,
    config: AsyncConfig,
    buffer: RingBuffer,
    stats: AsyncStats,
    worker_thread: ?std.Thread,
    sinks: std.ArrayList(*Sink),
};
```

### AsyncConfig (Centralized)
Configuration available through `Config.AsyncConfig`:
```zig
pub const AsyncConfig = struct {
    /// Enable async logging.
    enabled: bool = false,
    /// Buffer size for async queue.
    buffer_size: usize = 8192,
    /// Batch size for flushing.
    batch_size: usize = 100,
    /// Flush interval in milliseconds.
    flush_interval_ms: u64 = 100,
    /// What to do when buffer is full.
    overflow_policy: OverflowPolicy = .drop_oldest,
    /// Auto-start worker thread.
    auto_start: bool = true,

    pub const OverflowPolicy = enum {
        drop_oldest,
        drop_newest,
        block,
    };
};
```

### Module-specific AsyncConfig
The `AsyncLogger` module also has its own detailed config:
```zig
pub const AsyncConfig = struct {
    /// Size of the ring buffer in number of entries
    buffer_size: usize = 8192,
    /// Maximum time to wait before flushing (in milliseconds)
    flush_interval_ms: u64 = 100,
    /// Batch size for writing to sinks
    batch_size: usize = 64,
    /// Behavior when buffer is full
    overflow_policy: OverflowPolicy = .drop_oldest,
    /// Enable background worker thread
    background_worker: bool = true,
    /// Worker thread priority (if supported)
    worker_priority: WorkerPriority = .normal,
    /// Shutdown timeout in milliseconds
    shutdown_timeout_ms: u64 = 5000,
    /// Enable metrics collection
    enable_metrics: bool = true,
    /// Pre-allocate formatted message buffers
    preallocate_buffers: bool = true,
    /// Maximum message size
    max_message_size: usize = 4096,
};
```
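A common way to build this config is to start from a preset (see Presets below) and override individual fields. A minimal sketch, assuming an `allocator` is in scope and that the presets are reachable as `logly.AsyncPresets`, as in the usage example at the end of this page:

```zig
// Sketch: start from the balanced preset and tune a few fields.
// Field names come from the module-specific AsyncConfig above.
var cfg = logly.AsyncPresets.balanced();
cfg.max_message_size = 8192; // allow larger formatted messages
cfg.shutdown_timeout_ms = 2000; // give up on a clean shutdown after 2 seconds

var async_logger = try logly.AsyncLogger.init(allocator, cfg);
defer async_logger.deinit();
```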
### OverflowPolicy

What to do when the buffer is full.
```zig
pub const OverflowPolicy = enum {
    /// Drop the oldest entries to make room
    drop_oldest,
    /// Drop new entries (block if blocking enabled)
    drop_newest,
    /// Block until space is available
    block,
    /// Expand buffer dynamically
    expand,
};
```
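For example, to guarantee that no records are dropped at the cost of blocking producers, the policy can be set through the centralized config. A sketch using the fluent API shown earlier, assuming `withAsync()` enables async logging:

```zig
// Sketch: never drop records; producers block while the buffer is full.
var config = logly.Config.default().withAsync();
config.async_config.overflow_policy = .block;

const logger = try logly.Logger.initWithConfig(allocator, config);
```

Note that `.expand` exists only in the module-specific config; the centralized policy offers `drop_oldest`, `drop_newest`, and `block`.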
### WorkerPriority

Worker thread priority levels.
```zig
pub const WorkerPriority = enum {
    low,
    normal,
    high,
    realtime,
};
```

### AsyncStats
Statistics for async operations.
```zig
pub const AsyncStats = struct {
    records_queued: std.atomic.Value(u64),
    records_written: std.atomic.Value(u64),
    records_dropped: std.atomic.Value(u64),
    flush_count: std.atomic.Value(u64),
    total_latency_ns: std.atomic.Value(u64),
    max_latency_ns: std.atomic.Value(u64),
    buffer_high_watermark: std.atomic.Value(u64),
};
```

### RingBuffer
Lock-free ring buffer for async message queuing.
```zig
pub const RingBuffer = struct {
    entries: []BufferEntry,
    head: std.atomic.Value(usize),
    tail: std.atomic.Value(usize),
    capacity: usize,
};
```

### AsyncFileWriter
Optimized async file writer with buffering.
```zig
pub const AsyncFileWriter = struct {
    allocator: std.mem.Allocator,
    config: FileWriterConfig,
    write_buffer: []u8,
    buffer_pos: usize,
    stats: FileWriterStats,
    background_thread: ?std.Thread,
};
```

### FileWriterConfig
Configuration for async file writing.
```zig
pub const FileWriterConfig = struct {
    /// Path to the log file
    file_path: []const u8,
    /// Write buffer size
    buffer_size: usize = 64 * 1024, // 64KB
    /// Auto-flush interval in milliseconds
    flush_interval_ms: u64 = 1000,
    /// Sync to disk on flush
    sync_on_flush: bool = false,
    /// Enable direct I/O (bypass OS cache)
    direct_io: bool = false,
    /// Create parent directories if needed
    create_dirs: bool = true,
    /// Append to existing file
    append: bool = true,
};
```

## AsyncLogger Methods
### init

Create a new async logger.

```zig
pub fn init(allocator: std.mem.Allocator, config: AsyncConfig) !AsyncLogger
```

### deinit

Clean up resources and stop the worker thread.

```zig
pub fn deinit(self: *AsyncLogger) void
```

### start

Start the background worker thread.

```zig
pub fn start(self: *AsyncLogger) !void
```

### stop

Stop the background worker and flush remaining logs.

```zig
pub fn stop(self: *AsyncLogger) void
```

### log

Queue a log record for async processing.

```zig
pub fn log(self: *AsyncLogger, record: *const Record) !void
```

### addSink

Add a sink for the async logger to write to.

```zig
pub fn addSink(self: *AsyncLogger, sink: *Sink) !void
```

### flush

Force flush all pending records.

```zig
pub fn flush(self: *AsyncLogger) void
```

### getStats

Get current async statistics.

```zig
pub fn getStats(self: *const AsyncLogger) AsyncStats
```
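These methods are typically used together. A minimal lifecycle sketch, assuming a `sink` pointer and a `record` value created through their respective APIs (not shown here):

```zig
// Sketch of the AsyncLogger lifecycle; `sink` and `record` come from elsewhere.
var async_logger = try logly.AsyncLogger.init(allocator, logly.AsyncPresets.balanced());
defer async_logger.deinit();

try async_logger.addSink(sink); // register a destination before starting
try async_logger.start(); // spawn the background worker
defer async_logger.stop(); // also flushes remaining records

try async_logger.log(&record); // non-blocking: the record is only queued here
async_logger.flush(); // force pending records out immediately
```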
## AsyncStats Methods

### dropRate

Calculate the fraction of records that were dropped (multiply by 100 for a percentage, as in the usage example below).
```zig
pub fn dropRate(self: *const AsyncStats) f64
```

### averageLatencyNs
Get average latency in nanoseconds.
```zig
pub fn averageLatencyNs(self: *const AsyncStats) u64
```
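A periodic health check might combine both helpers. A sketch, assuming `std` is imported and `async_logger` is the logger from the lifecycle sketch above; the 1% threshold is arbitrary:

```zig
// Sketch: warn when the async pipeline starts dropping records or lagging.
const stats = async_logger.getStats();
if (stats.dropRate() > 0.01) {
    std.debug.print("warning: {d:.2}% of records dropped\n", .{stats.dropRate() * 100});
}
std.debug.print("average latency: {d} ns\n", .{stats.averageLatencyNs()});
```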
## AsyncFileWriter Methods

### init
Create a new async file writer.
```zig
pub fn init(allocator: std.mem.Allocator, config: FileWriterConfig) !AsyncFileWriter
```

### deinit
Clean up and close file.
```zig
pub fn deinit(self: *AsyncFileWriter) void
```

### write
Write data to the buffer (async).
```zig
pub fn write(self: *AsyncFileWriter, data: []const u8) !void
```

### flush
Flush buffer to file.
```zig
pub fn flush(self: *AsyncFileWriter) !void
```
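Putting the writer API together with `FileWriterConfig`, a minimal sketch (the file path is illustrative, and error handling is reduced to `try`):

```zig
// Sketch: buffered async writes to a file, flushed explicitly before shutdown.
var writer = try logly.AsyncFileWriter.init(allocator, .{
    .file_path = "logs/app.log", // illustrative path; parent dirs created via create_dirs
    .buffer_size = 128 * 1024,
    .flush_interval_ms = 500,
});
defer writer.deinit();

try writer.write("service started\n");
try writer.flush(); // push buffered bytes to the file now
```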
## Presets

### highThroughput
Optimized for maximum throughput.
```zig
pub fn highThroughput() AsyncConfig {
    return .{
        .buffer_size = 65536,
        .flush_interval_ms = 500,
        .batch_size = 256,
        .overflow_policy = .drop_oldest,
        .preallocate_buffers = true,
    };
}
```

### lowLatency
Optimized for minimum latency.
```zig
pub fn lowLatency() AsyncConfig {
    return .{
        .buffer_size = 1024,
        .flush_interval_ms = 10,
        .batch_size = 16,
        .overflow_policy = .block,
    };
}
```

### balanced
Balance between throughput and latency.
```zig
pub fn balanced() AsyncConfig {
    return .{
        .buffer_size = 8192,
        .flush_interval_ms = 100,
        .batch_size = 64,
    };
}
```

### noDrop
Never drop messages (may block).
```zig
pub fn noDrop() AsyncConfig {
    return .{
        .buffer_size = 16384,
        .overflow_policy = .block,
    };
}
```

## Usage Example
```zig
const std = @import("std");
const logly = @import("logly");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    // Create async logger with high throughput config
    var async_logger = try logly.AsyncLogger.init(
        allocator,
        logly.AsyncPresets.highThroughput(),
    );
    defer async_logger.deinit();

    // Start the background worker
    try async_logger.start();
    defer async_logger.stop();

    // Log messages (non-blocking)
    for (0..1000) |i| {
        _ = i;
        // async_logger.log(&record);
    }

    // Check statistics
    const stats = async_logger.getStats();
    std.debug.print("Queued: {d}, Written: {d}, Dropped: {d}\n", .{
        stats.records_queued.load(.monotonic),
        stats.records_written.load(.monotonic),
        stats.records_dropped.load(.monotonic),
    });
    std.debug.print("Drop rate: {d:.2}%\n", .{stats.dropRate() * 100});
}
```

## See Also
- Async Logging Guide - In-depth async logging guide
- Thread Pool API - Parallel logging
- Configuration Guide - Full configuration options