1
0
mirror of https://github.com/chylex/Apache-Prometheus-Exporter.git synced 2025-09-15 08:32:10 +02:00

Compare commits

..

1 Commits

Author SHA1 Message Date
589eaf4bc7 wip 2023-01-13 16:27:48 +01:00
11 changed files with 994 additions and 692 deletions

1050
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -7,14 +7,11 @@ edition = "2021"
name = "apache_prometheus_exporter"
path = "src/main.rs"
[profile.release]
strip = true
lto = true
codegen-units = 1
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
hyper = { version = "0.14.27", default-features = false, features = ["http1", "server", "runtime"] }
notify = { version = "6.1.1", default-features = false, features = ["macos_kqueue"] }
actix-web = "4.1.0"
linemux = "0.2.4"
path-slash = "0.2.1"
prometheus-client = "0.21.2"
tokio = { version = "1.32.0", features = ["fs", "io-util", "macros", "rt", "signal"] }
prometheus-client = "0.18.0"
tokio = { version = "1", features = ["rt", "macros", "signal"] }

View File

@@ -82,21 +82,17 @@ The wildcard must not include any prefix or suffix, so `/*/` is accepted, but `/
#### Notes
> The exporter only searches for files when it starts. If you need the exporter to watch a new file or forget a deleted file, you must restart it.
> At least one access log file and one error log file must be found when the exporter starts, otherwise the exporter immediately exits with an error.
> If a log file is deleted, the exporter will automatically resume watching it if it is re-created later. If you want the exporter to forget about deleted log files, restart the exporter.
## 4. Launch the Exporter
Start the exporter. The standard output will show which log files have been found, the web server host, and the metrics endpoint URL.
If no errors are shown, the exporter will begin reading the found log files from the end, and printing each line to the standard output. When a log file is rotated, the exporter will begin reading it from the beginning.
Press `Ctrl-C` to stop the exporter.
Press `Ctrl-C` to stop the exporter. Signals other than `SIGINT` are ignored.
#### Notes
> The exporter is designed to work and tested with the `rotatelogs` tool in a Linux container. Any other tools or operating systems are unsupported.
> If an error occurs while reading a file or re-opening a rotated file, the exporter will stop watching it and print the error to standard output.
**Important:** Due to library bugs, the exporter will currently not watch rotated log files. If you want to use this project right now, you will need to add the `-c` flag to `rotatelogs`, and restart the exporter after every rotation.
## 5. Collect Prometheus Metrics

View File

@@ -12,9 +12,9 @@ This configuration will create a Docker volume for the logs, and the following c
- **User** : `admin`
- **Password** : `admin`
3. **Prometheus** configured with the exporter's endpoint.
4. **Exporter** built using the source code from this repository, with its metrics endpoint exposed as: http://localhost:2004/metrics
4. **Exporter** built using the source code from this repository.
This example is unsuitable for production. You can use it as inspiration, but you will have to modify it in order to persist container data and follow the latest security practices:
This example is not suitable for production. You can use it as inspiration, but you will have to modify it in order to persist container data and follow the latest security practices:
- Create Docker volumes for persistent storage of container data and configuration files
- Create a dedicated user for each container instead of running as `root`

View File

@@ -38,15 +38,14 @@ services:
exporter:
container_name: ape_dev_exporter
build: "../"
ports:
- "127.0.0.1:2004:9240"
expose:
- "9240"
volumes:
- logs:/logs
environment:
HTTP_HOST: "0.0.0.0"
ACCESS_LOG_FILE_PATTERN: "/logs/*.access.log"
ERROR_LOG_FILE_PATTERN: "/logs/*.error.log"
stop_signal: SIGINT
restart: "always"
volumes:

View File

@@ -2,7 +2,7 @@ use prometheus_client::metrics::counter::Counter;
use prometheus_client::metrics::family::Family;
use prometheus_client::registry::Registry;
type SingleLabel = [(&'static str, String); 1];
type SingleLabel = (&'static str, String);
#[derive(Clone)]
pub struct ApacheMetrics {
@@ -19,9 +19,9 @@ impl ApacheMetrics {
errors_total: Family::<SingleLabel, Counter>::default()
};
registry.register("apache_requests", "Number of received requests", metrics.requests_total.clone());
registry.register("apache_errors", "Number of logged errors", metrics.errors_total.clone());
registry.register("apache_requests", "Number of received requests", Box::new(metrics.requests_total.clone()));
registry.register("apache_errors", "Number of logged errors", Box::new(metrics.errors_total.clone()));
(registry, metrics)
return (registry, metrics);
}
}

View File

@@ -1,62 +0,0 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use notify::{ErrorKind, Event, recommended_watcher, RecommendedWatcher, RecursiveMode, Result, Watcher};
use tokio::sync::mpsc::Sender;
use tokio::sync::Mutex;
/// Wraps a `notify` watcher behind an async mutex so watch requests
/// can be issued safely from concurrent tasks.
pub struct FsWatcher {
    watcher: Mutex<RecommendedWatcher>,
}

impl FsWatcher {
    /// Builds a watcher that forwards every filesystem event to `callbacks`.
    pub fn new(callbacks: FsEventCallbacks) -> Result<Self> {
        let inner = recommended_watcher(move |event| callbacks.handle_event(event))?;
        Ok(Self { watcher: Mutex::new(inner) })
    }

    /// Starts (or restarts) watching `path` non-recursively.
    ///
    /// Any existing watch on the same path is removed first; a missing
    /// previous watch (`WatchNotFound`) is tolerated, every other unwatch
    /// error is propagated.
    pub async fn watch(&self, path: &Path) -> Result<()> {
        let mut guard = self.watcher.lock().await;

        match guard.unwatch(path) {
            Err(e) if !matches!(e.kind, ErrorKind::WatchNotFound) => return Err(e),
            _ => {}
        }

        guard.watch(path, RecursiveMode::NonRecursive)
    }
}
/// Routes filesystem events to per-path channels that were registered
/// before the watcher was constructed.
pub struct FsEventCallbacks {
    senders: HashMap<PathBuf, Sender<Event>>,
}

impl FsEventCallbacks {
    pub fn new() -> Self {
        Self { senders: HashMap::new() }
    }

    /// Associates `sender` with `path`; events whose path set contains
    /// `path` are forwarded to it.
    pub fn register(&mut self, path: &Path, sender: Sender<Event>) {
        self.senders.insert(path.to_path_buf(), sender);
    }

    /// Callback invoked by the `notify` watcher for every raw event.
    ///
    /// Errors from the watcher and full/closed channels are reported on
    /// standard output; events for unregistered paths are ignored.
    fn handle_event(&self, event: Result<Event>) {
        let event = match event {
            Ok(event) => event,
            Err(e) => {
                println!("[FsWatcher] Error receiving filesystem event: {}", e);
                return;
            }
        };

        for path in &event.paths {
            if let Some(sender) = self.senders.get(path) {
                if let Err(e) = sender.try_send(event.clone()) {
                    println!("[FsWatcher] Error sending filesystem event for path \"{}\": {}", path.to_string_lossy(), e);
                }
            }
        }
    }
}

View File

@@ -14,7 +14,7 @@ use path_slash::PathExt;
/// 2. A path with a wildcard anywhere in the file name.
/// 3. A path with a standalone wildcard component (i.e. no prefix or suffix in the folder name).
pub fn parse_log_file_pattern_from_env(variable_name: &str) -> Result<LogFilePattern, String> {
match env::var(variable_name) {
return match env::var(variable_name) {
Ok(str) => {
let pattern_str = Path::new(&str).to_slash().ok_or(format!("Environment variable {} contains an invalid path.", variable_name))?;
parse_log_file_pattern_from_str(&pattern_str)
@@ -23,7 +23,7 @@ pub fn parse_log_file_pattern_from_env(variable_name: &str) -> Result<LogFilePat
VarError::NotPresent => Err(format!("Environment variable {} must be set.", variable_name)),
VarError::NotUnicode(_) => Err(format!("Environment variable {} contains invalid characters.", variable_name))
}
}
};
}
fn parse_log_file_pattern_from_str(pattern_str: &str) -> Result<LogFilePattern, String> {
@@ -31,11 +31,11 @@ fn parse_log_file_pattern_from_str(pattern_str: &str) -> Result<LogFilePattern,
return Err(String::from("Path is empty."));
}
if let Some((left, right)) = pattern_str.split_once('*') {
return if let Some((left, right)) = pattern_str.split_once('*') {
parse_log_file_pattern_split_on_wildcard(left, right)
} else {
Ok(LogFilePattern::WithoutWildcard(pattern_str.to_string()))
}
};
}
fn parse_log_file_pattern_split_on_wildcard(left: &str, right: &str) -> Result<LogFilePattern, String> {
@@ -54,7 +54,7 @@ fn parse_log_file_pattern_split_on_wildcard(left: &str, right: &str) -> Result<L
return Err(String::from("Path has a folder wildcard with a prefix or suffix."));
}
if let Some((folder_path, file_name_prefix)) = left.rsplit_once('/') {
return if let Some((folder_path, file_name_prefix)) = left.rsplit_once('/') {
Ok(LogFilePattern::WithFileNameWildcard(PatternWithFileNameWildcard {
path: folder_path.to_string(),
file_name_prefix: file_name_prefix.to_string(),
@@ -66,7 +66,7 @@ fn parse_log_file_pattern_split_on_wildcard(left: &str, right: &str) -> Result<L
file_name_prefix: left.to_string(),
file_name_suffix: right.to_string(),
}))
}
};
}
#[derive(Debug)]
@@ -82,10 +82,11 @@ impl PatternWithFileNameWildcard {
}
fn match_wildcard_on_dir_entry(&self, dir_entry: &DirEntry) -> Option<String> {
dir_entry.file_name()
.to_str()
.and_then(|file_name| self.match_wildcard(file_name))
.map(|wildcard_match| wildcard_match.to_string())
return if let Some(wildcard_match) = dir_entry.file_name().to_str().and_then(|file_name| self.match_wildcard(file_name)) {
Some(wildcard_match.to_string())
} else {
None
};
}
}
@@ -114,22 +115,22 @@ pub enum LogFilePattern {
impl LogFilePattern {
pub fn search(&self) -> Result<Vec<LogFilePath>, io::Error> { // TODO error message
match self {
return match self {
Self::WithoutWildcard(path) => Self::search_without_wildcard(path),
Self::WithFileNameWildcard(pattern) => Self::search_with_file_name_wildcard(pattern),
Self::WithFolderNameWildcard(pattern) => Self::search_with_folder_name_wildcard(pattern)
}
};
}
fn search_without_wildcard(path_str: &String) -> Result<Vec<LogFilePath>, io::Error> {
let path = Path::new(path_str);
let is_valid = path.is_file() || matches!(path.parent(), Some(parent) if parent.is_dir());
if is_valid {
return if is_valid {
Ok(vec![LogFilePath::with_empty_label(path_str)])
} else {
Err(io::Error::from(ErrorKind::NotFound))
}
};
}
fn search_with_file_name_wildcard(pattern: &PatternWithFileNameWildcard) -> Result<Vec<LogFilePath>, io::Error> {
@@ -142,7 +143,7 @@ impl LogFilePattern {
}
}
Ok(result)
return Ok(result);
}
fn search_with_folder_name_wildcard(pattern: &PatternWithFolderNameWildcard) -> Result<Vec<LogFilePath>, io::Error> {
@@ -158,7 +159,7 @@ impl LogFilePattern {
}
}
Ok(result)
return Ok(result);
}
}
@@ -169,10 +170,10 @@ pub struct LogFilePath {
impl LogFilePath {
fn with_empty_label(s: &String) -> LogFilePath {
LogFilePath {
return LogFilePath {
path: PathBuf::from(s),
label: String::default(),
}
};
}
}
@@ -208,12 +209,12 @@ mod tests {
#[test]
fn valid_with_file_name_wildcard_prefix() {
assert!(matches!(parse_log_file_pattern_from_str("/path/to/files/access_*"), Ok(LogFilePattern::WithFileNameWildcard(pattern)) if pattern.path == "/path/to/files" && pattern.file_name_prefix == "access_" && pattern.file_name_suffix.is_empty()));
assert!(matches!(parse_log_file_pattern_from_str("/path/to/files/access_*"), Ok(LogFilePattern::WithFileNameWildcard(pattern)) if pattern.path == "/path/to/files" && pattern.file_name_prefix == "access_" && pattern.file_name_suffix == ""));
}
#[test]
fn valid_with_file_name_wildcard_suffix() {
assert!(matches!(parse_log_file_pattern_from_str("/path/to/files/*_access.log"), Ok(LogFilePattern::WithFileNameWildcard(pattern)) if pattern.path == "/path/to/files" && pattern.file_name_prefix.is_empty() && pattern.file_name_suffix == "_access.log"));
assert!(matches!(parse_log_file_pattern_from_str("/path/to/files/*_access.log"), Ok(LogFilePattern::WithFileNameWildcard(pattern)) if pattern.path == "/path/to/files" && pattern.file_name_prefix == "" && pattern.file_name_suffix == "_access.log"));
}
#[test]

View File

@@ -1,16 +1,12 @@
use std::cmp::max;
use std::collections::HashMap;
use std::io;
use std::io::{Error, ErrorKind};
use std::path::PathBuf;
use std::sync::Arc;
use notify::{Event, EventKind};
use notify::event::{CreateKind, ModifyKind};
use tokio::fs::File;
use tokio::io::{AsyncBufReadExt, BufReader, Lines};
use tokio::sync::mpsc;
use tokio::sync::mpsc::Receiver;
use linemux::{Line, MuxedLines};
use tokio::sync::mpsc::UnboundedSender;
use crate::ApacheMetrics;
use crate::fs_watcher::{FsEventCallbacks, FsWatcher};
use crate::{ApacheMetrics, log_parser};
use crate::log_file_pattern::LogFilePath;
#[derive(Copy, Clone, PartialEq)]
@@ -19,274 +15,109 @@ enum LogFileKind {
Error,
}
struct LogFileMetadata {
struct LogFileInfo<'a> {
pub kind: LogFileKind,
pub label: String,
pub label: &'a String,
}
impl LogFileMetadata {
fn get_label_set(&self) -> [(&'static str, String); 1] {
[("file", self.label.clone())]
impl<'a> LogFileInfo<'a> {
fn get_label_set(&self) -> (&'static str, String) {
return ("file", self.label.clone());
}
}
pub async fn start_log_watcher(access_log_files: Vec<LogFilePath>, error_log_files: Vec<LogFilePath>, metrics: ApacheMetrics) -> bool {
let mut watcher = LogWatcherConfiguration::new();
for log_file in access_log_files.into_iter() {
watcher.add_file(log_file, LogFileKind::Access);
pub async fn watch_logs_task(access_log_files: Vec<LogFilePath>, error_log_files: Vec<LogFilePath>, metrics: ApacheMetrics, shutdown_send: UnboundedSender<()>) {
if let Err(error) = watch_logs(access_log_files, error_log_files, metrics).await {
println!("[LogWatcher] Error reading logs: {}", error);
shutdown_send.send(()).unwrap();
}
for log_file in error_log_files.into_iter() {
watcher.add_file(log_file, LogFileKind::Error);
}
watcher.start(&metrics).await
}
struct LogWatcherConfiguration {
files: Vec<(PathBuf, LogFileMetadata)>,
struct LogWatcher<'a> {
reader: MuxedLines,
files: HashMap<PathBuf, LogFileInfo<'a>>,
}
impl LogWatcherConfiguration {
fn new() -> LogWatcherConfiguration {
LogWatcherConfiguration { files: Vec::new() }
impl<'a> LogWatcher<'a> {
fn new() -> io::Result<LogWatcher<'a>> {
return Ok(LogWatcher {
reader: MuxedLines::new()?,
files: HashMap::new(),
});
}
fn count_files_of_kind(&self, kind: LogFileKind) -> usize {
return self.files.iter().filter(|(_, metadata)| metadata.kind == kind).count();
return self.files.values().filter(|info| info.kind == kind).count();
}
fn add_file(&mut self, log_file: LogFilePath, kind: LogFileKind) {
let path = log_file.path;
let label = log_file.label;
let metadata = LogFileMetadata { kind, label };
self.files.push((path, metadata));
async fn add_file(&mut self, log_file: &'a LogFilePath, kind: LogFileKind) -> io::Result<()> {
let lookup_key = self.reader.add_file(&log_file.path).await?;
self.files.insert(lookup_key, LogFileInfo { kind, label: &log_file.label });
Ok(())
}
async fn start(self, metrics: &ApacheMetrics) -> bool {
async fn start_watching(&mut self, metrics: &ApacheMetrics) -> io::Result<()> {
if self.files.is_empty() {
println!("[LogWatcher] No log files provided.");
return false;
return Err(Error::from(ErrorKind::Unsupported));
}
println!("[LogWatcher] Watching {} access log file(s) and {} error log file(s).", self.count_files_of_kind(LogFileKind::Access), self.count_files_of_kind(LogFileKind::Error));
struct PreparedFile {
path: PathBuf,
metadata: LogFileMetadata,
fs_event_receiver: Receiver<Event>,
}
let mut prepared_files = Vec::new();
let mut fs_callbacks = FsEventCallbacks::new();
for (path, metadata) in self.files {
let (fs_event_sender, fs_event_receiver) = mpsc::channel(20);
fs_callbacks.register(&path, fs_event_sender);
prepared_files.push(PreparedFile { path, metadata, fs_event_receiver });
}
let fs_watcher = match FsWatcher::new(fs_callbacks) {
Ok(fs_watcher) => fs_watcher,
Err(e) => {
println!("[LogWatcher] Error creating filesystem watcher: {}", e);
return false;
}
};
for file in &prepared_files {
let file_path = &file.path;
if !file_path.is_absolute() {
println!("[LogWatcher] Error creating filesystem watcher, path is not absolute: {}", file_path.to_string_lossy());
return false;
}
let parent_path = if let Some(parent) = file_path.parent() {
parent
} else {
println!("[LogWatcher] Error creating filesystem watcher for parent directory of file \"{}\", parent directory does not exist", file_path.to_string_lossy());
return false;
};
if let Err(e) = fs_watcher.watch(parent_path).await {
println!("[LogWatcher] Error creating filesystem watcher for directory \"{}\": {}", parent_path.to_string_lossy(), e);
return false;
}
}
let fs_watcher = Arc::new(fs_watcher);
for file in prepared_files {
let label_set = file.metadata.get_label_set();
for metadata in self.files.values() {
let label_set = metadata.get_label_set();
let _ = metrics.requests_total.get_or_create(&label_set);
let _ = metrics.errors_total.get_or_create(&label_set);
let log_watcher = match LogWatcher::create(file.path, file.metadata, metrics.clone(), Arc::clone(&fs_watcher), file.fs_event_receiver).await {
Some(log_watcher) => log_watcher,
None => return false,
};
tokio::spawn(log_watcher.watch());
}
true
}
}
struct LogWatcher {
state: LogWatchingState,
processor: LogLineProcessor,
fs_event_receiver: Receiver<Event>,
}
impl LogWatcher {
async fn create(path: PathBuf, metadata: LogFileMetadata, metrics: ApacheMetrics, fs_watcher: Arc<FsWatcher>, fs_event_receiver: Receiver<Event>) -> Option<Self> {
let state = match LogWatchingState::initialize(path.clone(), fs_watcher).await {
Some(state) => state,
None => return None,
};
let processor = LogLineProcessor { path, metadata, metrics };
Some(LogWatcher { state, processor, fs_event_receiver })
}
async fn watch(mut self) {
while let Ok(Some(_)) = self.state.lines.next_line().await {
// Skip lines that already existed.
}
let path = &self.processor.path;
'read_loop:
loop {
if !self.processor.process_lines(&mut self.state.lines).await {
break 'read_loop;
}
'event_loop:
loop {
let mut next_event = CoalescedFsEvent::None;
match self.fs_event_receiver.recv().await {
None => break 'read_loop,
Some(event) => {
next_event = next_event.merge(event);
while let Ok(event) = self.fs_event_receiver.try_recv() {
next_event = next_event.merge(event);
}
}
}
match next_event {
CoalescedFsEvent::None => continue 'event_loop,
CoalescedFsEvent::NewData => continue 'read_loop,
CoalescedFsEvent::NewFile => {
println!("[LogWatcher] File recreated: {}", path.to_string_lossy());
if !self.processor.process_lines(&mut self.state.lines).await {
break 'read_loop;
}
self.state = match self.state.reinitialize().await {
Some(state) => state,
None => break 'read_loop,
};
continue 'read_loop;
}
}
}
}
println!("[LogWatcher] Stopping log watcher for: {}", path.to_string_lossy());
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
enum CoalescedFsEvent {
None = 0,
NewData = 1,
NewFile = 2,
}
impl CoalescedFsEvent {
fn merge(self, event: Event) -> CoalescedFsEvent {
match event.kind {
EventKind::Modify(ModifyKind::Data(_)) => {
max(self, CoalescedFsEvent::NewData)
}
EventKind::Create(CreateKind::File) => {
max(self, CoalescedFsEvent::NewFile)
}
_ => self
}
}
}
struct LogWatchingState {
path: PathBuf,
lines: Lines<BufReader<File>>,
fs_watcher: Arc<FsWatcher>,
}
impl LogWatchingState {
const DEFAULT_BUFFER_CAPACITY: usize = 1024 * 4;
async fn initialize(path: PathBuf, fs_watcher: Arc<FsWatcher>) -> Option<LogWatchingState> {
if let Err(e) = fs_watcher.watch(&path).await {
println!("[LogWatcher] Error creating filesystem watcher for file \"{}\": {}", path.to_string_lossy(), e);
return None;
}
let lines = match File::open(&path).await {
Ok(file) => BufReader::with_capacity(Self::DEFAULT_BUFFER_CAPACITY, file).lines(),
Err(e) => {
println!("[LogWatcher] Error opening file \"{}\": {}", path.to_string_lossy(), e);
return None;
}
};
Some(LogWatchingState { path, lines, fs_watcher })
}
async fn reinitialize(self) -> Option<LogWatchingState> {
LogWatchingState::initialize(self.path, self.fs_watcher).await
}
}
struct LogLineProcessor {
path: PathBuf,
metadata: LogFileMetadata,
metrics: ApacheMetrics,
}
impl LogLineProcessor {
async fn process_lines(&self, reader: &mut Lines<BufReader<File>>) -> bool {
loop {
match reader.next_line().await {
Ok(maybe_line) => match maybe_line {
Some(line) => self.handle_line(line),
None => return true,
},
Err(e) => {
println!("[LogWatcher] Error reading from file \"{}\": {}", self.path.to_string_lossy(), e);
return false;
}
if let Some(event) = self.reader.next_line().await? {
self.handle_line(event, metrics);
}
}
}
fn handle_line(&self, line: String) {
let (kind, family) = match self.metadata.kind {
LogFileKind::Access => ("access log", &self.metrics.requests_total),
LogFileKind::Error => ("error log", &self.metrics.errors_total),
};
fn handle_line(&self, event: Line, metrics: &ApacheMetrics) {
if let Some(file) = self.files.get(event.source()) {
match file.kind {
LogFileKind::Access => self.handle_access_log_line(event.line(), file, metrics),
LogFileKind::Error => self.handle_error_log_line(event.line(), file, metrics),
}
} else {
println!("[LogWatcher] Received line from unknown file: {}", event.source().display());
}
}
fn handle_access_log_line(&self, line: &str, file: &LogFileInfo, metrics: &ApacheMetrics) {
match log_parser::AccessLogLineParts::parse(line) {
Ok(parts) => {
println!("[LogWatcher] Received access log line from \"{}\": {}", file.label, parts)
}
Err(err) => {
println!("[LogWatcher] Received access log line from \"{}\" with invalid format ({:?}): {}", file.label, err, line)
}
}
println!("[LogWatcher] Received {} line from \"{}\": {}", kind, self.metadata.label, line);
family.get_or_create(&self.metadata.get_label_set()).inc();
metrics.requests_total.get_or_create(&file.get_label_set()).inc();
}
fn handle_error_log_line(&self, line: &str, file: &LogFileInfo, metrics: &ApacheMetrics) {
println!("[LogWatcher] Received error log line from \"{}\": {}", file.label, line);
metrics.errors_total.get_or_create(&file.get_label_set()).inc();
}
}
async fn watch_logs(access_log_files: Vec<LogFilePath>, error_log_files: Vec<LogFilePath>, metrics: ApacheMetrics) -> io::Result<()> {
let mut watcher = LogWatcher::new()?;
for log_file in &access_log_files {
watcher.add_file(log_file, LogFileKind::Access).await?;
}
for log_file in &error_log_files {
watcher.add_file(log_file, LogFileKind::Error).await?;
}
watcher.start_watching(&metrics).await?;
Ok(())
}

View File

@@ -1,25 +1,23 @@
use std::env;
use std::net::{IpAddr, SocketAddr};
use std::process::ExitCode;
use std::str::FromStr;
use std::sync::Mutex;
use tokio::signal;
use tokio::sync::mpsc;
use crate::apache_metrics::ApacheMetrics;
use crate::log_file_pattern::{LogFilePath, parse_log_file_pattern_from_env};
use crate::log_watcher::start_log_watcher;
use crate::web_server::WebServer;
use crate::log_watcher::watch_logs_task;
use crate::web_server::{create_web_server, run_web_server};
mod apache_metrics;
mod fs_watcher;
mod log_file_pattern;
mod log_parser;
mod log_watcher;
mod web_server;
const ACCESS_LOG_FILE_PATTERN: &str = "ACCESS_LOG_FILE_PATTERN";
const ERROR_LOG_FILE_PATTERN: &str = "ERROR_LOG_FILE_PATTERN";
const ACCESS_LOG_FILE_PATTERN: &'static str = "ACCESS_LOG_FILE_PATTERN";
const ERROR_LOG_FILE_PATTERN: &'static str = "ERROR_LOG_FILE_PATTERN";
fn find_log_files(environment_variable_name: &str, log_kind: &str) -> Option<Vec<LogFilePath>> {
let log_file_pattern = match parse_log_file_pattern_from_env(environment_variable_name) {
@@ -47,19 +45,12 @@ fn find_log_files(environment_variable_name: &str, log_kind: &str) -> Option<Vec
println!("Found {} file: {} (label \"{}\")", log_kind, log_file.path.display(), log_file.label);
}
Some(log_files)
return Some(log_files);
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> ExitCode {
let host = env::var("HTTP_HOST").unwrap_or(String::from("127.0.0.1"));
let bind_ip = match IpAddr::from_str(&host) {
Ok(addr) => addr,
Err(_) => {
println!("Invalid HTTP host: {}", host);
return ExitCode::FAILURE;
}
};
println!("Initializing exporter...");
@@ -73,27 +64,23 @@ async fn main() -> ExitCode {
None => return ExitCode::FAILURE,
};
let server = match WebServer::try_bind(SocketAddr::new(bind_ip, 9240)) {
Some(server) => server,
None => return ExitCode::FAILURE
};
let (metrics_registry, metrics) = ApacheMetrics::new();
let (shutdown_send, mut shutdown_recv) = mpsc::unbounded_channel();
if !start_log_watcher(access_log_files, error_log_files, metrics).await {
return ExitCode::FAILURE;
}
tokio::spawn(watch_logs_task(access_log_files, error_log_files, metrics.clone(), shutdown_send.clone()));
tokio::spawn(run_web_server(create_web_server(host.as_str(), 9240, Mutex::new(metrics_registry))));
tokio::spawn(server.serve(Mutex::new(metrics_registry)));
drop(shutdown_send);
match signal::ctrl_c().await {
Ok(_) => {
println!("Received CTRL-C, shutting down...");
ExitCode::SUCCESS
tokio::select! {
_ = signal::ctrl_c() => {
println!("Received CTRL-C, shutting down...")
}
Err(e) => {
println!("Error registering CTRL-C handler: {}", e);
ExitCode::FAILURE
_ = shutdown_recv.recv() => {
println!("Shutting down...");
}
}
ExitCode::SUCCESS
}

View File

@@ -1,95 +1,54 @@
use std::fmt;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};
use std::str;
use std::sync::Mutex;
use std::time::Duration;
use hyper::{Body, Error, header, Method, Request, Response, Server, StatusCode};
use hyper::http::Result;
use hyper::server::Builder;
use hyper::server::conn::AddrIncoming;
use hyper::service::{make_service_fn, service_fn};
use actix_web::{App, HttpResponse, HttpServer, Result, web};
use actix_web::dev::Server;
use prometheus_client::encoding::text::encode;
use prometheus_client::registry::Registry;
const MAX_BUFFER_SIZE: usize = 1024 * 32;
pub struct WebServer {
builder: Builder<AddrIncoming>,
}
impl WebServer {
//noinspection HttpUrlsUsage
pub fn try_bind(addr: SocketAddr) -> Option<WebServer> {
println!("[WebServer] Starting web server on {0} with metrics endpoint: http://{0}/metrics", addr);
let builder = match Server::try_bind(&addr) {
Ok(builder) => builder,
Err(e) => {
println!("[WebServer] Could not bind to {}: {}", addr, e);
return None;
}
};
let builder = builder.tcp_keepalive(Some(Duration::from_secs(60)));
let builder = builder.http1_only(true);
let builder = builder.http1_keepalive(true);
let builder = builder.http1_max_buf_size(MAX_BUFFER_SIZE);
let builder = builder.http1_header_read_timeout(Duration::from_secs(10));
Some(WebServer { builder })
}
//noinspection HttpUrlsUsage
pub fn create_web_server(host: &str, port: u16, metrics_registry: Mutex<Registry>) -> Server {
let metrics_registry = web::Data::new(metrics_registry);
pub async fn serve(self, metrics_registry: Mutex<Registry>) {
let metrics_registry = Arc::new(metrics_registry);
let service = make_service_fn(move |_| {
let metrics_registry = Arc::clone(&metrics_registry);
async move {
Ok::<_, Error>(service_fn(move |req| handle_request(req, Arc::clone(&metrics_registry))))
}
});
if let Err(e) = self.builder.serve(service).await {
println!("[WebServer] Error starting web server: {}", e);
}
let server = HttpServer::new(move || {
App::new()
.app_data(metrics_registry.clone())
.service(web::resource("/metrics").route(web::get().to(metrics_handler)))
});
let server = server.keep_alive(Duration::from_secs(60));
let server = server.shutdown_timeout(0);
let server = server.disable_signals();
let server = server.workers(1);
let server = server.bind((host, port));
println!("[WebServer] Starting web server on {0}:{1} with metrics endpoint: http://{0}:{1}/metrics", host, port);
return server.unwrap().run();
}
pub async fn run_web_server(server: Server) {
if let Err(e) = server.await {
println!("[WebServer] Error running web server: {}", e);
}
}
async fn handle_request(req: Request<Body>, metrics_registry: Arc<Mutex<Registry>>) -> Result<Response<Body>> {
if req.method() == Method::GET && req.uri().path() == "/metrics" {
metrics_handler(Arc::clone(&metrics_registry)).await
} else {
Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty())
}
}
//noinspection SpellCheckingInspection
async fn metrics_handler(metrics_registry: Arc<Mutex<Registry>>) -> Result<Response<Body>> {
match encode_metrics(metrics_registry) {
MetricsEncodeResult::Ok(buf) => {
Response::builder().status(StatusCode::OK).header(header::CONTENT_TYPE, "application/openmetrics-text; version=1.0.0; charset=utf-8").body(Body::from(buf))
}
MetricsEncodeResult::FailedAcquiringRegistryLock => {
async fn metrics_handler(metrics_registry: web::Data<Mutex<Registry>>) -> Result<HttpResponse> {
let mut buf = Vec::new();
{
if let Ok(metrics_registry) = metrics_registry.lock() {
encode(&mut buf, &metrics_registry)?;
} else {
println!("[WebServer] Failed acquiring lock on registry.");
Response::builder().status(StatusCode::INTERNAL_SERVER_ERROR).body(Body::empty())
}
MetricsEncodeResult::FailedEncodingMetrics(e) => {
println!("[WebServer] Error encoding metrics: {}", e);
Response::builder().status(StatusCode::INTERNAL_SERVER_ERROR).body(Body::empty())
return Ok(HttpResponse::InternalServerError().body(""));
}
}
}
enum MetricsEncodeResult {
Ok(String),
FailedAcquiringRegistryLock,
FailedEncodingMetrics(fmt::Error),
}
fn encode_metrics(metrics_registry: Arc<Mutex<Registry>>) -> MetricsEncodeResult {
let mut buf = String::new();
return if let Ok(metrics_registry) = metrics_registry.lock() {
encode(&mut buf, &metrics_registry).map_or_else(MetricsEncodeResult::FailedEncodingMetrics, |_| MetricsEncodeResult::Ok(buf))
if let Ok(buf) = String::from_utf8(buf) {
Ok(HttpResponse::Ok().content_type("application/openmetrics-text; version=1.0.0; charset=utf-8").body(buf))
} else {
MetricsEncodeResult::FailedAcquiringRegistryLock
};
println!("[WebServer] Failed converting buffer to UTF-8.");
Ok(HttpResponse::InternalServerError().body(""))
}
}