hunter/worker: simplify error case, hardcode interval_after_error

This commit is contained in:
Astro 2022-11-05 03:25:30 +01:00
parent 36adb5f324
commit 44a3d68201
5 changed files with 11 additions and 11 deletions

View File

@ -26,5 +26,4 @@ hosts:
 - ubuntu.social
 - uwu.social
-interval_after_error: 7200
-max_workers: 128
+max_workers: 16

View File

@ -2,7 +2,6 @@
 pub struct Config {
     pub redis: String,
     pub hosts: Vec<String>,
-    pub interval_after_error: u64,
     pub max_workers: usize,
 }

View File

@ -85,10 +85,6 @@ async fn main() {
             workers_active -= 1;
             scheduler.enqueue(host, new_posts > 0, next_interval);
         }
-        Message::Error { host } => {
-            workers_active -= 1;
-            scheduler.enqueue(host, false, Duration::from_secs(config.interval_after_error));
-        }
         Message::IntroduceHosts { hosts } => {
             for host in hosts.into_iter() {
                 scheduler.introduce(host).await;

View File

@ -2,8 +2,12 @@ use std::collections::HashSet;
 use std::time::Duration;
 use crate::feed::Feed;
 
+// timeouts are fairly low as they will be multiplied with the amount
+// of sequential fetches without new posts by the scheduler.
-const DEFAULT_INTERVAL: Duration = Duration::from_secs(60);
+const DEFAULT_INTERVAL: Duration = Duration::from_secs(30);
 const MIN_INTERVAL: Duration = Duration::from_secs(10);
+const ERROR_INTERVAL: Duration = Duration::from_secs(180);
 
 #[derive(Debug)]
 pub enum Message {
@ -12,7 +16,6 @@ pub enum Message {
         new_posts: usize,
         next_interval: Duration,
     },
-    Error { host: String },
     IntroduceHosts { hosts: Vec<String> },
} }
@ -78,7 +81,11 @@ pub fn fetch_and_process(
         }
         Err(e) => {
             log::error!("Failed fetching {}: {}", host, e);
-            message_tx.send(Message::Error { host }).unwrap();
+            message_tx.send(Message::Fetched {
+                host,
+                new_posts: 0,
+                next_interval: ERROR_INTERVAL,
+            }).unwrap();
         }
     }
 });

View File

@ -6,7 +6,6 @@ let
 hunterDefaultSettings = {
   redis = "redis://127.0.0.1:${toString cfg.redis.port}/";
   hosts = [ "mastodon.social" "fosstodon.org" "chaos.social" "dresden.network" ];
-  interval_after_error = 7200;
   max_workers = 16;
 };
}; };