-rw-r--r--  config.toml                      |  4
-rw-r--r--  src/commands/build.rs            | 43
-rw-r--r--  src/config/not_validated.rs      |  4
-rw-r--r--  src/config/util.rs               |  4
-rw-r--r--  src/orchestrator/orchestrator.rs | 20
5 files changed, 66 insertions, 9 deletions
diff --git a/config.toml b/config.toml
index abc9c70..b19288c 100644
--- a/config.toml
+++ b/config.toml
@@ -15,6 +15,10 @@ progress_format = "[{elapsed_precise}] ({percent:>3}%): {bar:40.cyan/blue} | {ms
# Can be overwritten temporarily via CLI
script_shebang = "#!/bin/bash"
+# The number of log lines to show if a build fails.
+# Defaults to 10
+build_error_lines = 10
+
# The theme for the highlighting engine when printing the script that ran inside
# a container.
#
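
The default of 10 mentioned above is wired up through serde further down in this diff (src/config/not_validated.rs and src/config/util.rs). A minimal stand-alone sketch of that mechanism, assuming the serde and toml crates and using a made-up DemoConfig struct in place of butido's NotValidatedConfiguration:

    // Illustrative only: `DemoConfig` is a stand-in for the real
    // NotValidatedConfiguration type.
    use serde::Deserialize;

    // Mirrors default_build_error_lines() from src/config/util.rs below.
    fn default_build_error_lines() -> usize {
        10
    }

    #[derive(Debug, Deserialize)]
    struct DemoConfig {
        #[serde(default = "default_build_error_lines")]
        build_error_lines: usize,
    }

    fn main() {
        // Key missing from the file: serde falls back to the default of 10.
        let cfg: DemoConfig = toml::from_str("").unwrap();
        assert_eq!(cfg.build_error_lines, 10);

        // Key set explicitly: the configured value wins.
        let cfg: DemoConfig = toml::from_str("build_error_lines = 25").unwrap();
        assert_eq!(cfg.build_error_lines, 25);
    }
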
diff --git a/src/commands/build.rs b/src/commands/build.rs
index 2184269..5170a8f 100644
--- a/src/commands/build.rs
+++ b/src/commands/build.rs
@@ -8,7 +8,11 @@ use anyhow::Error;
use anyhow::Result;
use anyhow::anyhow;
use clap::ArgMatches;
+use colored::Colorize;
+use diesel::ExpressionMethods;
use diesel::PgConnection;
+use diesel::QueryDsl;
+use diesel::RunQueryDsl;
use log::{debug, info, warn, trace};
use tokio::stream::StreamExt;
use tokio::sync::RwLock;
@@ -19,12 +23,14 @@ use crate::filestore::StagingStore;
use crate::filestore::path::StoreRoot;
use crate::job::JobResource;
use crate::job::JobSet;
+use crate::log::LogItem;
use crate::orchestrator::OrchestratorSetup;
use crate::package::PackageName;
use crate::package::PackageVersion;
use crate::package::Shebang;
use crate::package::Tree;
use crate::repository::Repository;
+use crate::schema;
use crate::source::SourceCache;
use crate::util::EnvironmentVariableName;
use crate::util::docker::ImageName;
@@ -42,6 +48,7 @@ pub async fn build(matches: &ArgMatches,
use crate::db::models::{
EnvVar,
Package,
+ Job,
GitHash,
Image,
Submit,
@@ -227,12 +234,13 @@ pub async fn build(matches: &ArgMatches,
trace!("Setting up job sets finished successfully");
trace!("Setting up Orchestrator");
+ let database_connection = Arc::new(database_connection);
let orch = OrchestratorSetup::builder()
.progress_generator(progressbars)
.endpoint_config(endpoint_configurations)
.staging_store(staging_store)
.release_store(release_dir)
- .database(database_connection)
+ .database(database_connection.clone())
.source_cache(source_cache)
.submit(submit)
.log_dir(if matches.is_present("write-log-file") { Some(config.log_dir().clone()) } else { None })
@@ -254,11 +262,42 @@ pub async fn build(matches: &ArgMatches,
.collect::<Result<_>>()?;
let mut had_error = false;
- for error in errors {
+ for (job_uuid, error) in errors {
had_error = true;
for cause in error.chain() {
writeln!(outlock, "{}", cause)?;
}
+
+ let data = schema::jobs::table
+ .filter(schema::jobs::dsl::uuid.eq(job_uuid))
+ .inner_join(schema::packages::table)
+ .first::<(Job, Package)>(database_connection.as_ref())?;
+
+ let number_log_lines = *config.build_error_lines();
+ writeln!(outlock, "Last {} lines of Job {}", number_log_lines, job_uuid)?;
+ writeln!(outlock, "for package {} {}\n\n", data.1.name, data.1.version)?;
+
+ let parsed_log = crate::log::ParsedLog::build_from(&data.0.log_text)?;
+ let lines = parsed_log.iter()
+ .map(|line_item| match line_item {
+ LogItem::Line(s) => Ok(String::from_utf8(s.to_vec())?.normal()),
+ LogItem::Progress(u) => Ok(format!("#BUTIDO:PROGRESS:{}", u).bright_black()),
+ LogItem::CurrentPhase(p) => Ok(format!("#BUTIDO:PHASE:{}", p).bright_black()),
+ LogItem::State(Ok(())) => Ok(format!("#BUTIDO:STATE:OK").green()),
+ LogItem::State(Err(s)) => Ok(format!("#BUTIDO:STATE:ERR:{}", s).red()),
+ })
+ .collect::<Result<Vec<_>>>()?;
+
+ lines
+ .iter()
+ .enumerate()
+ .skip(lines.len() - number_log_lines)
+ .map(|(i, line)| {
+ writeln!(outlock, "{:>4} | {}", i, line).map_err(Error::from)
+ })
+ .collect::<Result<()>>()?;
+
+ writeln!(outlock, "\n\n")?;
}
if had_error {
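
For reference, a stand-alone sketch of the tail computation used in the loop above, stripped of the diesel and colored plumbing. The patch's skip(lines.len() - number_log_lines) assumes the parsed log has at least build_error_lines entries; the sketch below uses saturating_sub so it also handles shorter logs:

    fn main() {
        // A fake 25-line log standing in for the parsed job log.
        let log: Vec<String> = (1..=25).map(|i| format!("line {}", i)).collect();
        let n = 10; // corresponds to *config.build_error_lines()

        // saturating_sub avoids an underflow when the log has fewer than n lines.
        let skip = log.len().saturating_sub(n);
        for (i, line) in log.iter().enumerate().skip(skip) {
            // Same "{:>4} | {}" formatting as in build.rs above.
            println!("{:>4} | {}", i, line);
        }
    }
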
diff --git a/src/config/not_validated.rs b/src/config/not_validated.rs
index d8a4d8d..bdbce89 100644
--- a/src/config/not_validated.rs
+++ b/src/config/not_validated.rs
@@ -31,6 +31,10 @@ pub struct NotValidatedConfiguration {
#[getset(get = "pub")]
package_print_format: String,
+ #[serde(default = "default_build_error_lines")]
+ #[getset(get = "pub")]
+ build_error_lines: usize,
+
#[getset(get = "pub")]
script_highlight_theme: Option<String>,
diff --git a/src/config/util.rs b/src/config/util.rs
index bf7e542..27fc530 100644
--- a/src/config/util.rs
+++ b/src/config/util.rs
@@ -71,3 +71,7 @@ pub fn default_script_shebang() -> String {
String::from("#!/bin/bash")
}
+pub fn default_build_error_lines() -> usize {
+ 10
+}
+
diff --git a/src/orchestrator/orchestrator.rs b/src/orchestrator/orchestrator.rs
index 21ac133..1c401c3 100644
--- a/src/orchestrator/orchestrator.rs
+++ b/src/orchestrator/orchestrator.rs
@@ -9,6 +9,7 @@ use diesel::PgConnection;
use log::trace;
use tokio::sync::RwLock;
use typed_builder::TypedBuilder;
+use uuid::Uuid;
use crate::config::Configuration;
use crate::db::models::Artifact;
@@ -40,7 +41,7 @@ pub struct OrchestratorSetup<'a> {
release_store: Arc<RwLock<ReleaseStore>>,
source_cache: SourceCache,
jobsets: Vec<JobSet>,
- database: PgConnection,
+ database: Arc<PgConnection>,
submit: Submit,
log_dir: Option<PathBuf>,
config: &'a Configuration,
@@ -48,8 +49,7 @@ pub struct OrchestratorSetup<'a> {
impl<'a> OrchestratorSetup<'a> {
pub async fn setup(self) -> Result<Orchestrator<'a>> {
- let db = Arc::new(self.database);
- let scheduler = EndpointScheduler::setup(self.endpoint_config, self.staging_store.clone(), db, self.submit.clone(), self.log_dir).await?;
+ let scheduler = EndpointScheduler::setup(self.endpoint_config, self.staging_store.clone(), self.database, self.submit.clone(), self.log_dir).await?;
Ok(Orchestrator {
scheduler: scheduler,
@@ -64,7 +64,7 @@ impl<'a> OrchestratorSetup<'a> {
impl<'a> Orchestrator<'a> {
- pub async fn run(self, output: &mut Vec<Artifact>) -> Result<Vec<anyhow::Error>> {
+ pub async fn run(self, output: &mut Vec<Artifact>) -> Result<Vec<(Uuid, anyhow::Error)>> {
for jobset in self.jobsets.into_iter() {
let errs = Self::run_jobset(&self.scheduler,
&self.merged_stores,
@@ -91,7 +91,7 @@ impl<'a> Orchestrator<'a> {
progress_generator: &ProgressBars,
jobset: JobSet,
output: &mut Vec<Artifact>)
- -> Result<Vec<anyhow::Error>>
+ -> Result<Vec<(Uuid, anyhow::Error)>>
{
use tokio::stream::StreamExt;
@@ -102,10 +102,16 @@ impl<'a> Orchestrator<'a> {
.into_iter()
.map(|runnable| {
let bar = multibar.add(progress_generator.bar());
- Self::run_runnable(runnable, scheduler, bar)
+
+ async {
+ let uuid = runnable.uuid().clone();
+ Self::run_runnable(runnable, scheduler, bar)
+ .await
+ .map_err(|e| (uuid, e))
+ }
})
.collect::<futures::stream::FuturesUnordered<_>>()
- .collect::<Vec<Result<Vec<Artifact>>>>();
+ .collect::<Vec<std::result::Result<Vec<Artifact>, (Uuid, Error)>>>();
let multibar_block = tokio::task::spawn_blocking(move || multibar.join());
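
The signature change from Vec<anyhow::Error> to Vec<(Uuid, anyhow::Error)> works by wrapping each job future so that a failure carries the UUID of the job that produced it, which is what build.rs later uses to look up the job's log. A minimal stand-alone sketch of that pattern, assuming the anyhow, futures, tokio and uuid crates and a made-up run_one() in place of run_runnable():

    use anyhow::{anyhow, Error};
    use futures::stream::{FuturesUnordered, StreamExt};
    use uuid::Uuid;

    // Stand-in for Orchestrator::run_runnable(); always fails for demonstration.
    async fn run_one(uuid: Uuid) -> Result<Vec<String>, Error> {
        Err(anyhow!("job {} failed", uuid))
    }

    #[tokio::main]
    async fn main() {
        let jobs: Vec<Uuid> = (0..3).map(|_| Uuid::new_v4()).collect();

        // Each future maps its error to (Uuid, Error), mirroring run_jobset().
        let results: Vec<Result<Vec<String>, (Uuid, Error)>> = jobs
            .into_iter()
            .map(|uuid| async move { run_one(uuid).await.map_err(|e| (uuid, e)) })
            .collect::<FuturesUnordered<_>>()
            .collect()
            .await;

        for res in results {
            if let Err((uuid, err)) = res {
                println!("job {} failed: {}", uuid, err);
            }
        }
    }
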