author    Galen Guyer <galen@galenguyer.com>  2022-12-22 21:09:58 -0800
committer Galen Guyer <galen@galenguyer.com>  2022-12-22 21:09:58 -0800
commit    6e97c01eec4c2ce669b7b22b8edbc8669fcbac49 (patch)
tree      5a377169452aa03895623f1e9bd7a66480218dc7
parent    9e57b57d7fefcdc599e463f6c964fe0ca87a1caf (diff)
Daily update support
-rw-r--r--  migrations/01-create-db.sql                     |   6
-rw-r--r--  src/load.rs                                     | 189
-rw-r--r--  src/main.rs                                     | 150
-rw-r--r--  src/sql/insert-amateur.sql                      |   2
-rw-r--r--  src/sql/insert-comment.sql                      |   2
-rw-r--r--  src/sql/insert-entity.sql                       |   2
-rw-r--r--  src/sql/insert-header.sql                       |   2
-rw-r--r--  src/sql/insert-history.sql                      |   2
-rw-r--r--  src/sql/insert-license-attachment.sql           |   2
-rw-r--r--  src/sql/insert-special-condition-code.sql       |   2
-rw-r--r--  src/sql/insert-special-condition-free-form.sql  |   2
-rw-r--r--  src/sql/insert-special-condition.sql            |   2
12 files changed, 269 insertions(+), 94 deletions(-)
diff --git a/migrations/01-create-db.sql b/migrations/01-create-db.sql
index 247a3d8..c3dc59b 100644
--- a/migrations/01-create-db.sql
+++ b/migrations/01-create-db.sql
@@ -9,7 +9,7 @@ CREATE TABLE IF NOT EXISTS updates (
CREATE TABLE IF NOT EXISTS amateurs (
record_type varchar(2) not null default 'AM',
- unique_system_identifier integer not null,
+ unique_system_identifier integer primary key,
uls_file_number varchar(14),
ebf_number varchar(30),
call_sign varchar(10),
@@ -39,7 +39,7 @@ CREATE TABLE IF NOT EXISTS comments (
);
CREATE TABLE IF NOT EXISTS entities (
record_type varchar(2) not null default 'EN',
- unique_system_identifier integer not null,
+ unique_system_identifier integer primary key,
uls_file_number varchar(14),
ebf_number varchar(30),
call_sign varchar(10),
@@ -71,7 +71,7 @@ CREATE TABLE IF NOT EXISTS entities (
);
CREATE TABLE IF NOT EXISTS headers (
record_type varchar(2) not null default 'HD',
- unique_system_identifier integer not null,
+ unique_system_identifier integer primary key,
uls_file_number varchar(14),
ebf_number varchar(30),
call_sign varchar(10),
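The migration change above is the linchpin of daily support: promoting unique_system_identifier from a plain NOT NULL column to the primary key is what lets the INSERT OR REPLACE statements later in this commit overwrite a row in place when a daily dump re-delivers a licensee. A minimal sketch of the resulting SQLite semantics, using a cut-down two-column table (the column names come from the migration; the values are invented):

    CREATE TABLE amateurs (
        unique_system_identifier integer primary key,
        call_sign varchar(10)
    );

    INSERT OR REPLACE INTO amateurs VALUES (1000, 'W1AW');
    -- a daily dump re-delivers the same licensee with new data; the
    -- primary key collides, so SQLite drops the old row and inserts
    -- the new one instead of appending a duplicate
    INSERT OR REPLACE INTO amateurs VALUES (1000, 'W1AW/P');

    SELECT count(*) FROM amateurs;  -- 1, not 2

Without the primary key, the second insert would find no conflict and simply append a second row for the same licensee, which is what the weekly DELETE-then-reload path had been papering over.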
diff --git a/src/load.rs b/src/load.rs
index b3bb6ee..7c69012 100644
--- a/src/load.rs
+++ b/src/load.rs
@@ -20,8 +20,13 @@ const INSERT_SPECIAL_CONDITION_CODE_SQL: &str =
const BIND_LIMIT: usize = 32766;
-pub async fn load_amateurs(db: &SqlitePool) {
- let amateurs_file = File::open("AM.dat").expect("Error opening file");
+pub async fn load_amateurs(db: &SqlitePool, clear_first: bool) {
+ let amateurs_file = File::open("AM.dat");
+ if amateurs_file.is_err() {
+ println!("AM.dat not found, skipping");
+ return;
+ }
+ let amateurs_file = amateurs_file.unwrap();
//let amateurs_file_meta = fs::metadata("AM.dat").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&amateurs_file).lines().count();
drop(amateurs_file);
@@ -44,11 +49,13 @@ pub async fn load_amateurs(db: &SqlitePool) {
);
progress_bar.set_message("AM.dat");
- QueryBuilder::new("DELETE FROM amateurs")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting amateurs");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM amateurs")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting amateurs");
+ }
let chunk_size = BIND_LIMIT / 18;
for chunk in &reader.records().chunks(chunk_size) {
@@ -96,8 +103,13 @@ pub async fn load_amateurs(db: &SqlitePool) {
progress_bar.finish();
}
-pub async fn load_comments(db: &SqlitePool) {
- let comments_file = File::open("CO.dat").expect("Error opening file");
+pub async fn load_comments(db: &SqlitePool, clear_first: bool) {
+ let comments_file = File::open("CO.dat");
+ if comments_file.is_err() {
+ println!("CO.dat not found, skipping");
+ return;
+ }
+ let comments_file = comments_file.unwrap();
// let comments_file_meta = fs::metadata("CO.dat").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&comments_file).lines().count();
drop(comments_file);
@@ -120,11 +132,13 @@ pub async fn load_comments(db: &SqlitePool) {
);
progress_bar.set_message("CO.dat");
- QueryBuilder::new("DELETE FROM comments")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting comments");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM comments")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting comments");
+ }
let chunk_size = BIND_LIMIT / 8;
for chunk in &reader.records().chunks(chunk_size) {
@@ -162,8 +176,13 @@ pub async fn load_comments(db: &SqlitePool) {
progress_bar.finish();
}
-pub async fn load_entities(db: &SqlitePool) {
- let entities_file = File::open("EN.dat").expect("Error opening file");
+pub async fn load_entities(db: &SqlitePool, clear_first: bool) {
+ let entities_file = File::open("EN.dat");
+ if entities_file.is_err() {
+ println!("EN.dat not found, skipping");
+ return;
+ }
+ let entities_file = entities_file.unwrap();
//let entities_file_meta = fs::metadata("EN.dat").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&entities_file).lines().count();
drop(entities_file);
@@ -186,11 +205,13 @@ pub async fn load_entities(db: &SqlitePool) {
);
progress_bar.set_message("EN.dat");
- QueryBuilder::new("DELETE FROM entities")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting entities");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM entities")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting entities");
+ }
let chunk_size = BIND_LIMIT / 30;
for chunk in &reader.records().chunks(chunk_size) {
@@ -250,8 +271,13 @@ pub async fn load_entities(db: &SqlitePool) {
progress_bar.finish();
}
-pub async fn load_headers(db: &SqlitePool) {
- let headers_file = File::open("HD.dat").expect("Error opening file");
+pub async fn load_headers(db: &SqlitePool, clear_first: bool) {
+ let headers_file = File::open("HD.dat");
+ if headers_file.is_err() {
+ println!("HD.dat not found, skipping");
+ return;
+ }
+ let headers_file = headers_file.unwrap();
// let headers_file_meta = fs::metadata("HD.dat").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&headers_file).lines().count();
drop(headers_file);
@@ -274,11 +300,13 @@ pub async fn load_headers(db: &SqlitePool) {
);
progress_bar.set_message("HD.dat");
- QueryBuilder::new("DELETE FROM headers")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting headers");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM headers")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting headers");
+ }
let chunk_size = BIND_LIMIT / 60;
for chunk in &reader.records().chunks(chunk_size) {
@@ -367,8 +395,13 @@ pub async fn load_headers(db: &SqlitePool) {
progress_bar.finish();
}
-pub async fn load_history(db: &SqlitePool) {
- let history_file = File::open("HS.dat").expect("Error opening file");
+pub async fn load_history(db: &SqlitePool, clear_first: bool) {
+ let history_file = File::open("HS.dat");
+ if history_file.is_err() {
+ println!("No HS.dat file found, skipping");
+ return;
+ }
+ let history_file = history_file.unwrap();
// let history_file_meta = fs::metadata("HS.dat").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&history_file).lines().count();
drop(history_file);
@@ -391,11 +424,13 @@ pub async fn load_history(db: &SqlitePool) {
);
progress_bar.set_message("HS.dat");
- QueryBuilder::new("DELETE FROM history")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting history");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM history")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting history");
+ }
let chunk_size = BIND_LIMIT / 6;
for chunk in &reader.records().chunks(chunk_size) {
@@ -431,8 +466,13 @@ pub async fn load_history(db: &SqlitePool) {
progress_bar.finish();
}
-pub async fn load_license_attachments(db: &SqlitePool) {
- let attachments_file = File::open("LA.dat").expect("Error opening file");
+pub async fn load_license_attachments(db: &SqlitePool, clear_first: bool) {
+ let attachments_file = File::open("LA.dat");
+ if attachments_file.is_err() {
+ println!("No LA.dat file found, skipping");
+ return;
+ }
+ let attachments_file = attachments_file.unwrap();
// let attachments_file_meta = fs::metadata("LA.dat").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&attachments_file).lines().count();
drop(attachments_file);
@@ -455,11 +495,13 @@ pub async fn load_license_attachments(db: &SqlitePool) {
);
progress_bar.set_message("LA.dat");
- QueryBuilder::new("DELETE FROM license_attachments")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting license_attachments");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM license_attachments")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting license_attachments");
+ }
let chunk_size = BIND_LIMIT / 8;
for chunk in &reader.records().chunks(chunk_size) {
@@ -499,8 +541,13 @@ pub async fn load_license_attachments(db: &SqlitePool) {
progress_bar.finish();
}
-pub async fn load_special_conditions(db: &SqlitePool) {
- let conditions_file = File::open("SC.dat").expect("Error opening file");
+pub async fn load_special_conditions(db: &SqlitePool, clear_first: bool) {
+ let conditions_file = File::open("SC.dat");
+ if conditions_file.is_err() {
+ println!("No SC.dat file found, skipping");
+ return;
+ }
+ let conditions_file = conditions_file.unwrap();
// let conditions_file_meta = fs::metadata("SC.dat").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&conditions_file).lines().count();
drop(conditions_file);
@@ -523,11 +570,13 @@ pub async fn load_special_conditions(db: &SqlitePool) {
);
progress_bar.set_message("SC.dat");
- QueryBuilder::new("DELETE FROM special_conditions")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting special_conditions");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM special_conditions")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting special_conditions");
+ }
let chunk_size = BIND_LIMIT / 9;
for chunk in &reader.records().chunks(chunk_size) {
@@ -568,8 +617,13 @@ pub async fn load_special_conditions(db: &SqlitePool) {
progress_bar.finish();
}
-pub async fn load_special_conditions_free_form(db: &SqlitePool) {
- let conditions_file = File::open("SF.dat").expect("Error opening file");
+pub async fn load_special_conditions_free_form(db: &SqlitePool, clear_first: bool) {
+ let conditions_file = File::open("SF.dat");
+ if conditions_file.is_err() {
+ println!("No SF.dat file found, skipping");
+ return;
+ }
+ let conditions_file = conditions_file.unwrap();
// let conditions_file_meta = fs::metadata("SF.dat").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&conditions_file).lines().count();
drop(conditions_file);
@@ -592,11 +646,13 @@ pub async fn load_special_conditions_free_form(db: &SqlitePool) {
);
progress_bar.set_message("SF.dat");
- QueryBuilder::new("DELETE FROM special_conditions_free_form")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting special_conditions_free_form");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM special_conditions_free_form")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting special_conditions_free_form");
+ }
let chunk_size = BIND_LIMIT / 11;
for chunk in &reader.records().chunks(chunk_size) {
@@ -639,8 +695,13 @@ pub async fn load_special_conditions_free_form(db: &SqlitePool) {
progress_bar.finish();
}
-pub async fn load_special_condition_codes(db: &SqlitePool) {
- let codes_file = File::open("special_condition_codes.txt").expect("Error opening file");
+pub async fn load_special_condition_codes(db: &SqlitePool, clear_first: bool) {
+ let codes_file = File::open("special_condition_codes.txt");
+ if codes_file.is_err() {
+ println!("No special_condition_codes.txt file found, skipping");
+ return;
+ }
+ let codes_file = codes_file.unwrap();
// let history_file_meta = fs::metadata("special_condition_codes.txt").expect("Error getting file metadata");
let line_count = std::io::BufReader::new(&codes_file).lines().count();
drop(codes_file);
@@ -663,11 +724,13 @@ pub async fn load_special_condition_codes(db: &SqlitePool) {
);
progress_bar.set_message("special_condition_codes.txt");
- QueryBuilder::new("DELETE FROM special_condition_codes")
- .build()
- .execute(&mut transaction)
- .await
- .expect("Error deleting special_condition_codes");
+ if clear_first {
+ QueryBuilder::new("DELETE FROM special_condition_codes")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting special_condition_codes");
+ }
let chunk_size = BIND_LIMIT / 4;
for chunk in &reader.records().chunks(chunk_size) {
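Every loader in src/load.rs now follows the same shape: open the .dat file and skip quietly when the archive did not include it (the daily dumps do not always ship every record type, hence the open-or-skip), clear the table only on a full weekly load, and insert in batches sized so each one stays under SQLite's bind-parameter ceiling, which is why the divisor tracks each table's column count (18 for amateurs, 30 for entities, 60 for headers). A condensed sketch of that pattern using only std, with the sqlx calls reduced to comments; load_records and FIELDS_PER_ROW are illustrative names, not part of the codebase:

    use std::fs::File;
    use std::io::{BufRead, BufReader};

    const BIND_LIMIT: usize = 32766;  // SQLite's bound-parameter ceiling
    const FIELDS_PER_ROW: usize = 18; // e.g. the AM record layout

    fn load_records(path: &str, clear_first: bool) -> Option<usize> {
        // a missing file is a skip, not a fatal error: daily dumps may
        // legitimately omit whole record types
        let file = match File::open(path) {
            Ok(f) => f,
            Err(_) => {
                println!("{path} not found, skipping");
                return None;
            }
        };
        if clear_first {
            // weekly load: run `DELETE FROM <table>` inside the transaction
        }
        let lines: Vec<String> = BufReader::new(file)
            .lines()
            .filter_map(Result::ok)
            .collect();
        // keep every batch under BIND_LIMIT total placeholders
        for batch in lines.chunks(BIND_LIMIT / FIELDS_PER_ROW) {
            // real code: QueryBuilder::push_values(batch, ..).execute(..)
            let _ = batch;
        }
        Some(lines.len())
    }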
diff --git a/src/main.rs b/src/main.rs
index 4152dbf..adbf18b 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -46,6 +46,50 @@ impl FccUpdates {
saturday: get_last_updated_header(SATURDAY_DUMP_URL),
}
}
+
+ fn get_pending(&self, last_update: DateTime<Utc>) -> Vec<(DateTime<Utc>, String)> {
+ let mut pending = Vec::new();
+
+ if let Some(sunday) = self.sunday {
+ if sunday > last_update {
+ pending.push((sunday, SUNDAY_DUMP_URL.to_string()));
+ }
+ }
+ if let Some(monday) = self.monday {
+ if monday > last_update {
+ pending.push((monday, MONDAY_DUMP_URL.to_string()));
+ }
+ }
+ if let Some(tuesday) = self.tuesday {
+ if tuesday > last_update {
+ pending.push((tuesday, TUESDAY_DUMP_URL.to_string()));
+ }
+ }
+ if let Some(wednesday) = self.wednesday {
+ if wednesday > last_update {
+ pending.push((wednesday, WEDNESDAY_DUMP_URL.to_string()));
+ }
+ }
+ if let Some(thursday) = self.thursday {
+ if thursday > last_update {
+ pending.push((thursday, THURSDAY_DUMP_URL.to_string()));
+ }
+ }
+ if let Some(friday) = self.friday {
+ if friday > last_update {
+ pending.push((friday, FRIDAY_DUMP_URL.to_string()));
+ }
+ }
+ if let Some(saturday) = self.saturday {
+ if saturday > last_update {
+ pending.push((saturday, SATURDAY_DUMP_URL.to_string()));
+ }
+ }
+
+ pending.sort_by(|a, b| a.0.cmp(&b.0));
+
+ pending
+ }
}
fn get_last_updated_header(url: &str) -> Option<DateTime<Utc>> {
@@ -105,20 +149,69 @@ async fn load_weekly(db: &SqlitePool) -> chrono::DateTime<Utc> {
)
.expect("Error writing file");
- load::load_amateurs(db).await;
- load::load_comments(db).await;
- load::load_entities(db).await;
- load::load_headers(db).await;
- load::load_history(db).await;
- load::load_license_attachments(db).await;
- load::load_special_conditions(db).await;
- load::load_special_conditions_free_form(db).await;
+ load::load_amateurs(db, true).await;
+ load::load_comments(db, true).await;
+ load::load_entities(db, true).await;
+ load::load_headers(db, true).await;
+ load::load_history(db, true).await;
+ load::load_license_attachments(db, true).await;
+ load::load_special_conditions(db, true).await;
+ load::load_special_conditions_free_form(db, true).await;
- load::load_special_condition_codes(db).await;
+ load::load_special_condition_codes(db, true).await;
let meta = output_file.metadata().unwrap();
- std::fs::remove_file("l_amat.zip").expect("Error deleting l_amat.zip");
+ // std::fs::remove_file("l_amat.zip").expect("Error deleting l_amat.zip");
+ DateTime::<Utc>::from(
+ std::time::UNIX_EPOCH + Duration::from_secs(meta.mtime().try_into().unwrap()),
+ )
+}
+
+async fn load_daily(url: &str, db: &SqlitePool) -> chrono::DateTime<Utc> {
+ let parse_file_name_from_url = |url: &str| {
+ let output_file_name_regex = Regex::new(r"/(\w+\.?\w*)").expect("Error constructing regex");
+ let Some(file_name_captures) = output_file_name_regex.captures_iter(url).last() else {
+ panic!("Error parsing file name from URL");
+ };
+ let Some(maybe_match) = file_name_captures.iter().last() else {
+ panic!("Error parsing file name from URL");
+ };
+ let Some(file_name_match) = maybe_match else {
+ panic!("Error parsing file name from URL");
+ };
+ String::from(file_name_match.as_str())
+ };
+
+ let output_file =
+ download_file(url, None).expect("Error downloading daily dump file");
+
+ unzip_file(&output_file).expect("Error unzipping file");
+ std::fs::remove_file("counts").expect("Error deleting counts file");
+
+ // The FCC dump can contain unescaped newlines in the middle of a field,
+ // so collapse them before the file reaches the CSV reader
+ let comments_regex = Regex::new(r"\s*\r\r\n").unwrap();
+ let comments = fs::read_to_string("CO.dat").expect("Error reading file");
+ fs::write(
+ "CO.dat",
+ comments_regex.replace_all(&comments, " ").to_string(),
+ )
+ .expect("Error writing file");
+
+ load::load_amateurs(db, false).await;
+ load::load_comments(db, false).await;
+ load::load_entities(db, false).await;
+ load::load_headers(db, false).await;
+ load::load_history(db, false).await;
+ load::load_license_attachments(db, false).await;
+ load::load_special_conditions(db, false).await;
+ load::load_special_conditions_free_form(db, false).await;
+
+ let meta = output_file.metadata().unwrap();
+
+ // let file_name = parse_file_name_from_url(url);
+ // std::fs::remove_file(&file_name).unwrap_or_else(|_| panic!("Error deleting {}", file_name));
DateTime::<Utc>::from(
std::time::UNIX_EPOCH + Duration::from_secs(meta.mtime().try_into().unwrap()),
)
@@ -137,7 +230,23 @@ async fn main() {
.expect("Error getting last weekly update");
// if this is the first time the database is being updated
- if last_weekly.is_none() {
+ if let Some(last_weekly) = last_weekly {
+ if fcc_updates.weekly.is_some() && fcc_updates.weekly.unwrap() > last_weekly.date {
+ println!("New weekly update found, loading weekly dump");
+ let update_date = load_weekly(&db).await;
+ meta::insert_update(
+ &db,
+ &Update {
+ id: 0, // placeholder
+ daily: false,
+ weekly: true,
+ date: update_date,
+ },
+ )
+ .await
+ .expect("Error inserting weekly update");
+ }
+ } else {
println!("No weekly updates found, loading weekly dump");
let update_date = load_weekly(&db).await;
meta::insert_update(
@@ -151,23 +260,26 @@ async fn main() {
)
.await
.expect("Error inserting update");
- return;
}
- let last_weekly = last_weekly.unwrap();
- if fcc_updates.weekly.is_some() && fcc_updates.weekly.unwrap() > last_weekly.date {
- println!("New weekly update found, loading weekly dump");
- let update_date = load_weekly(&db).await;
+ let last_update = meta::get_last_update(&db, meta::UpdateType::Any)
+ .await
+ .expect("Error getting last update")
+ .expect("No updates found");
+
+ let pending = dbg!(fcc_updates.get_pending(dbg!(last_update.date)));
+ for update in pending {
+ let update_date = load_daily(&dbg!(update.1), &db).await;
meta::insert_update(
&db,
&Update {
id: 0, // placeholder
- daily: false,
- weekly: true,
+ daily: true,
+ weekly: false,
date: update_date,
},
)
.await
- .expect("Error inserting update");
+ .expect("Error inserting daily update");
}
}
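get_pending walks the seven per-day timestamps, keeps only those newer than the last recorded update, and sorts ascending so main replays the daily files in publication order, each loaded with clear_first = false so the INSERT OR REPLACE upserts accumulate on top of the last weekly snapshot. A simplified stand-in for that selection logic (the pending function and its tuple-slice input are illustrative, not the actual FccUpdates API; it assumes the crate's existing chrono dependency, and the URLs are made up):

    use chrono::{DateTime, TimeZone, Utc};

    fn pending(
        days: &[(Option<DateTime<Utc>>, &str)],
        last_update: DateTime<Utc>,
    ) -> Vec<(DateTime<Utc>, String)> {
        let mut out: Vec<_> = days
            .iter()
            .filter_map(|(ts, url)| {
                // keep only dumps published after the last applied update
                ts.filter(|t| *t > last_update).map(|t| (t, url.to_string()))
            })
            .collect();
        out.sort_by_key(|(t, _)| *t); // oldest first: replay in publication order
        out
    }

    fn main() {
        let last = Utc.with_ymd_and_hms(2022, 12, 20, 0, 0, 0).unwrap();
        let mon = Utc.with_ymd_and_hms(2022, 12, 19, 0, 0, 0).unwrap();
        let wed = Utc.with_ymd_and_hms(2022, 12, 21, 0, 0, 0).unwrap();
        let days = [
            (Some(mon), "https://example.invalid/l_am_mon.zip"),
            (Some(wed), "https://example.invalid/l_am_wed.zip"),
            (None, "https://example.invalid/l_am_fri.zip"),
        ];
        // only Wednesday postdates the last update
        assert_eq!(pending(&days, last).len(), 1);
    }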
diff --git a/src/sql/insert-amateur.sql b/src/sql/insert-amateur.sql
index 44fd776..f17ce78 100644
--- a/src/sql/insert-amateur.sql
+++ b/src/sql/insert-amateur.sql
@@ -1,4 +1,4 @@
-INSERT INTO amateurs (
+INSERT OR REPLACE INTO amateurs (
record_type,
unique_system_identifier,
uls_file_number,
diff --git a/src/sql/insert-comment.sql b/src/sql/insert-comment.sql
index a99990f..899e449 100644
--- a/src/sql/insert-comment.sql
+++ b/src/sql/insert-comment.sql
@@ -1,4 +1,4 @@
-INSERT INTO comments (
+INSERT OR REPLACE INTO comments (
record_type,
unique_system_identifier,
uls_file_number,
diff --git a/src/sql/insert-entity.sql b/src/sql/insert-entity.sql
index aabb698..a890bdb 100644
--- a/src/sql/insert-entity.sql
+++ b/src/sql/insert-entity.sql
@@ -1,4 +1,4 @@
-INSERT INTO entities (
+INSERT OR REPLACE INTO entities (
record_type,
unique_system_identifier,
uls_file_number,
diff --git a/src/sql/insert-header.sql b/src/sql/insert-header.sql
index 87ddc23..39007e4 100644
--- a/src/sql/insert-header.sql
+++ b/src/sql/insert-header.sql
@@ -1,4 +1,4 @@
-INSERT INTO headers (
+INSERT OR REPLACE INTO headers (
record_type,
unique_system_identifier,
uls_file_number,
diff --git a/src/sql/insert-history.sql b/src/sql/insert-history.sql
index 51f43cd..5b5cd31 100644
--- a/src/sql/insert-history.sql
+++ b/src/sql/insert-history.sql
@@ -1,4 +1,4 @@
-INSERT INTO history (
+INSERT OR REPLACE INTO history (
record_type,
unique_system_identifier,
uls_file_number,
diff --git a/src/sql/insert-license-attachment.sql b/src/sql/insert-license-attachment.sql
index 8ba6bc2..5ceadd9 100644
--- a/src/sql/insert-license-attachment.sql
+++ b/src/sql/insert-license-attachment.sql
@@ -1,4 +1,4 @@
-INSERT INTO license_attachments (
+INSERT OR REPLACE INTO license_attachments (
record_type,
unique_system_identifier,
call_sign,
diff --git a/src/sql/insert-special-condition-code.sql b/src/sql/insert-special-condition-code.sql
index 7bd4c8d..09bc132 100644
--- a/src/sql/insert-special-condition-code.sql
+++ b/src/sql/insert-special-condition-code.sql
@@ -1 +1 @@
-INSERT INTO special_condition_codes (code, service, description, unknown)
\ No newline at end of file
+INSERT OR REPLACE INTO special_condition_codes (code, service, description, unknown)
\ No newline at end of file
diff --git a/src/sql/insert-special-condition-free-form.sql b/src/sql/insert-special-condition-free-form.sql
index a6bec3e..d0d6ea4 100644
--- a/src/sql/insert-special-condition-free-form.sql
+++ b/src/sql/insert-special-condition-free-form.sql
@@ -1,4 +1,4 @@
-INSERT INTO special_conditions_free_form (
+INSERT OR REPLACE INTO special_conditions_free_form (
record_type,
unique_system_identifier,
uls_file_number,
diff --git a/src/sql/insert-special-condition.sql b/src/sql/insert-special-condition.sql
index 127dc07..b89a1b4 100644
--- a/src/sql/insert-special-condition.sql
+++ b/src/sql/insert-special-condition.sql
@@ -1,4 +1,4 @@
-INSERT INTO special_conditions (
+INSERT OR REPLACE INTO special_conditions (
record_type,
unique_system_identifier,
uls_file_number,
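Two properties of the upsert strategy are worth noting. First, SQLite's INSERT OR REPLACE is a delete-plus-insert rather than an in-place update, which is harmless here because every dump record carries the full row. Second, as far as this diff shows, only amateurs, entities, and headers gained primary keys in the migration; for tables like comments and history the OR REPLACE has nothing to conflict on, so daily loads could still accumulate duplicate rows there. An illustrative post-load sanity check (not part of the commit) that should return no rows for the keyed tables:

    -- duplicates can only appear where the upsert key is missing
    SELECT unique_system_identifier, COUNT(*) AS n
    FROM amateurs
    GROUP BY unique_system_identifier
    HAVING n > 1;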