aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGalen Guyer <galen@galenguyer.com>2022-12-06 12:30:29 -0500
committerGalen Guyer <galen@galenguyer.com>2022-12-06 12:30:29 -0500
commit0c15b0d4a2ee3cd52b4d9aecfcf007cd76a5b06d (patch)
treea56f102eef1ea714ab4b164faa13c90701482b5b
parent3094c184734c305d04c193371714e91170b6ac9f (diff)
SQL cleanup
-rw-r--r--migrations/01-create-db.sql27
-rw-r--r--src/file.rs199
-rw-r--r--src/load.rs77
-rw-r--r--src/main.rs217
-rw-r--r--src/sql/insert-amateur.sql20
-rw-r--r--src/sql/insert-comment.sql10
-rw-r--r--src/sql/insert-entity.sql32
-rw-r--r--src/sql/insert-header.sql61
-rw-r--r--src/sql/insert-history.sql8
-rw-r--r--src/sql/insert-license-attachment.sql10
-rw-r--r--src/sql/insert-special-condition-code.sql1
-rw-r--r--src/sql/insert-special-condition-free-form.sql13
-rw-r--r--src/sql/insert-special-condition.sql11
13 files changed, 458 insertions, 228 deletions
diff --git a/migrations/01-create-db.sql b/migrations/01-create-db.sql
index e81c1e0..d0c0cee 100644
--- a/migrations/01-create-db.sql
+++ b/migrations/01-create-db.sql
@@ -1,11 +1,12 @@
.echo on
-CREATE TABLE migrations (
+CREATE TABLE IF NOT EXISTS updates (
id SERIAL PRIMARY KEY,
- name VARCHAR(255) NOT NULL,
- created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+ daily BOOLEAN NOT NULL,
+	date datetime NOT NULL
);
-CREATE TABLE amateurs (
+
+CREATE TABLE IF NOT EXISTS amateurs (
record_type varchar(2) not null default 'AM',
unique_system_identifier integer not null,
uls_file_number varchar(14),
@@ -25,7 +26,7 @@ CREATE TABLE amateurs (
previous_operator_class varchar(1),
trustee_name varchar(50)
);
-CREATE TABLE comments (
+CREATE TABLE IF NOT EXISTS comments (
record_type varchar(2) not null default "CO",
unique_system_identifier integer not null,
uls_file_number varchar(14),
@@ -35,7 +36,7 @@ CREATE TABLE comments (
status_code varchar(1),
status_date datetime
);
-CREATE TABLE entities (
+CREATE TABLE IF NOT EXISTS entities (
record_type varchar(2) not null default 'EN',
unique_system_identifier integer not null,
uls_file_number varchar(14),
@@ -67,7 +68,7 @@ CREATE TABLE entities (
linked_license_id integer null,
linked_callsign varchar(10)
);
-CREATE TABLE headers (
+CREATE TABLE IF NOT EXISTS headers (
record_type varchar(2) not null default 'HD',
unique_system_identifier integer not null,
uls_file_number varchar(14),
@@ -128,7 +129,7 @@ CREATE TABLE headers (
return_spectrum_cert_900 varchar(1),
payment_cert_900 varchar(1)
);
-CREATE TABLE history (
+CREATE TABLE IF NOT EXISTS history (
record_type varchar(2) not null default 'HS',
unique_system_identifier integer not null,
uls_file_number varchar(14),
@@ -136,7 +137,7 @@ CREATE TABLE history (
log_date datetime,
code varchar(6)
);
-CREATE TABLE license_attachments (
+CREATE TABLE IF NOT EXISTS license_attachments (
record_type varchar(2) not null default 'LA',
unique_system_identifier integer not null,
call_sign varchar(10),
@@ -146,7 +147,7 @@ CREATE TABLE license_attachments (
attachment_file_name varchar(60),
action_performed varchar(1)
);
-CREATE TABLE special_conditions (
+CREATE TABLE IF NOT EXISTS special_conditions (
record_type varchar(2) not null default 'SC',
unique_system_identifier integer not null,
uls_file_number varchar(14),
@@ -157,7 +158,7 @@ CREATE TABLE special_conditions (
status_code varchar(1),
status_date datetime
);
-CREATE TABLE special_conditions_free_form (
+CREATE TABLE IF NOT EXISTS special_conditions_free_form (
record_type varchar(2) not null default 'SF',
unique_system_identifier integer not null,
uls_file_number varchar(14),
@@ -170,11 +171,9 @@ CREATE TABLE special_conditions_free_form (
status_code varchar(1),
status_date datetime
);
-CREATE TABLE special_condition_codes (
+CREATE TABLE IF NOT EXISTS special_condition_codes (
code INTEGER PRIMARY KEY,
service varchar(2),
description TEXT,
unknown TEXT
);
-INSERT INTO migrations (name)
-VALUES ('01-create-db.sql');
diff --git a/src/file.rs b/src/file.rs
new file mode 100644
index 0000000..3dcccd5
--- /dev/null
+++ b/src/file.rs
@@ -0,0 +1,199 @@
+use chrono::DateTime;
+use filetime::{self, FileTime};
+use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
+use regex::Regex;
+
+use std::fs::{self, File};
+use std::io::{Read, Write};
+
+/// Downloads a file from the given URL to the given path
+///
+/// # Arguments
+///
+/// * `url` - The URL to download from
+/// * `file_name` - The file name to save to. If None, try to use the Content-Disposition
+/// header to determine the filename, and fall back to the last segment of the URL
+///
+/// # Examples
+/// ```
+/// download_file("https://data.fcc.gov/download/pub/uls/complete/l_amat.zip", None);
+/// ```
+pub fn download_file(url: &str, file_name: Option<&str>) -> Result<File, ()> {
+ let resp = ureq::get(url).call().expect("Error downloading file");
+
+ // We can work on handling not having a Content-Length header later
+ assert!(resp.has("Content-Length"));
+ let len: u64 = resp
+ .header("Content-Length")
+ .unwrap()
+ .parse()
+ .expect("Error parsing Content-Length header");
+
+ let last_modified = match resp.header("Last-Modified") {
+ Some(last_mod) => match DateTime::parse_from_rfc2822(last_mod) {
+ Ok(dt) => Some(dt.timestamp()),
+ Err(_) => None,
+ },
+ None => None,
+ };
+
+ // Time to determine the file name!
+ // Start by seeing if we were told anything, that makes it easy
+ // This is just a helper. It should be its own function. lmao.
+ let parse_file_name_from_url = |url: &str| {
+ let output_file_name_regex = Regex::new(r"/(\w+\.?\w*)").expect("Error constructing regex");
+ let Some(file_name_captures) = output_file_name_regex.captures_iter(url).last() else {
+ panic!("Error parsing file name from URL");
+ };
+ let Some(maybe_match) = file_name_captures.iter().last() else {
+ panic!("Error parsing file name from URL");
+ };
+ let Some(file_name_match) = maybe_match else {
+ panic!("Error parsing file name from URL");
+ };
+ String::from(file_name_match.as_str())
+ };
+ let output_file_name = match file_name {
+ Some(n) => String::from(n),
+ None => {
+ // We weren't given a file name by the user, so we need to figure it out ourself
+ match resp.header("Content-Disposition") {
+ // A Content-Disposition header is present, so we can use that
+ Some(content_disposition) => {
+ let content_disposition_regex =
+ Regex::new(r#"filename="([\w\.]+)""#).expect("Error compiling regex");
+ // Check if the Content-Disposition header specifies a filename
+ match content_disposition_regex.captures(content_disposition) {
+ Some(cd_match) => {
+ // We have a filename, so use that
+ // TODO: Make less unwrappy
+ cd_match
+ .iter()
+ .last()
+ .unwrap()
+ .unwrap()
+ .as_str()
+ .to_string()
+ }
+ None => {
+ // It doesn't, so we have to fall back to the file name in the URL
+ parse_file_name_from_url(url)
+ }
+ }
+ }
+ // No Content-Disposition header, so we have to fall back to the file name in the URL
+ None => parse_file_name_from_url(url),
+ }
+ }
+ };
+
+ if std::path::Path::new(&output_file_name).exists() {
+ let file_metadata = fs::metadata(&output_file_name).expect("Error getting file metadata");
+ let mtime = FileTime::from_last_modification_time(&file_metadata);
+
+ match (
+ mtime.seconds() >= last_modified.unwrap_or(1),
+ file_metadata.len() == len,
+ ) {
+ (true, true) => {
+ println!("File already downloaded");
+ return Ok(File::open(output_file_name).expect("Error opening file"));
+ }
+ (true, false) => {
+ println!("File already downloaded, but is incomplete");
+ }
+ (false, _) => {
+ println!("File already downloaded, but is out of date");
+ }
+ }
+ } else {
+ println!("File does not exist, downloading");
+ }
+
+ let mut output_file = fs::File::create(&output_file_name).expect("Error creating output file");
+
+ let mut reader = resp.into_reader();
+ let chunk_size = len / 99;
+
+ let progress_bar = ProgressBar::new(len);
+ progress_bar.set_draw_target(ProgressDrawTarget::stderr_with_hz(1));
+ progress_bar.set_message(output_file_name.clone());
+ progress_bar.set_style(
+ ProgressStyle::with_template(
+ "[{elapsed}+{eta}/{duration}] [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}) {msg}",
+ )
+ .unwrap()
+ .progress_chars("#>-"),
+ );
+
+ loop {
+ let mut chunk = vec![0u8; chunk_size as usize];
+ let bytes_read = reader.read(&mut chunk[..]).expect("Error reading chunk");
+ chunk.truncate(bytes_read); // This way we don't end with a ton of leading 0s
+ if bytes_read > 0 {
+ output_file
+ .write_all(chunk.as_slice())
+ .expect("Error writing to output file");
+
+ progress_bar.inc(bytes_read as u64);
+ } else {
+ break;
+ }
+ }
+
+ output_file.flush().expect("Error flushing output file");
+ progress_bar.finish();
+
+ Ok(fs::File::open(&output_file_name).expect("Error opening output file"))
+}
+
+pub fn unzip_file(zip_file: File) -> Result<(), ()> {
+ let mut archive = zip::ZipArchive::new(zip_file).expect("Error opening zip archive");
+
+ let progress_bar = ProgressBar::new(archive.len().try_into().unwrap());
+ progress_bar.set_message("");
+ progress_bar.set_style(
+ ProgressStyle::with_template(
+ "[{elapsed}+{eta}/{duration}] [{bar:40.cyan/blue}] {human_pos}/{human_len} {msg}",
+ )
+ .unwrap()
+ .progress_chars("#>-"),
+ );
+
+ for i in 0..archive.len() {
+ let mut file = archive
+ .by_index(i)
+ .expect("Error getting file from archive");
+ let unzip_path = match file.enclosed_name() {
+ Some(path) => path.to_owned(),
+ None => continue,
+ };
+ progress_bar.set_message(format!("{}", unzip_path.display()));
+
+ if (*file.name()).ends_with('/') {
+ fs::create_dir_all(&unzip_path).expect("Error creating directory");
+ } else {
+ if let Some(p) = unzip_path.parent() {
+ if !p.exists() {
+ fs::create_dir_all(p).expect("Error creating directory");
+ }
+ }
+ let mut unzip_file = fs::File::create(&unzip_path).expect("Error creating file");
+ std::io::copy(&mut file, &mut unzip_file).expect("Error copying file");
+ }
+
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+
+ if let Some(mode) = file.unix_mode() {
+ fs::set_permissions(&unzip_path, fs::Permissions::from_mode(mode)).unwrap();
+ }
+ }
+ // TODO: Also set and check file mtime
+ progress_bar.set_position((i + 1).try_into().unwrap());
+ }
+
+ progress_bar.finish();
+ Ok(())
+}
diff --git a/src/load.rs b/src/load.rs
index 2fe04a6..cde0d62 100644
--- a/src/load.rs
+++ b/src/load.rs
@@ -6,16 +6,17 @@ use sqlx::{QueryBuilder, Sqlite, SqlitePool};
use std::fs::File;
use std::io::BufRead;
-const INSERT_AMATEUR_SQL: &str = r"INSERT INTO amateurs (record_type, unique_system_identifier, uls_file_number, ebf_number, call_sign, operator_class, group_code, region_code, trustee_call_sign, trustee_indicator, physician_certification, ve_signature, systematic_call_sign_change, vanity_call_sign_change, vainty_relationship, previous_call_sign, previous_operator_class, trustee_name) ";
-const INSERT_COMMENT_SQL: &str = r"INSERT INTO comments (record_type, unique_system_identifier, uls_file_number, call_sign, comment_date, description, status_code, status_date) ";
-const INSERT_ENTITY_SQL: &str = r"INSERT INTO entities (record_type, unique_system_identifier, uls_file_number, ebf_number, call_sign, entity_type, licensee_id, entity_name, first_name, mi, last_name, suffix, phone, fax, email, street_address, city, state, zip_code, po_box, attention_line, sgin, frn, applicant_type_code, applicant_type_other, status_code, status_date, lic_category_code, linked_license_id, linked_callsign) ";
-const INSERT_HEADER_SQL: &str = r"INSERT INTO headers (record_type, unique_system_identifier, uls_file_number, ebf_number, call_sign, license_status, radio_service_code, grant_date, expired_date, cancellation_date, eligibility_rule_number, reserved, alien, alien_government, alien_corporation, alien_officer, alien_control, revoked, convicted, adjudged, reserved2, common_carrier, non_common_carrier, private_comm, fixed, mobile, radiolocation, satellite, developmental_or_sta, interconnected_service, certifier_first_name, certifier_mi, certifier_last_name, certifier_suffix, certifier_title, gender, african_american, native_american, hawaiian, asian, white, ethnicity, effective_date, last_action_date, auction_id, reg_stat_broad_serv, band_manager, type_serv_broad_serv, alien_ruling, licensee_name_change, whitespace_ind, additional_cert_choice, additional_cert_answer, discontinuation_ind, regulatory_compliance_ind, eligibility_cert_900, transition_plan_cert_900, return_spectrum_cert_900, payment_cert_900) ";
-const INSERT_HISTORY_SQL: &str = r"INSERT INTO history (record_type, unique_system_identifier, uls_file_number, call_sign, log_date, code) ";
-const INSERT_LICENSE_ATTACHMENT_SQL: &str = r"INSERT INTO license_attachments (record_type, unique_system_identifier, call_sign, attachment_code, attachment_description, attachment_date, attachment_file_name, action_performed) ";
-const INSERT_SPECIAL_CONDITION_SQL: &str = r"INSERT INTO special_conditions (record_type, unique_system_identifier, uls_file_number, ebf_number, call_sign, special_conditions_type, special_conditions_code, status_code, status_date) ";
-const INSERT_SPECIAL_CONDITION_FREE_FORM_SQL: &str = r"INSERT INTO special_conditions_free_form (record_type, unique_system_identifier, uls_file_number, ebf_number, call_sign, license_free_form_type, unique_license_free_form_identifier, sequence_number, license_free_form_condition, status_code, status_date) ";
-const INSERT_SPECIAL_CONDITION_CODES_SQL: &str =
- r"INSERT INTO special_condition_codes (code, service, description, unknown) ";
+const INSERT_AMATEUR_SQL: &str = include_str!("sql/insert-amateur.sql");
+const INSERT_COMMENT_SQL: &str = include_str!("sql/insert-comment.sql");
+const INSERT_ENTITY_SQL: &str = include_str!("sql/insert-entity.sql");
+const INSERT_HEADER_SQL: &str = include_str!("sql/insert-header.sql");
+const INSERT_HISTORY_SQL: &str = include_str!("sql/insert-history.sql");
+const INSERT_LICENSE_ATTACHMENT_SQL: &str = include_str!("sql/insert-license-attachment.sql");
+const INSERT_SPECIAL_CONDITION_SQL: &str = include_str!("sql/insert-special-condition.sql");
+const INSERT_SPECIAL_CONDITION_FREE_FORM_SQL: &str =
+ include_str!("sql/insert-special-condition-free-form.sql");
+const INSERT_SPECIAL_CONDITION_CODE_SQL: &str =
+ include_str!("sql/insert-special-condition-code.sql");
const BIND_LIMIT: usize = 32766;
@@ -43,6 +44,12 @@ pub async fn load_amateurs(db: &SqlitePool) {
);
progress_bar.set_message("AM.dat");
+ QueryBuilder::new("DELETE FROM amateurs")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting amateurs");
+
let chunk_size = BIND_LIMIT / 18;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
@@ -112,6 +119,12 @@ pub async fn load_comments(db: &SqlitePool) {
);
progress_bar.set_message("CO.dat");
+ QueryBuilder::new("DELETE FROM comments")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting comments");
+
let chunk_size = BIND_LIMIT / 8;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
@@ -172,6 +185,12 @@ pub async fn load_entities(db: &SqlitePool) {
);
progress_bar.set_message("EN.dat");
+ QueryBuilder::new("DELETE FROM entities")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting entities");
+
let chunk_size = BIND_LIMIT / 30;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
@@ -253,6 +272,12 @@ pub async fn load_headers(db: &SqlitePool) {
);
progress_bar.set_message("HD.dat");
+ QueryBuilder::new("DELETE FROM headers")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting headers");
+
let chunk_size = BIND_LIMIT / 60;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
@@ -364,6 +389,12 @@ pub async fn load_history(db: &SqlitePool) {
);
progress_bar.set_message("HS.dat");
+ QueryBuilder::new("DELETE FROM history")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting history");
+
let chunk_size = BIND_LIMIT / 6;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
@@ -422,6 +453,12 @@ pub async fn load_license_attachments(db: &SqlitePool) {
);
progress_bar.set_message("LA.dat");
+ QueryBuilder::new("DELETE FROM license_attachments")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting license_attachments");
+
let chunk_size = BIND_LIMIT / 8;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
@@ -484,6 +521,12 @@ pub async fn load_special_conditions(db: &SqlitePool) {
);
progress_bar.set_message("SC.dat");
+ QueryBuilder::new("DELETE FROM special_conditions")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting special_conditions");
+
let chunk_size = BIND_LIMIT / 9;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
@@ -547,6 +590,12 @@ pub async fn load_special_conditions_free_form(db: &SqlitePool) {
);
progress_bar.set_message("SF.dat");
+ QueryBuilder::new("DELETE FROM special_conditions_free_form")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting special_conditions_free_form");
+
let chunk_size = BIND_LIMIT / 11;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
@@ -612,13 +661,19 @@ pub async fn load_special_condition_codes(db: &SqlitePool) {
);
progress_bar.set_message("special_condition_codes.txt");
+ QueryBuilder::new("DELETE FROM special_condition_codes")
+ .build()
+ .execute(&mut transaction)
+ .await
+ .expect("Error deleting special_condition_codes");
+
let chunk_size = BIND_LIMIT / 4;
for chunk in &reader.records().chunks(chunk_size) {
let chunk = chunk.collect::<Result<Vec<StringRecord>, _>>().unwrap();
let chunk = chunk.iter();
let mut query_builder: QueryBuilder<Sqlite> =
- QueryBuilder::new(INSERT_SPECIAL_CONDITION_CODES_SQL);
+ QueryBuilder::new(INSERT_SPECIAL_CONDITION_CODE_SQL);
query_builder.push_values(chunk, |mut builder, entry| {
builder
diff --git a/src/main.rs b/src/main.rs
index c81d08f..a240dd1 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,211 +1,17 @@
-use chrono::DateTime;
-use filetime::{self, FileTime};
-use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use regex::Regex;
use sqlx::sqlite::SqlitePool;
-use std::fs::{self, File};
-use std::io::{Read, Write};
+use std::fs;
+
mod fcc_date;
+mod file;
mod load;
mod types;
+use file::{download_file, unzip_file};
const WEEKLY_DUMP_URL: &str = "https://data.fcc.gov/download/pub/uls/complete/l_amat.zip";
const SPECIAL_CONDITIONS_URL: &str = "https://www.fcc.gov/file/20669/download";
-/// Downloads a file from the given URL to the given path
-///
-/// # Arguments
-///
-/// * `url` - The URL to download from
-/// * `path` - The path to download to. If None, try and use the Content-Disposition
-/// header to determine the filename, and fall back to the last segment of the URL
-///
-/// # Examples
-/// ```
-/// download_file("https://data.fcc.gov/download/pub/uls/complete/l_amat.zip", None);
-/// ```
-fn download_file(url: &str, file_name: Option<&str>) -> Result<File, ()> {
- let resp = ureq::get(url).call().expect("Error downloading file");
-
- // We can work on handling not having a Content-Length header later
- assert!(resp.has("Content-Length"));
- let len: u64 = resp
- .header("Content-Length")
- .unwrap()
- .parse()
- .expect("Error parsing Content-Length header");
-
- let last_modified = match resp.header("Last-Modified") {
- Some(last_mod) => match DateTime::parse_from_rfc2822(last_mod) {
- Ok(dt) => Some(dt.timestamp()),
- Err(_) => None,
- },
- None => None,
- };
-
- // Time to determine the file name!
- // Start by seeing if we were told anything, that makes it easy
- // This is just a helper. It should be its own function. lmao.
- let parse_file_name_from_url = |url: &str| {
- let output_file_name_regex = Regex::new(r"/(\w+\.?\w*)").expect("Error constructing regex");
- let Some(file_name_captures) = output_file_name_regex.captures_iter(url).last() else {
- panic!("Error parsing file name from URL");
- };
- let Some(maybe_match) = file_name_captures.iter().last() else {
- panic!("Error parsing file name from URL");
- };
- let Some(file_name_match) = maybe_match else {
- panic!("Error parsing file name from URL");
- };
- String::from(file_name_match.as_str())
- };
- let output_file_name = match file_name {
- Some(n) => String::from(n),
- None => {
- // We weren't given a file name by the user, so we need to figure it out ourself
- match resp.header("Content-Disposition") {
- // A Content-Disposition header is present, so we can use that
- Some(content_disposition) => {
- let content_disposition_regex =
- Regex::new(r#"filename="([\w\.]+)""#).expect("Error compiling regex");
- // Check if the Content-Disposition header specifies a filename
- match content_disposition_regex.captures(content_disposition) {
- Some(cd_match) => {
- // We have a filename, so use that
- // TODO: Make less unwrappy
- cd_match
- .iter()
- .last()
- .unwrap()
- .unwrap()
- .as_str()
- .to_string()
- }
- None => {
- // It doesn't, so we have to fall back to the file name in the URL
- parse_file_name_from_url(url)
- }
- }
- }
- // No Content-Disposition header, so we have to fall back to the file name in the URL
- None => parse_file_name_from_url(url),
- }
- }
- };
-
- if std::path::Path::new(&output_file_name).exists() {
- let file_metadata = fs::metadata(&output_file_name).expect("Error getting file metadata");
- let mtime = FileTime::from_last_modification_time(&file_metadata);
-
- match (
- mtime.seconds() >= last_modified.unwrap_or(1),
- file_metadata.len() == len,
- ) {
- (true, true) => {
- println!("File already downloaded");
- return Ok(File::open(output_file_name).expect("Error opening file"));
- }
- (true, false) => {
- println!("File already downloaded, but is incomplete");
- }
- (false, _) => {
- println!("File already downloaded, but is out of date");
- }
- }
- } else {
- println!("File does not exist, downloading");
- }
-
- let mut output_file = fs::File::create(&output_file_name).expect("Error creating output file");
-
- let mut reader = resp.into_reader();
- let chunk_size = len / 99;
-
- let progress_bar = ProgressBar::new(len);
- progress_bar.set_draw_target(ProgressDrawTarget::stderr_with_hz(1));
- progress_bar.set_message(output_file_name);
- progress_bar.set_style(
- ProgressStyle::with_template(
- "[{elapsed}+{eta}/{duration}] [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}) {msg}",
- )
- .unwrap()
- .progress_chars("#>-"),
- );
-
- loop {
- let mut chunk = vec![0u8; chunk_size as usize];
- let bytes_read = reader.read(&mut chunk[..]).expect("Error reading chunk");
- chunk.truncate(bytes_read); // This way we don't end with a ton of leading 0s
- if bytes_read > 0 {
- output_file
- .write_all(chunk.as_slice())
- .expect("Error writing to output file");
-
- progress_bar.inc(bytes_read as u64);
- } else {
- break;
- }
- }
-
- output_file.flush().expect("Error flushing output file");
- progress_bar.finish();
-
- Ok(output_file)
-}
-
-fn unzip_file(zip_file: File) -> Result<(), ()> {
- let mut archive = zip::ZipArchive::new(zip_file).expect("Error opening zip archive");
-
- let progress_bar = ProgressBar::new(archive.len().try_into().unwrap());
- progress_bar.set_message("");
- progress_bar.set_style(
- ProgressStyle::with_template(
- "[{elapsed}+{eta}/{duration}] [{bar:40.cyan/blue}] {human_pos}/{human_len} {msg}",
- )
- .unwrap()
- .progress_chars("#>-"),
- );
-
- for i in 0..archive.len() {
- let mut file = archive
- .by_index(i)
- .expect("Error getting file from archive");
- let unzip_path = match file.enclosed_name() {
- Some(path) => path.to_owned(),
- None => continue,
- };
- progress_bar.set_message(format!("{}", unzip_path.display()));
-
- if (*file.name()).ends_with('/') {
- fs::create_dir_all(&unzip_path).expect("Error creating directory");
- } else {
- if let Some(p) = unzip_path.parent() {
- if !p.exists() {
- fs::create_dir_all(p).expect("Error creating directory");
- }
- }
- let mut unzip_file = fs::File::create(&unzip_path).expect("Error creating file");
- std::io::copy(&mut file, &mut unzip_file).expect("Error copying file");
- }
-
- #[cfg(unix)]
- {
- use std::os::unix::fs::PermissionsExt;
-
- if let Some(mode) = file.unix_mode() {
- fs::set_permissions(&unzip_path, fs::Permissions::from_mode(mode)).unwrap();
- }
- }
- // TODO: Also set and check file mtime
- progress_bar.set_position((i + 1).try_into().unwrap());
- }
-
- progress_bar.finish();
- Ok(())
-}
-
-#[tokio::main]
-async fn main() {
+async fn load_weekly(db: &SqlitePool) {
let output_file =
download_file(WEEKLY_DUMP_URL, None).expect("Error downloading weekly dump file");
// Hardcoding this file name because it might change and I don't want to deal with that
@@ -237,10 +43,6 @@ async fn main() {
)
.expect("Error writing file");
- let db = SqlitePool::connect("sqlite://fcc.db")
- .await
- .expect("Error connecting to database");
-
load::load_amateurs(&db).await;
load::load_comments(&db).await;
load::load_entities(&db).await;
@@ -252,3 +54,12 @@ async fn main() {
load::load_special_condition_codes(&db).await;
}
+
+#[tokio::main]
+async fn main() {
+ let db = SqlitePool::connect("sqlite://fcc.db")
+ .await
+ .expect("Error connecting to database");
+
+ load_weekly(&db).await;
+}
diff --git a/src/sql/insert-amateur.sql b/src/sql/insert-amateur.sql
new file mode 100644
index 0000000..44fd776
--- /dev/null
+++ b/src/sql/insert-amateur.sql
@@ -0,0 +1,20 @@
+INSERT INTO amateurs (
+ record_type,
+ unique_system_identifier,
+ uls_file_number,
+ ebf_number,
+ call_sign,
+ operator_class,
+ group_code,
+ region_code,
+ trustee_call_sign,
+ trustee_indicator,
+ physician_certification,
+ ve_signature,
+ systematic_call_sign_change,
+ vanity_call_sign_change,
+ vainty_relationship,
+ previous_call_sign,
+ previous_operator_class,
+ trustee_name
+ ) \ No newline at end of file
diff --git a/src/sql/insert-comment.sql b/src/sql/insert-comment.sql
new file mode 100644
index 0000000..a99990f
--- /dev/null
+++ b/src/sql/insert-comment.sql
@@ -0,0 +1,10 @@
+INSERT INTO comments (
+ record_type,
+ unique_system_identifier,
+ uls_file_number,
+ call_sign,
+ comment_date,
+ description,
+ status_code,
+ status_date
+ ) \ No newline at end of file
diff --git a/src/sql/insert-entity.sql b/src/sql/insert-entity.sql
new file mode 100644
index 0000000..aabb698
--- /dev/null
+++ b/src/sql/insert-entity.sql
@@ -0,0 +1,32 @@
+INSERT INTO entities (
+ record_type,
+ unique_system_identifier,
+ uls_file_number,
+ ebf_number,
+ call_sign,
+ entity_type,
+ licensee_id,
+ entity_name,
+ first_name,
+ mi,
+ last_name,
+ suffix,
+ phone,
+ fax,
+ email,
+ street_address,
+ city,
+ state,
+ zip_code,
+ po_box,
+ attention_line,
+ sgin,
+ frn,
+ applicant_type_code,
+ applicant_type_other,
+ status_code,
+ status_date,
+ lic_category_code,
+ linked_license_id,
+ linked_callsign
+ ) \ No newline at end of file
diff --git a/src/sql/insert-header.sql b/src/sql/insert-header.sql
new file mode 100644
index 0000000..87ddc23
--- /dev/null
+++ b/src/sql/insert-header.sql
@@ -0,0 +1,61 @@
+INSERT INTO headers (
+ record_type,
+ unique_system_identifier,
+ uls_file_number,
+ ebf_number,
+ call_sign,
+ license_status,
+ radio_service_code,
+ grant_date,
+ expired_date,
+ cancellation_date,
+ eligibility_rule_number,
+ reserved,
+ alien,
+ alien_government,
+ alien_corporation,
+ alien_officer,
+ alien_control,
+ revoked,
+ convicted,
+ adjudged,
+ reserved2,
+ common_carrier,
+ non_common_carrier,
+ private_comm,
+ fixed,
+ mobile,
+ radiolocation,
+ satellite,
+ developmental_or_sta,
+ interconnected_service,
+ certifier_first_name,
+ certifier_mi,
+ certifier_last_name,
+ certifier_suffix,
+ certifier_title,
+ gender,
+ african_american,
+ native_american,
+ hawaiian,
+ asian,
+ white,
+ ethnicity,
+ effective_date,
+ last_action_date,
+ auction_id,
+ reg_stat_broad_serv,
+ band_manager,
+ type_serv_broad_serv,
+ alien_ruling,
+ licensee_name_change,
+ whitespace_ind,
+ additional_cert_choice,
+ additional_cert_answer,
+ discontinuation_ind,
+ regulatory_compliance_ind,
+ eligibility_cert_900,
+ transition_plan_cert_900,
+ return_spectrum_cert_900,
+ payment_cert_900
+ ) \ No newline at end of file
diff --git a/src/sql/insert-history.sql b/src/sql/insert-history.sql
new file mode 100644
index 0000000..51f43cd
--- /dev/null
+++ b/src/sql/insert-history.sql
@@ -0,0 +1,8 @@
+INSERT INTO history (
+ record_type,
+ unique_system_identifier,
+ uls_file_number,
+ call_sign,
+ log_date,
+ code
+ ) \ No newline at end of file
diff --git a/src/sql/insert-license-attachment.sql b/src/sql/insert-license-attachment.sql
new file mode 100644
index 0000000..8ba6bc2
--- /dev/null
+++ b/src/sql/insert-license-attachment.sql
@@ -0,0 +1,10 @@
+INSERT INTO license_attachments (
+ record_type,
+ unique_system_identifier,
+ call_sign,
+ attachment_code,
+ attachment_description,
+ attachment_date,
+ attachment_file_name,
+ action_performed
+ ) \ No newline at end of file
diff --git a/src/sql/insert-special-condition-code.sql b/src/sql/insert-special-condition-code.sql
new file mode 100644
index 0000000..7bd4c8d
--- /dev/null
+++ b/src/sql/insert-special-condition-code.sql
@@ -0,0 +1 @@
+INSERT INTO special_condition_codes (code, service, description, unknown) \ No newline at end of file
diff --git a/src/sql/insert-special-condition-free-form.sql b/src/sql/insert-special-condition-free-form.sql
new file mode 100644
index 0000000..a6bec3e
--- /dev/null
+++ b/src/sql/insert-special-condition-free-form.sql
@@ -0,0 +1,13 @@
+INSERT INTO special_conditions_free_form (
+ record_type,
+ unique_system_identifier,
+ uls_file_number,
+ ebf_number,
+ call_sign,
+ license_free_form_type,
+ unique_license_free_form_identifier,
+ sequence_number,
+ license_free_form_condition,
+ status_code,
+ status_date
+ ) \ No newline at end of file
diff --git a/src/sql/insert-special-condition.sql b/src/sql/insert-special-condition.sql
new file mode 100644
index 0000000..127dc07
--- /dev/null
+++ b/src/sql/insert-special-condition.sql
@@ -0,0 +1,11 @@
+INSERT INTO special_conditions (
+ record_type,
+ unique_system_identifier,
+ uls_file_number,
+ ebf_number,
+ call_sign,
+ special_conditions_type,
+ special_conditions_code,
+ status_code,
+ status_date
+ ) \ No newline at end of file