
Commit

run these real quick
diazrenata committed Apr 24, 2021
1 parent 9e5c7e0 commit f8e979a
Showing 10 changed files with 77 additions and 77 deletions.
34 changes: 17 additions & 17 deletions analysis/bbs_pipeline.R
@@ -53,27 +53,27 @@ cache$del(key = "lock", namespace = "session")
 
 ## Run the pipeline
 nodename <- Sys.info()["nodename"]
-if(grepl("ufhpc", nodename)) {
-  print("I know I am on the HiPerGator!")
-  library(clustermq)
-  options(clustermq.scheduler = "slurm", clustermq.template = here::here("slurm_clustermq.tmpl"))
-  ## Run the pipeline parallelized for HiPerGator
-  make(all,
-       force = TRUE,
-       cache = cache,
-       cache_log_file = here::here("analysis", "drake", "cache_log_bbs.txt"),
-       verbose = 1,
-       parallelism = "clustermq",
-       jobs = 100,
-       caching = "master",
-       memory_strategy = "autoclean",
-       garbage_collection = TRUE) # Important for DBI caches!
-} else {
+# if(grepl("ufhpc", nodename)) {
+#   print("I know I am on the HiPerGator!")
+#   library(clustermq)
+#   options(clustermq.scheduler = "slurm", clustermq.template = here::here("slurm_clustermq.tmpl"))
+#   ## Run the pipeline parallelized for HiPerGator
+#   make(all,
+#        force = TRUE,
+#        cache = cache,
+#        cache_log_file = here::here("analysis", "drake", "cache_log_bbs.txt"),
+#        verbose = 1,
+#        parallelism = "clustermq",
+#        jobs = 100,
+#        caching = "master",
+#        memory_strategy = "autoclean",
+#        garbage_collection = TRUE) # Important for DBI caches!
+# } else {
 library(clustermq)
 options(clustermq.scheduler = "multicore")
 # Run the pipeline on multiple local cores
 system.time(make(all, cache = cache, cache_log_file = here::here("analysis", "drake", "cache_log_bbs.txt"), verbose = 1, memory_strategy = "autoclean"))
-}
+#}
 
 #system.time(make(all, cache = cache, cache_log_file = here::here("analysis", "drake", "cache_log_bbs.txt")))
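With the HiPerGator branch commented out, every machine now takes the local multicore path. A quick way to confirm that clustermq's multicore scheduler works before committing to a long make() is a throwaway round trip through local workers. This sketch is illustrative and not part of the commit; the worker function and job count are arbitrary:

# Minimal clustermq smoke test; assumes only that clustermq is installed.
# Q() farms the function out to n_jobs local worker processes and collects results.
library(clustermq)
options(clustermq.scheduler = "multicore")
res <- Q(function(x) x^2, x = 1:4, n_jobs = 2)
stopifnot(identical(unlist(res), c(1, 4, 9, 16)))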

34 changes: 17 additions & 17 deletions analysis/gentry_pipeline.R
@@ -51,27 +51,27 @@ cache$del(key = "lock", namespace = "session")
 
 ## Run the pipeline
 nodename <- Sys.info()["nodename"]
-if(grepl("ufhpc", nodename)) {
-  print("I know I am on the HiPerGator!")
-  library(clustermq)
-  options(clustermq.scheduler = "slurm", clustermq.template = here::here("slurm_clustermq.tmpl"))
-  ## Run the pipeline parallelized for HiPerGator
-  make(all,
-       force = TRUE,
-       cache = cache,
-       cache_log_file = here::here("analysis", "drake", "cache_log_gentry.txt"),
-       verbose = 1,
-       parallelism = "clustermq",
-       jobs = 100,
-       caching = "master",
-       memory_strategy = "autoclean",
-       garbage_collection = TRUE) # Important for DBI caches!
-} else {
+# if(grepl("ufhpc", nodename)) {
+#   print("I know I am on the HiPerGator!")
+#   library(clustermq)
+#   options(clustermq.scheduler = "slurm", clustermq.template = here::here("slurm_clustermq.tmpl"))
+#   ## Run the pipeline parallelized for HiPerGator
+#   make(all,
+#        force = TRUE,
+#        cache = cache,
+#        cache_log_file = here::here("analysis", "drake", "cache_log_gentry.txt"),
+#        verbose = 1,
+#        parallelism = "clustermq",
+#        jobs = 100,
+#        caching = "master",
+#        memory_strategy = "autoclean",
+#        garbage_collection = TRUE) # Important for DBI caches!
+# } else {
 library(clustermq)
 options(clustermq.scheduler = "multicore")
 # Run the pipeline on multiple local cores
 system.time(make(all, cache = cache, cache_log_file = here::here("analysis", "drake", "cache_log_gentry.txt"), verbose = 1, memory_strategy = "autoclean"))
-}
+#}
 
 #system.time(make(all, cache = cache, cache_log_file = here::here("analysis", "drake", "cache_log_gentry.txt")))

40 changes: 20 additions & 20 deletions analysis/mcdb_pipeline.R
@@ -48,30 +48,30 @@ all <- drake_plan(
 db <- DBI::dbConnect(RSQLite::SQLite(), here::here("analysis", "drake", "drake-cache-mcdb.sqlite"))
 cache <- storr::storr_dbi("datatable", "keystable", db)
 cache$del(key = "lock", namespace = "session")
-
-## Run the pipeline
-nodename <- Sys.info()["nodename"]
-if(grepl("ufhpc", nodename)) {
-  print("I know I am on the HiPerGator!")
-  library(clustermq)
-  options(clustermq.scheduler = "slurm", clustermq.template = here::here("slurm_clustermq.tmpl"))
-  ## Run the pipeline parallelized for HiPerGator
-  make(all,
-       force = TRUE,
-       cache = cache,
-       cache_log_file = here::here("analysis", "drake", "cache_log_mcdb.txt"),
-       verbose = 1,
-       parallelism = "clustermq",
-       jobs = 20,
-       caching = "master",
-       memory_strategy = "autoclean",
-       garbage_collection = TRUE) # Important for DBI caches!
-} else {
+#
+# ## Run the pipeline
+# nodename <- Sys.info()["nodename"]
+# if(grepl("ufhpc", nodename)) {
+#   print("I know I am on the HiPerGator!")
+#   library(clustermq)
+#   options(clustermq.scheduler = "slurm", clustermq.template = here::here("slurm_clustermq.tmpl"))
+#   ## Run the pipeline parallelized for HiPerGator
+#   make(all,
+#        force = TRUE,
+#        cache = cache,
+#        cache_log_file = here::here("analysis", "drake", "cache_log_mcdb.txt"),
+#        verbose = 1,
+#        parallelism = "clustermq",
+#        jobs = 20,
+#        caching = "master",
+#        memory_strategy = "autoclean",
+#        garbage_collection = TRUE) # Important for DBI caches!
+# } else {
 library(clustermq)
 options(clustermq.scheduler = "multicore")
 # Run the pipeline on multiple local cores
 system.time(make(all, cache = cache, cache_log_file = here::here("analysis", "drake", "cache_log_mcdb.txt"), verbose = 1, memory_strategy = "autoclean"))
-}
+#}
 
 DBI::dbDisconnect(db)
 rm(cache)
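The mcdb file is the only one whose diff shows the full lifecycle of the SQLite-backed drake cache these pipelines share. Pulled together from the surrounding lines as a sketch (the calls are all in the diff; the explanatory comments are interpretive):

# Open a storr cache backed by a single SQLite file so drake targets persist across runs.
db <- DBI::dbConnect(RSQLite::SQLite(), here::here("analysis", "drake", "drake-cache-mcdb.sqlite"))
cache <- storr::storr_dbi("datatable", "keystable", db)

# Presumably clears a stale session lock left by an interrupted run,
# so a fresh make() can reuse the cache.
cache$del(key = "lock", namespace = "session")

# ... make(all, cache = cache, ...) runs the pipeline against this cache ...

# Close the connection and drop the handle once the run finishes.
DBI::dbDisconnect(db)
rm(cache)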
34 changes: 17 additions & 17 deletions analysis/miscabund_pipeline.R
@@ -53,27 +53,27 @@ cache$del(key = "lock", namespace = "session")
 
 # Run the pipeline
 nodename <- Sys.info()["nodename"]
-if(grepl("ufhpc", nodename)) {
-  print("I know I am on the HiPerGator!")
-  library(clustermq)
-  options(clustermq.scheduler = "slurm", clustermq.template = here::here("slurm_clustermq.tmpl"))
-  ## Run the pipeline parallelized for HiPerGator
-  make(all,
-       force = TRUE,
-       cache = cache,
-       cache_log_file = here::here("analysis", "drake", "cache_log_miscabund.txt"),
-       verbose = 1,
-       parallelism = "clustermq",
-       jobs = 100,
-       caching = "master",
-       memory_strategy = "autoclean",
-       garbage_collection = TRUE) # Important for DBI caches!
-} else {
+# if(grepl("ufhpc", nodename)) {
+#   print("I know I am on the HiPerGator!")
+#   library(clustermq)
+#   options(clustermq.scheduler = "slurm", clustermq.template = here::here("slurm_clustermq.tmpl"))
+#   ## Run the pipeline parallelized for HiPerGator
+#   make(all,
+#        force = TRUE,
+#        cache = cache,
+#        cache_log_file = here::here("analysis", "drake", "cache_log_miscabund.txt"),
+#        verbose = 1,
+#        parallelism = "clustermq",
+#        jobs = 100,
+#        caching = "master",
+#        memory_strategy = "autoclean",
+#        garbage_collection = TRUE) # Important for DBI caches!
+# } else {
 library(clustermq)
 options(clustermq.scheduler = "multicore")
 # Run the pipeline on multiple local cores
 system.time(make(all, cache = cache, cache_log_file = here::here("analysis", "drake", "cache_log_miscabund.txt"), verbose = 1, memory_strategy = "autoclean"))
-}
+#}
 
 #system.time(make(all, cache = cache, cache_log_file = here::here("analysis", "drake", "cache_log_miscabund.txt")))
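Because every pipeline writes to a DBI cache rather than drake's default .drake/ folder, inspecting results afterwards has to go through the same cache object. A sketch assuming drake's standard accessors; some_target is a hypothetical name standing in for a real target from the plan:

# List what the cache holds, then load one target into the session.
library(drake)
cached(cache = cache)                         # names of stored targets
result <- readd(some_target, cache = cache)   # "some_target" is a placeholder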

2 changes: 1 addition & 1 deletion submit_bbs_pipeline.sbatch
@@ -6,7 +6,7 @@
 #SBATCH --qos=ewhite-b
 #SBATCH --ntasks=1 # Number of MPI ranks
 #SBATCH --cpus-per-task=1 # Number of cores per MPI rank
-#SBATCH --mem-per-cpu=8GB
+#SBATCH --mem-per-cpu=4GB
 #SBATCH --time=47:00:00 #Time limit hrs:min:sec
 #SBATCH --output logs/bbs_pipeline%j.out
 #SBATCH --error logs/bbs_pipeline%j.err
2 changes: 1 addition & 1 deletion submit_fia_pipeline.sbatch
@@ -6,7 +6,7 @@
 #SBATCH --qos=ewhite-b
 #SBATCH --ntasks=1 # Number of MPI ranks
 #SBATCH --cpus-per-task=1 # Number of cores per MPI rank
-#SBATCH --mem-per-cpu=10GB
+#SBATCH --mem-per-cpu=4GB
 #SBATCH --time=47:00:00 #Time limit hrs:min:sec
 #SBATCH --output logs/fia_pipeline%j.out
 #SBATCH --error logs/fia_pipeline%j.err
2 changes: 1 addition & 1 deletion submit_fia_small_pipeline.sbatch
@@ -6,7 +6,7 @@
 #SBATCH --qos=ewhite-b
 #SBATCH --ntasks=1 # Number of MPI ranks
 #SBATCH --cpus-per-task=1 # Number of cores per MPI rank
-#SBATCH --mem-per-cpu=15GB
+#SBATCH --mem-per-cpu=4GB
 #SBATCH --time=47:00:00 #Time limit hrs:min:sec
 #SBATCH --output logs/fias_pipeline%j.out
 #SBATCH --error logs/fias_pipeline%j.err
2 changes: 1 addition & 1 deletion submit_gentry_pipeline.sbatch
@@ -6,7 +6,7 @@
 #SBATCH --qos=ewhite-b
 #SBATCH --ntasks=1 # Number of MPI ranks
 #SBATCH --cpus-per-task=1 # Number of cores per MPI rank
-#SBATCH --mem-per-cpu=8GB
+#SBATCH --mem-per-cpu=4GB
 #SBATCH --time=47:00:00 #Time limit hrs:min:sec
 #SBATCH --output logs/gentry_pipeline%j.out
 #SBATCH --error logs/gentry_pipeline%j.err
2 changes: 1 addition & 1 deletion submit_mcdb_pipeline.sbatch
@@ -6,7 +6,7 @@
 #SBATCH --qos=ewhite-b
 #SBATCH --ntasks=1 # Number of MPI ranks
 #SBATCH --cpus-per-task=1 # Number of cores per MPI rank
-#SBATCH --mem-per-cpu=8GB
+#SBATCH --mem-per-cpu=4GB
 #SBATCH --time=24:00:00 #Time limit hrs:min:sec
 #SBATCH --output logs/mcdb_pipeline%j.out
 #SBATCH --error logs/mcdb_pipeline%j.err
2 changes: 1 addition & 1 deletion submit_misc_pipeline.sbatch
@@ -6,7 +6,7 @@
 #SBATCH --qos=ewhite-b
 #SBATCH --ntasks=1 # Number of MPI ranks
 #SBATCH --cpus-per-task=1 # Number of cores per MPI rank
-#SBATCH --mem-per-cpu=15GB
+#SBATCH --mem-per-cpu=4GB
 #SBATCH --time=47:00:00 #Time limit hrs:min:sec
 #SBATCH --output logs/misc_pipeline%j.out
 #SBATCH --error logs/misc_pipeline%j.err
