From 57255d7c969eeb8d3110b1b8c28619ab47758e51 Mon Sep 17 00:00:00 2001 From: wlandau-lilly Date: Tue, 12 Nov 2024 20:57:32 +0000 Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20wlandau/?= =?UTF-8?q?crew@da957aea7a58f147b9c9d424148b166a830e2d5e=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- articles/plugins.html | 46 +++++++++++++++++++++++---------- pkgdown.yml | 2 +- reference/crew_eval.html | 2 +- reference/crew_random_name.html | 2 +- search.json | 2 +- 5 files changed, 37 insertions(+), 17 deletions(-) diff --git a/articles/plugins.html b/articles/plugins.html index 6912f17d..c52eaee9 100644 --- a/articles/plugins.html +++ b/articles/plugins.html @@ -237,6 +237,26 @@

Example#> [1] "crew::crew_worker(settings = list(url = \"ws://127.0.0.1:5000/3/aa9c59ea\", asyncdial = FALSE, autoexit = 15L, cleanup = 1L, output = TRUE, maxtasks = Inf, idletime = Inf, walltime = Inf, timerstart = 0L, tls = NULL, rs = NULL), launcher = \"my_launcher\", worker = 3L, instance = \"aa9c59ea\", options_metrics = crew::crew_options_metrics(path = NULL, seconds_interval = 5))"
+

Worker retries +

+

The launch_worker() method can call
self$crashes(index = worker) to get the number of
consecutive times the worker has crashed (exited without completing all
its tasks). You can use this crash count to assign more memory, CPU
cores, or other resources to the next launch of that worker. Here is a
pseudo-code sketch that starts at 16 GB of memory, adds 4 GB for each
consecutive crash, and resets to the original 16 GB once the worker
succeeds:

+
+launch_worker = function(call, name, launcher, worker, instance) {
+  # Start at 16 GB and add 4 GB for each consecutive crash of this worker.
+  gigabytes_memory <- 16 + 4 * self$crashes(index = worker)
+  # submit_job() stands in for your platform's job submission step.
+  submit_job(call, name, gigabytes_memory)
+}
+
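For a fuller picture, here is a minimal sketch of how the crash count could fit into a complete launcher class. submit_job() and cancel_job() are hypothetical placeholders for your platform's own submission and cancellation commands, and the 16 GB / 4 GB figures are only an example:

custom_retry_launcher_class <- R6::R6Class(
  classname = "custom_retry_launcher_class",
  inherit = crew::crew_class_launcher,
  public = list(
    launch_worker = function(call, name, launcher, worker, instance) {
      # Start at 16 GB and request 4 GB more for each consecutive crash.
      gigabytes_memory <- 16 + 4 * self$crashes(index = worker)
      # submit_job() is a hypothetical helper which submits the job and
      # returns a handle that terminate_worker() can use later.
      submit_job(call = call, name = name, memory = gigabytes_memory)
    },
    terminate_worker = function(handle) {
      # cancel_job() is a hypothetical helper which cancels the job.
      cancel_job(handle)
    }
  )
)

A controller helper like the one in the next section could then construct this launcher in place of the vignette's custom launcher class.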

This mechanism is similar to retryable +options in crew.cluster and in +crew.aws.batch.

+
+

Helper

It is useful to have a helper function that creates controllers with
@@ -257,7 +277,7 @@

Helper
hand. You may want to adjust the default arguments based on the
specifics of your platform, especially seconds_launch if workers take a
long time to launch.

-
+
 #' @title Create a controller with the custom launcher.
 #' @export
 #' @description Create an `R6` object to submit tasks and
@@ -335,19 +355,19 @@ 

Informal testing
First, create and start a controller. You may wish to monitor local
processes on your computer to make sure the mirai
dispatcher starts.

-
+
 library(crew)
 controller <- crew_controller_custom(workers = 2)
 controller$start()

Try pushing a task that gets the local IP address and process ID of the worker instance.

-
+
 controller$push(
   name = "get worker IP address and process ID",
   command = paste(getip::getip(type = "local"), ps::ps_pid())
 )

Wait for the task to complete and look at the result.

-
+
 controller$wait()
 result <- controller$pop()
 result$result[[1]]
@@ -361,7 +381,7 @@ 

Informal testing
SLURM or AWS Batch launcher.

-
+
 getip::getip(type = "local")
 #> "192.168.0.2"
 controller$launcher$workers$handle[[1]]$get_pid()
@@ -369,7 +389,7 @@ 

Informal testing
If you did not set any timeouts or task limits, the worker that ran the
task should still be online. The other worker had no tasks, so it did
not need to launch.

-
+
 controller$client$summary()
 #> # A tibble: 2 × 6
 #>   worker online instances assigned complete socket
@@ -379,7 +399,7 @@ 

Informal testing
When you are done, terminate the controller. This terminates the
mirai dispatcher process and the crew workers.

-
+
 controller$terminate()

Finally, use the process monitoring interface of your computing
platform or operating system to verify that all mirai
@@ -392,7 +412,7 @@
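For example, on a local machine one rough way to check is to list R processes with the ps package. This is a sketch only: process names differ by platform, and the filter below is just an illustration.

processes <- ps::ps()
# Look for leftover R sessions that could be crew workers or the
# mirai dispatcher.
processes[grepl("^R", processes$name), c("pid", "name")]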

Load testing
+
 library(crew)
 controller <- crew_controller_custom(
   seconds_idle = 2L,
@@ -477,7 +497,7 @@ 

Asynchrony
mirai::call_mirai() to make sure any tasks submitted by
launch_worker() have resolved before they are used by
terminate_worker().

-
+
 async_launcher_class <- R6::R6Class(
   classname = "custom_launcher_class",
   inherit = crew::crew_class_launcher,
@@ -501,7 +521,7 @@ 

Asynchrony
+
 crew_controller_async <- function(
   name = "async controller name",
   workers = 1L,
@@ -562,7 +582,7 @@ 

Asynchrony
+
 async_controller <- crew_controller_async(workers = 12, processes = 4)

async_controller$start() automatically
launches 4 local processes to asynchronously auto-scale the workers,
and
@@ -588,7 +608,7 @@

Managing workers
for listing and terminating AWS Batch jobs, as well as viewing
CloudWatch logs.

The source code for the local monitor is copied below:

-
+
 crew_monitor_local <- function() {
   crew_class_monitor_local$new()
 }
@@ -622,7 +642,7 @@ 

Managing workers
as.integer(sort(processes$pid[filter]))
}

Example usage:

-
+
 monitor <- crew_monitor_local()
 monitor$dispatchers() # List PIDs of all local {mirai} dispatcher processes.
 #> [1] 31215
diff --git a/pkgdown.yml b/pkgdown.yml
index fc8f6443..5e34d56e 100644
--- a/pkgdown.yml
+++ b/pkgdown.yml
@@ -9,7 +9,7 @@ articles:
   promises: promises.html
   risks: risks.html
   shiny: shiny.html
-last_built: 2024-11-11T20:29Z
+last_built: 2024-11-12T20:56Z
 urls:
   reference: https://wlandau.github.io/crew/reference
   article: https://wlandau.github.io/crew/articles
diff --git a/reference/crew_eval.html b/reference/crew_eval.html
index bc3d58aa..66d89a7d 100644
--- a/reference/crew_eval.html
+++ b/reference/crew_eval.html
@@ -150,7 +150,7 @@ 

See also
Examples

crew_eval(quote(1 + 1))
 #> $name
-#> [1] "unnamed_task_16d576670c3a"
+#> [1] "unnamed_task_16db697857a2"
 #> 
 #> $command
 #> [1] NA
diff --git a/reference/crew_random_name.html b/reference/crew_random_name.html
index 6dbeb563..97b1b3f7 100644
--- a/reference/crew_random_name.html
+++ b/reference/crew_random_name.html
@@ -93,7 +93,7 @@ 

See also

Examples

crew_random_name()
-#> [1] "d227cd74026a5d82d05cf477"
+#> [1] "3dd9067fae82953accd5ea8f"