-
My initial attempt looks like this:

```r
#' @export
store_class_repository.api <- function(repository, store, format) {
  # Strip any tar_format() metadata after "&" so only the base format name
  # determines the class.
  format <- gsub(pattern = "\\&.*$", replacement = "", x = format)
  c(
    sprintf("tar_api_%s", format),
    "tar_api",
    "tar_cloud",
    if_any(inherits(store, "tar_external"), character(0), "tar_external"),
    class(store)
  )
}
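# For example, a non-external store with format "rds" gains the classes
# c("tar_api_rds", "tar_api", "tar_cloud", "tar_external") ahead of class(store).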
#' @export
store_assert_repository_setting.api <- function(repository) {
  # No-op: accept the custom "api" repository setting without further checks.
}
#' @export
store_produce_path.tar_api <- function(store, name, object, path_store) {
  store_produce_api_path(
    store = store,
    name = name,
    object = object,
    path_store = path_store
  )
}
store_produce_api_path <- function(store, name, object, path_store) {
  # Read the endpoint identifier from target-level or global resources.
  triplicate <- store$resources$api$triplicate %|||% store$resources$triplicate
  tar_assert_nonempty(triplicate)
  tar_assert_chr(triplicate)
  tar_assert_scalar(triplicate)
  tar_assert_nzchar(triplicate)
  root_prefix <- store$resources$api$prefix %|||%
    store$resources$prefix %|||%
    path_store_default()
  prefix <- path_objects_dir(path_store = root_prefix)
  tar_assert_nonempty(prefix)
  tar_assert_chr(prefix)
  tar_assert_scalar(prefix)
  key <- file.path(prefix, name)
  tar_assert_nzchar(key)
  triplicate <- paste0("triplicate=", triplicate)
  key <- paste0("key=", key)
  c(triplicate, key)
}
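# The stored path field is a key-value vector, for example:
# c("triplicate=abc123", "key=_targets/objects/target_name")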
store_api_key <- function(path) {
  keyvalue_field(x = path, pattern = "^key=")
}
store_api_path_field <- function(path, pattern) {
  keyvalue_field(x = path, pattern = pattern)
}
keyvalue_field <- function(x, pattern) {
  element <- grep(pattern = pattern, x = x, value = TRUE)
  gsub(pattern = pattern, replacement = "", x = element)
}
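# Example: keyvalue_field(c("triplicate=abc", "key=a/b"), "^key=") returns "a/b".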
api_base_req <- function(
  base_url = Sys.getenv('API_ROOT_URL'),
  token = Sys.getenv('API_TOKEN')
) {
  httr2::request(base_url = base_url) |>
    httr2::req_headers(
      "Accept" = "application/json",
      "Authorization" = sprintf('Bearer %s', token),
      .redact = 'Authorization'
    ) |>
    httr2::req_method('GET')
}
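# api_base_req() returns a templated httr2 request (JSON accept header, redacted
# bearer auth); each store method below appends its endpoint and performs it.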
#' @export
store_read_object.tar_api <- function(store) {
  # store$file$path is the key-value vector; extract the real object key.
  key <- store_api_key(store$file$path)
  scratch <- path_scratch_temp_network(pattern = basename(key))
  on.exit(unlink(scratch))
  dir_create(dirname(scratch))
  # Download to a scratch file, then convert it like any local object.
  api_base_req() |>
    httr2::req_url_path_append("/file/download") |>
    httr2::req_url_query(path = key) |>
    httr2::req_perform(path = scratch)
  store_convert_object(store, store_read_path(store, scratch))
}
#' @export
store_exist_object.tar_api <- function(store, name = NULL) {
  # List the folder that should contain the object and look for its entry.
  key <- store_api_key(store$file$path)
  api_base_req() |>
    httr2::req_url_path_append("/file") |>
    httr2::req_url_query(path = dirname(key)) |>
    httr2::req_perform() |>
    httr2::resp_body_json() |>
    vapply(function(x) identical(x$name, name), logical(1L)) |>
    any()
}
#' @export
store_delete_objects.tar_api <- function(store) {
  message(
    "Cannot delete objects through the current API.\n",
    "You need to delete them manually."
  )
}
#' @export
store_upload_object.tar_api <- function(store) {
  on.exit(unlink(store$file$stage, recursive = TRUE, force = TRUE))
  store_upload_object_api(store)
}
store_upload_object_api <- function(store) {
  key <- store_api_key(store$file$path)
  api_base_req() |>
    httr2::req_method('POST') |>
    httr2::req_url_path_append("/file/upload") |>
    httr2::req_url_query(folder_path = dirname(key)) |>
    # Upload the staged local copy of the object.
    httr2::req_body_multipart(file = curl::form_file(store$file$stage)) |>
    httr2::req_perform()
  invisible()
}
#' @export
store_has_correct_hash.tar_api <- function(store) {
  hash <- store_api_hash(store)
  !is.null(hash) && identical(hash, store$file$hash)
}
store_api_hash <- function(store) {
  # Lazily create a shared inventory to cache hash lookups across targets.
  tar_runtime$inventories$api <- tar_runtime$inventories$api %|||%
    inventory_api_init()
  tar_runtime$inventories$api$get_cache(store = store)
}
#' @export
store_get_packages.tar_api <- function(store) {
  c("httr2", NextMethod())
}
# inventory ----
inventory_api_init <- function() {
  out <- inventory_api_new()
  out$reset()
  out
}
inventory_api_new <- function() {
  inventory_api_class$new()
}
inventory_api_class <- R6::R6Class(
  classname = "tar_inventory_api",
  inherit = inventory_class,
  class = FALSE,
  portable = FALSE,
  cloneable = FALSE,
  public = list(
    get_key = function(store) {
      store_api_key(store$file$path)
    },
    set_cache = function(store) {
      path <- store$file$path
      # TODO
    }
  )
)
```
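To sanity-check the HTTP layer on its own, here is a minimal sketch that inspects the composed request without sending it (the endpoint path and key are placeholder assumptions):

```r
# httr2::req_dry_run() prints the request and performs no network I/O.
Sys.setenv(API_ROOT_URL = "https://example.com/api", API_TOKEN = "dummy")
api_base_req() |>
  httr2::req_url_path_append("/file/download") |>
  httr2::req_url_query(path = "_targets/objects/my_target") |>
  httr2::req_dry_run()
```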
-
If you're willing to use content-addressable storage (CAS), then you could use the new interface that came with the latest release: https://docs.ropensci.org/targets/reference/tar_repository_cas.html. Also cf. #1232, #1323.
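For a concrete starting point, here is a minimal sketch of a CAS repository backed by a local `cas/` folder (the folder name and callbacks are my own illustration; swap in calls to your storage endpoint):

```r
library(targets)
tar_option_set(
  repository = tar_repository_cas(
    upload = function(key, path) {
      # Store the staged object file under its content-derived key.
      if (!dir.exists("cas")) dir.create("cas")
      file.copy(path, file.path("cas", key), overwrite = TRUE)
    },
    download = function(key, path) {
      # Copy the object back for targets to read.
      file.copy(file.path("cas", key), path, overwrite = TRUE)
    },
    exists = function(key) {
      file.exists(file.path("cas", key))
    }
  )
)
```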
-
A follow-up, just to check that we are using this as intended when packaging the functions. If I do this, targets can't find the exported functions of the package (similar behaviour to when code in the R/ subdirectory can't find functions outside of the main functions):

```r
targets::tar_option_set(
  repository = tar_repository_cas(
    upload = targets.egds::wise_upload(key, path),
    download = targets.egds::wise_download(key, path),
    exists = targets.egds::wise_exists(key)
  ),
  resources = tar_resources(
    repository_cas = tar_resources_repository_cas(
      envvars = c(
        MY_TOKEN = Sys.getenv('MY_TOKEN')
      )
    )
  )
)
```

But this works:

```r
targets::tar_option_set(
  repository = tar_repository_cas(
    upload = function(key, path) targets.egds::wise_upload(key, path),
    download = function(key, path) targets.egds::wise_download(key, path),
    exists = function(key) targets.egds::wise_exists(key)
  ),
  resources = tar_resources(
    repository_cas = tar_resources_repository_cas(
      envvars = c(
        MY_TOKEN = Sys.getenv('MY_TOKEN')
      )
    )
  )
)
```

If I try to put those wrapped functions in the package and export them, i.e.

```r
# Defined in the targets.egds package and exported as wise_repository():
wise_repository <- function() {
  tar_repository_cas(
    upload = function(key, path) targets.egds::wise_upload(key, path),
    download = function(key, path) targets.egds::wise_download(key, path),
    exists = function(key) targets.egds::wise_exists(key)
  )
}
```

then I get back to the same problem: targets can't find functions in the package. Is this behaviour intended?
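As a side note on the first snippet: `targets.egds::wise_upload(key, path)` is evaluated immediately when `tar_option_set()` runs, with no `key` or `path` in scope, whereas the anonymous-function form hands targets a function to call later. A minimal illustration with a stand-in function (names hypothetical):

```r
f <- function(key, path) file.path(key, path)
g <- function(key, path) f(key, path) # a wrapper function, evaluated only when called
g("k", "p")                           # works
# f(key, path) at the top level errors immediately: no key or path exist yet.
```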
-
Description
I have storage endpoints, similar in nature to how targets manages AWS S3 (or GCP) storage, that I want to connect to the pipeline for managing the objects targets creates.
Is there a good way to leverage the existing infrastructure of targets to do this? Or would I need to build one from scratch to mimic what class_aws, class_aws_file, and class_inventory_aws do?
Alternatively, could the AWS infrastructure built into targets be generalized to support any storage endpoint, with AWS as just one case of it?