diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0c66adc..8360eb5 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -21,7 +21,8 @@ repos:
 #    - tidyr
 #    - tokenizers
 #    - urltools
-    - id: use-tidy-description
+    # - id: use-tidy-description
+    #   args: [--warn_only]
     - id: lintr
       args: [--warn_only]
     - id: readme-rmd-rendered
diff --git a/DESCRIPTION b/DESCRIPTION
index 4d215a5..aac5d93 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: gpttools
 Title: Extensions and Tools for gptstudio
-Version: 0.0.8.9009
+Version: 0.0.8.9010
 Authors@R:
     person("James", "Wade", , "github@jameshwade.com", role = c("aut", "cre"),
            comment = c(ORCID = "0000-0002-9740-1905"))
@@ -39,15 +39,20 @@ Imports:
     xml2,
     yaml
 Suggests:
-    bslib,
     bsicons,
+    bslib,
     covr,
+    furrr,
+    future,
     htmltools,
     httr,
     knitr,
     later,
     mockr,
+    pak,
     pdftools,
+    precommit,
+    reprex,
     reticulate,
     rmarkdown,
     roxygen2,
@@ -61,13 +66,12 @@ Suggests:
     tuneR,
     uuid,
     waiter,
-    withr,
-    future,
-    furrr,
-    reprex
-Remotes: michelnivard/gptstudio
+    withr
 VignetteBuilder:
     knitr
+Remotes:
+    michelnivard/gptstudio,
+    rstudio/rstudioapi
 Config/testthat/edition: 3
 Config/testthat/parallel: true
 Encoding: UTF-8
diff --git a/NEWS.md b/NEWS.md
index d62d2d1..c4c4c55 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -4,6 +4,7 @@
 
 ## Major features and improvements
 
+* Added a GitHub Copilot-like code suggestion addin called "Copilot."
 * Added support for more AI services: anthropic, huggingface, google ai studio, and ollama (local models) (in #0042b93, #f3c64c2).
 * Implemented a fully local option for AI models (in #231f2c8, #482157f).
 * Users can now use local embeddings as an option within the package (in #0042b93).
diff --git a/R/chat.R b/R/chat.R
index ecc616c..7e967fa 100644
--- a/R/chat.R
+++ b/R/chat.R
@@ -41,7 +41,7 @@
 #'   `gptstudio_response_process()` for further processing. Defaults to `FALSE`.
 #'   Refer to `gptstudio_response_process()` for more details.,
 #' @param where A character string indicating the location or environment where
-#'   the chat is taking place. Options are `c("", "source", and "shiny")`. The
-#'   default is `""`, which means the chat is taking place in the R console.
+#'   the chat is taking place. Options are `c("console", "source", and "shiny")`. The
+#'   default is `"console"`, which means the chat is taking place in the R console.
 #' @param ... Reserved for future use.
 #'
@@ -85,7 +85,7 @@ chat <- function(prompt,
                 task = NULL,
                 custom_prompt = NULL,
                 process_response = FALSE,
-                where = "",
+                where = "console",
                 ...) {
   if (rlang::is_false(stream) ||
     service %in% c("google", "azure_openai", "huggingface")) {
     response <-
@@ -110,38 +110,100 @@ chat <- function(prompt,
       response$response
     }
   } else {
-    if (where != "") where <- paste0("_", where)
-    callback <- glue::glue("create_stream_handler_{service}{where}") |>
-      get()
-    switch(service,
-      "openai" = {
-        response <- stream_chat_openai(
-          prompt = prompt,
-          element_callback = callback(),
-          model = model
-        )
-      },
-      "anthropic" = {
-        response <- stream_chat_anthropic(
-          prompt = prompt,
-          element_callback = callback(),
-          model = model
-        )
-      },
-      "perplexity" = {
-        response <- stream_chat_perplexity(
-          prompt = prompt,
-          element_callback = callback(),
-          model = model
-        )
-      },
-      "cohere" = {
-        response <- stream_chat_cohere(
-          prompt = prompt,
-          element_callback = callback(),
-          model = model
-        )
-      }
+    stream_chat(
+      prompt = prompt,
+      service = service,
+      r = NULL,
+      output_id = NULL,
+      where = where
     )
   }
 }
+
+#' Ghost Chat
+#'
+#' @inheritParams chat
+#'
+ghost_chat <- function(service = getOption("gpttools.service", "openai"),
+                       stream = TRUE,
+                       where = "source") {
+  context <- get_cursor_context()
+
+  instructions <- glue::glue(
+    "You are an expert coding assistant that provides brief code suggestions
+    directly into the file as code. Your response will go directly into an
+    .{context$file_ext} file. Your response should only contain code or code
+    comments. Do not add free text.
+    You are given context above and below the current cursor position.
+
+    Here is an example:
+
+    library(tidyverse)
+
+    p1 <-
+      ggplot(mtcars, aes(x = mpg, y = wt)) +
+      geom_point() +
+      geom_smooth(method = 'lm') +
+      labs(title = 'MPG vs. Weight', x = 'Miles per Gallon', y = 'Weight')
+
+    [[start_here]]
+
+    ggsave(\"myplot.png\", p1)
+
+    Your response begins at the placeholder [[start_here]].
+
+    Here is the context:
+
+    {context$above}
+    {context$below}"
+  )
+
+  stream_chat(
+    prompt = instructions,
+    service = service,
+    r = NULL,
+    output_id = NULL,
+    where = where
+  )
+}
+
+get_cursor_context <- function(context_lines = 20,
+                               placeholder = "[[start_here]]") {
+  doc <- rstudioapi::getActiveDocumentContext()
+  cursor_line <- doc$selection[[1]]$range$start[1]
+  cursor_pos <- doc$selection[[1]]$range$end
+  start_line <- max(1, cursor_line - context_lines)
+  end_line <- min(length(doc$contents), cursor_line + context_lines)
+
+  original_str <- doc$contents[cursor_line]
+  doc$contents[cursor_line] <-
+    stringr::str_c(
+      stringr::str_sub(original_str, end = cursor_pos[2] - 1),
+      placeholder,
+      stringr::str_sub(original_str, start = cursor_pos[2])
+    )
+
+  # cursor line (with the placeholder) is always included in the context above
+  context_above <- doc$contents[start_line:cursor_line] |>
+    paste0(collapse = "\n")
+
+  context_below <- if (end_line > cursor_line) {
+    doc$contents[(cursor_line + 1):end_line] |>
+      paste0(collapse = "\n")
+  } else {
+    # "" rather than character(0), which would make glue() return character(0)
+    ""
+  }
+
+  if (doc$path == "") {
+    file_ext <- "R"
+  } else {
+    file_ext <- doc$path |> tools::file_ext()
+  }
+
+  list(
+    above = context_above,
+    below = context_below,
+    cursor = cursor_pos,
+    file_ext = file_ext
+  )
+}
diff --git a/R/history.R b/R/history.R
index 56bc71f..fcbd227 100644
--- a/R/history.R
+++ b/R/history.R
@@ -364,14 +364,15 @@ chat_with_context <- function(query,
     answer <- stream_chat_openai(
       prompt = simple_prompt,
-      element_callback = create_stream_handler()
+      element_callback = create_handler(service)
     )
   } else {
-    stream_chat_shiny(
+    stream_chat(
       prompt = simple_prompt,
       service = service,
       r = rv,
-      output_id = "streaming"
+      output_id = "streaming",
+      where = "shiny"
     )
     answer <- rv$response
   }
diff --git a/R/stream-anthropic.R b/R/stream-anthropic.R
index e43bfa6..1a7ddb4 100644
--- a/R/stream-anthropic.R
+++ b/R/stream-anthropic.R
@@ -1,71 +1,5 @@
-create_stream_handler_anthropic <- function() {
-  env <- rlang::env()
-
-  function(x) {
-    x <- rawToChar(x)
-
-    pattern <- "\\{\"type\":\"completion\",.*\"log_id\":\"compl_[^\"]*\"\\}"
-
-    if (rlang::is_null(env$resp)) {
-      env$resp <- x
-    } else {
-      env$resp <- paste0(env$resp, x)
-    }
-    if (stringr::str_detect(env$resp, pattern)) {
-      parsed <- stringr::str_extract(env$resp, pattern) |>
-        jsonlite::fromJSON() |>
-        purrr::pluck("completion")
-
-      env$full_resp <- paste0(env$full_resp, parsed)
-
-      cat(parsed)
-
-      # Use shinyjs to update a div with the response
-      # shinyjs::html(output_id, env$full_resp)
-      # r$response <- env$full_resp
-
-      env$resp <- stringr::str_split(env$resp, pattern)
-      env$resp <- env$resp[[1]][[length(env$resp[[1]])]]
-    }
-    TRUE
-  }
-}
-
-create_stream_handler_anthropic_for_shiny <- function(r, output_id) {
-  env <- rlang::env()
-
-  function(x) {
-    x <- rawToChar(x)
-
-    # cat(x)
-
-    pattern <- "\\{\"type\":\"completion\",.*\"log_id\":\"compl_[^\"]*\"\\}"
-
-    if (rlang::is_null(env$resp)) {
-      env$resp <- x
-    } else {
-      env$resp <- paste0(env$resp, x)
-    }
-    if (stringr::str_detect(env$resp, pattern)) {
-      parsed <- stringr::str_extract(env$resp, pattern) |>
-        jsonlite::fromJSON() |>
-        purrr::pluck("completion")
-
-      env$full_resp <- paste0(env$full_resp, parsed)
-
-      # Use shinyjs to update a div with the response
-      shinyjs::html(output_id, env$full_resp)
-      r$response <- env$full_resp
-
-      env$resp <- stringr::str_split(env$resp, pattern)
-      env$resp <- env$resp[[1]][[length(env$resp[[1]])]]
-    }
-    TRUE
-  }
-}
-
 stream_chat_anthropic <- function(prompt,
-
element_callback = create_stream_handler_anthropic(), + element_callback = create_handler("anthropic"), model = "claude-2", key = Sys.getenv("ANTHROPIC_API_KEY")) { request_body <- list( diff --git a/R/stream-chat.R b/R/stream-chat.R index 87a5c3e..e8311a4 100644 --- a/R/stream-chat.R +++ b/R/stream-chat.R @@ -1,39 +1,46 @@ -stream_chat_shiny <- function(prompt, service, r, output_id) { +stream_chat <- function(prompt, + service = getOption("gpttools.service"), + r = NULL, + output_id = "streaming", + where = "console") { switch(service, "openai" = { response <- stream_chat_openai( prompt = prompt, - element_callback = create_handler_for_shiny("openai", r, output_id) + element_callback = create_handler("openai", r, output_id, where) ) }, "anthropic" = { response <- stream_chat_anthropic( prompt = prompt, - element_callback = create_handler_for_shiny("anthropic", r, output_id) + element_callback = create_handler("anthropic", r, output_id, where) ) }, "perplexity" = { response <- stream_chat_perplexity( prompt = prompt, - element_callback = create_handler_for_shiny("perplexity", r, output_id) + element_callback = create_handler("perplexity", r, output_id, where) ) }, "cohere" = { response <- stream_chat_cohere( prompt = prompt, - element_callback = create_handler_for_shiny("cohere", r, output_id) + element_callback = create_handler("cohere", r, output_id, where) ) }, "ollama" = { response <- stream_chat_ollama( prompt = prompt, - element_callback = create_handler_for_shiny("ollama", r, output_id) + element_callback = create_handler("ollama", r, output_id, where) ) } ) } -create_handler_for_shiny <- function(service = "openai", r, output_id = "streaming") { +create_handler <- function(service = "openai", + r, + output_id = "streaming", + where = "console") { env <- rlang::env() env$resp <- NULL env$full_resp <- NULL @@ -62,11 +69,25 @@ create_handler_for_shiny <- function(service = "openai", r, output_id = "streami jsonlite::fromJSON() |> purrr::pluck(!!!new_pluck) env$full_resp <- paste0(env$full_resp, parsed) - shinyjs::html( - output_id, - shiny::markdown(paste("**Assistant**", env$full_resp, sep = "\n\n")) - ) - r$response <- env$full_resp + + if (where == "shiny") { + shinyjs::html( + output_id, + shiny::markdown(paste("**Assistant**", env$full_resp, sep = "\n\n")) + ) + + r$response <- env$full_resp + } else if (where == "console") { + cat(parsed) + } else if (where == "source") { + rlang::check_installed("pak") + rlang::check_installed("rstudioapi", + version = "0.15.0.9", + action = \(pkg, ...) 
pak::pak("rstudio/rstudioapi") + ) + rstudioapi::setGhostText(env$full_resp) + } + env$resp <- stringr::str_split(env$resp, pattern) env$resp <- env$resp[[1]][[length(env$resp[[1]])]] } diff --git a/R/stream-cohere.R b/R/stream-cohere.R index 2883dfc..c905a1b 100644 --- a/R/stream-cohere.R +++ b/R/stream-cohere.R @@ -1,6 +1,6 @@ stream_chat_cohere <- function(prompt, - element_callback = create_stream_handler_cohere(), - model = "command", + model = getOption("gpttools.model", "command"), + element_callback = create_handler("cohere"), key = Sys.getenv("COHERE_API_KEY")) { request_body <- list( message = prompt, @@ -31,38 +31,3 @@ stream_chat_cohere <- function(prompt, )) } } - -create_stream_handler_cohere <- function() { - env <- rlang::env() - - function(x) { - x <- rawToChar(x) - # cat(x) - - pattern <- - '\\{"is_finished":false,"event_type":"text-generation","text":".*"\\}' - - if (rlang::is_null(env$resp)) { - env$resp <- x - } else { - env$resp <- paste0(env$resp, x) - } - if (stringr::str_detect(env$resp, pattern)) { - parsed <- stringr::str_extract(env$resp, pattern) |> - jsonlite::fromJSON() |> - purrr::pluck("text") - - env$full_resp <- paste0(env$full_resp, parsed) - - cat(parsed) - - # # Uncomment and customize if you need to update UI components in a Shiny app: - # shinyjs::html(output_id, env$full_resp) - # r$response <- env$full_resp - - env$resp <- stringr::str_split(env$resp, pattern) - env$resp <- env$resp[[1]][[length(env$resp[[1]])]] - } - TRUE - } -} diff --git a/R/stream-ollama.R b/R/stream-ollama.R index fc1a4a1..ffb16d0 100644 --- a/R/stream-ollama.R +++ b/R/stream-ollama.R @@ -1,6 +1,6 @@ stream_chat_ollama <- function(prompt, - model = "phi", - element_callback = create_stream_handler_ollama()) { + model = getOption("gpttools.model"), + element_callback = create_handler("ollama")) { body <- list( model = model, prompt = prompt, @@ -53,36 +53,3 @@ ollama_is_available <- function(verbose = FALSE) { invisible(check_value) } - -create_stream_handler_ollama <- function() { - env <- rlang::env() - - function(x) { - x <- rawToChar(x) - - pattern <- '\\{"model":.*"done":false\\}' - - if (rlang::is_null(env$resp)) { - env$resp <- x - } else { - env$resp <- paste0(env$resp, x) - } - if (stringr::str_detect(env$resp, pattern)) { - parsed <- stringr::str_extract(env$resp, pattern) |> - jsonlite::fromJSON() |> - purrr::pluck("response") - - env$full_resp <- paste0(env$full_resp, parsed) - - cat(parsed) - - # # Uncomment and customize if you need to update UI components in a Shiny app: - # shinyjs::html(output_id, env$full_resp) - # r$response <- env$full_resp - - env$resp <- stringr::str_split(env$resp, pattern) - env$resp <- env$resp[[1]][[length(env$resp[[1]])]] - } - TRUE - } -} diff --git a/R/stream-openai.R b/R/stream-openai.R index d3ebb62..9dbf48d 100644 --- a/R/stream-openai.R +++ b/R/stream-openai.R @@ -1,6 +1,6 @@ stream_chat_openai <- function(prompt = NULL, - element_callback = create_stream_handler_openai(), - model = "gpt-4-turbo-preview", + element_callback = create_handler("openai"), + model = getOption("gpttools.model", "gpt-4-turbo-preview"), openai_api_key = Sys.getenv("OPENAI_API_KEY"), shiny = FALSE) { messages <- list( @@ -30,113 +30,3 @@ stream_chat_openai <- function(prompt = NULL, invisible(response) } - - -create_stream_handler_openai <- function() { - # Create an environment to hold the state - env <- rlang::env() - env$resp <- NULL - - function(x) { - pattern <- '\\{"id":.*?\\}\\]\\}' - x <- rawToChar(x) - if (rlang::is_null(env$resp)) { - env$resp 
<- x - } else { - env$resp <- paste0(env$resp, x) - } - - if (stringr::str_detect(env$resp, pattern)) { - parsed <- stringr::str_extract(env$resp, pattern) |> - jsonlite::fromJSON() |> - purrr::pluck("choices") |> - purrr::pluck("delta") |> - purrr::pluck("content") - - cat(parsed) - - # Reset resp after processing - env$resp <- stringr::str_split(env$resp, pattern) - - env$resp <- env$resp[[1]][[length(env$resp[[1]])]] - } - TRUE - } -} - -# create_handler_for_shiny <- function(r, output_id = "streaming") { -# env <- rlang::env() -# env$resp <- NULL -# env$full_resp <- NULL -# -# function(x) { -# pattern <- '\\{"id":.*?\\}\\]\\}' -# x <- rawToChar(x) -# -# if (rlang::is_null(env$resp)) { -# env$resp <- x -# } else { -# env$resp <- paste0(env$resp, x) -# } -# -# if (stringr::str_detect(env$resp, pattern)) { -# parsed <- stringr::str_extract(env$resp, pattern) |> -# jsonlite::fromJSON() |> -# purrr::pluck("choices", "delta", "content") -# -# env$full_resp <- paste0(env$full_resp, parsed) -# -# # Use shinyjs to update a div with the response -# shinyjs::html(output_id, -# shiny::markdown(paste("**Assistant**", env$full_resp, sep = "\n\n"))) -# r$response <- env$full_resp -# -# env$resp <- stringr::str_split(env$resp, pattern) -# env$resp <- env$resp[[1]][[length(env$resp[[1]])]] -# } -# TRUE -# } -# } - - -# response <- -# stream_chat_completion( -# message = "Tell me a story about a magical backpack.", -# element_callback = function(content) cat(content, "\n") -# ) -# -# result <- -# callr::r(\() { -# stream_chat_completion( -# message = "Tell me a story about a magical backpack.", -# element_callback = function(content) cat(content, "\n") -# ) -# }) -# -# Define a wrapper function to call in the subprocess -# subprocess_function <- function(stream_chat_completion, create_stream_handler, message, element_callback) { -# environment(stream_chat_completion) <- environment() -# environment(create_stream_handler) <- environment() -# stream_chat_completion( -# message = message, -# element_callback = element_callback -# ) -# } -# -# # Call the subprocess with the wrapper function, passing the necessary objects and arguments -# result <- callr::r_bg( -# func = subprocess_function, -# args = list( -# stream_chat_completion = stream_chat_completion, -# create_stream_handler = create_stream_handler, -# message = "Tell me a story about a magical backpack.", -# element_callback = function(content) cat(content, "\n") -# ) -# ) -# -# output <- NULL -# while (rlang::is_true(result$is_alive())) { -# cat(result$read_output()) -# output <- paste0(output, result$read_output()) -# Sys.sleep(0.1) -# } diff --git a/R/stream-perplexity.R b/R/stream-perplexity.R index 9617a29..d990dd1 100644 --- a/R/stream-perplexity.R +++ b/R/stream-perplexity.R @@ -1,68 +1,6 @@ -request_base_perplexity <- function(api_key = Sys.getenv("PERPLEXITY_API_KEY")) { - url <- "https://api.perplexity.ai/chat/completions" - httr2::request(url) |> - httr2::req_method("POST") |> - httr2::req_headers( - accept = "application/json", - "Content-Type" = "application/json", - Authorization = paste("Bearer", api_key) - ) -} - -query_api_perplexity <- function(request_body, api_key = Sys.getenv("PERPLEXITY_API_KEY")) { - response <- request_base_perplexity(api_key) |> - httr2::req_body_json(data = request_body) |> - httr2::req_retry(max_tries = 3) |> - httr2::req_perform() - - if (httr2::resp_is_error(response)) { - status <- httr2::resp_status(response) - description <- httr2::resp_status_desc(response) - stop("Perplexity API request failed with 
error ", status, ": ", description, call. = FALSE) - } - - httr2::resp_body_json(response) -} - -create_stream_handler_perplexity <- function(output_id = NULL, r = NULL) { - env <- rlang::env() - function(x) { - x <- rawToChar(x) - # cat(x) - pattern <- '\\{"id".*?\\}\\}\\]\\}' - - if (rlang::is_null(env$resp)) { - env$resp <- x - } else { - env$resp <- paste0(env$resp, x) - } - - if (stringr::str_detect(env$resp, pattern)) { - parsed_no_pluck <<- stringr::str_extract(env$resp, pattern) |> - jsonlite::fromJSON() - - parsed <- - parsed_no_pluck |> - purrr::pluck("choices", "delta", "content") - - env$full_resp <- paste0(env$full_resp, parsed) - - cat(parsed) - - # Use shinyjs to update a div with the response - # shinyjs::html(output_id, env$full_resp) - # r$response <- env$full_resp - - env$resp <- stringr::str_split(env$resp, pattern) - env$resp <- env$resp[[1]][[length(env$resp[[1]])]] - } - TRUE - } -} - stream_chat_perplexity <- function(prompt, - element_callback = create_stream_handler_perplexity(), - model = "pplx-7b-chat", + element_callback = create_handler("perplexity"), + model = getOption("gpttools.model", "pplx-7b-chat"), api_key = Sys.getenv("PERPLEXITY_API_KEY")) { request_body <- list( model = model, @@ -72,7 +10,14 @@ stream_chat_perplexity <- function(prompt, stream = TRUE ) - response <- request_base_perplexity(api_key) |> + response <- + httr2::request("https://api.perplexity.ai/chat/completions") |> + httr2::req_method("POST") |> + httr2::req_headers( + accept = "application/json", + "Content-Type" = "application/json", + Authorization = paste("Bearer", api_key) + ) |> httr2::req_body_json(data = request_body) |> httr2::req_retry(max_tries = 3) |> httr2::req_perform_stream(callback = element_callback, buffer_kb = 0.01) diff --git a/inst/rstudio/addins.dcf b/inst/rstudio/addins.dcf index c90ee7b..dc64c61 100644 --- a/inst/rstudio/addins.dcf +++ b/inst/rstudio/addins.dcf @@ -3,6 +3,11 @@ Description: Chat app with Retrieval for use with various AI services Binding: chat_with_retrieval Interactive: true +Name: Copilot +Description: Generate code suggestions in source docs like .R, .Rmd, and .qmd +Binding: ghost_chat +Interactive: true + Name: Convert script to function Description: Generate comments for your code Binding: script_to_function_addin diff --git a/inst/streaming_example.R b/inst/streaming_example.R index 7253793..2fe6b95 100644 --- a/inst/streaming_example.R +++ b/inst/streaming_example.R @@ -23,7 +23,11 @@ server <- function(input, output, session) { shinyjs::show("response") stream_chat_openai( prompt = input$message, - element_callback = create_stream_handler_for_shiny(rv, "response") + element_callback = create_handler("openai", + r = rv, + output_id = "response", + where = "shiny" + ) ) shinyjs::hide("response") }) diff --git a/man/chat.Rd b/man/chat.Rd index 0fee105..9e38fe1 100644 --- a/man/chat.Rd +++ b/man/chat.Rd @@ -15,7 +15,7 @@ chat( task = NULL, custom_prompt = NULL, process_response = FALSE, - where = "", + where = "console", ... ) } @@ -64,7 +64,7 @@ response. If \code{TRUE}, the response will be passed to Refer to \code{gptstudio_response_process()} for more details.,} \item{where}{A character string indicating the location or environment where -the chat is taking place. Options are \verb{c("", "source", and "shiny")}. The +the chat is taking place. Options are \verb{c("console", "source", and "shiny")}. 
The
-default is \code{""}, which means the chat is taking place in the R console.}
+default is \code{"console"}, which means the chat is taking place in the R console.}
 
 \item{...}{Reserved for future use.}
diff --git a/man/ghost_chat.Rd b/man/ghost_chat.Rd
new file mode 100755
index 0000000..f8d05fd
--- /dev/null
+++ b/man/ghost_chat.Rd
@@ -0,0 +1,30 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/chat.R
+\name{ghost_chat}
+\alias{ghost_chat}
+\title{Ghost Chat}
+\usage{
+ghost_chat(
+  service = getOption("gpttools.service", "openai"),
+  stream = TRUE,
+  where = "source"
+)
+}
+\arguments{
+\item{service}{The AI service to be used for the request. If not explicitly
+provided, this defaults to the value set in
+\code{getOption("gptstudio.service")}. If the option is not set, make sure to
+provide this parameter to avoid errors.}
+
+\item{stream}{A logical value indicating whether the interaction should be
+treated as a stream for continuous interactions. If not explicitly
+provided, this defaults to the value set in
+\code{getOption("gptstudio.stream")}.}
+
+\item{where}{A character string indicating the location or environment where
+the chat is taking place. Options are \verb{c("console", "source", and "shiny")}. The
+default is \code{"console"}, which means the chat is taking place in the R console.}
+}
+\description{
+Ghost Chat
+}
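
Note on the handler refactor: the five per-service create_stream_handler_* closures above collapse into a single create_handler() keyed off `service` and `where`. A minimal, self-contained sketch of the same closure pattern, runnable outside the package (make_handler and the fake JSON chunks are illustrative, not package API):

make_handler <- function() {
  env <- rlang::env(resp = NULL, full_resp = NULL)
  function(x) {
    # accumulate raw chunks until a complete message matches the pattern
    env$resp <- paste0(env$resp, rawToChar(x))
    pattern <- '\\{"text":"[^"]*"\\}'
    if (stringr::str_detect(env$resp, pattern)) {
      parsed <- stringr::str_extract(env$resp, pattern) |>
        jsonlite::fromJSON() |>
        purrr::pluck("text")
      env$full_resp <- paste0(env$full_resp, parsed)
      cat(parsed)
      # keep only the trailing partial chunk for the next call
      leftover <- stringr::str_split(env$resp, pattern)[[1]]
      env$resp <- leftover[[length(leftover)]]
    }
    # returning TRUE tells httr2::req_perform_stream() to keep consuming
    TRUE
  }
}

h <- make_handler()
h(charToRaw('{"text":"Hello, "}'))
h(charToRaw('{"text":"world"}'))
#> Hello, world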
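
Note on the cursor context: get_cursor_context() marks the cursor by splicing the [[start_here]] placeholder into the current line, then ships the surrounding lines to the model. The splice itself, demonstrated on a plain string (insert_placeholder is a hypothetical name; columns are 1-based, as in rstudioapi positions):

insert_placeholder <- function(line, col, placeholder = "[[start_here]]") {
  stringr::str_c(
    stringr::str_sub(line, end = col - 1),  # text before the cursor
    placeholder,
    stringr::str_sub(line, start = col)     # text after the cursor
  )
}

insert_placeholder("ggsave('myplot.png', p1)", col = 9)
#> [1] "ggsave('[[start_here]]myplot.png', p1)"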
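
Note on ghost text: the where = "source" branch of create_handler() previews the streamed suggestion as editor ghost text via rstudioapi::setGhostText(), which at the time of this patch ships only in the development build of rstudioapi (hence the new rstudio/rstudioapi entry under Remotes). A sketch, assuming RStudio and the dev rstudioapi are installed:

# install the development rstudioapi (version requirement assumed from the patch)
pak::pak("rstudio/rstudioapi")

# inside RStudio, text then appears at the cursor as a ghost suggestion
rstudioapi::setGhostText("p2 <- p1 + theme_minimal()")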
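
Note on configuration: model choice now flows through options instead of hard-coded defaults (each stream_chat_* reads getOption("gpttools.model", ...)). A possible session setup, using only option names that appear in this patch; the chosen values are examples, not recommendations:

options(
  gpttools.service = "ollama",
  gpttools.model = "phi" # any model already pulled locally with `ollama pull`
)

# streams to the console, the new default for `where`
chat("Summarize the mtcars dataset in one line.")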