diff --git a/src/bin/stream_api_buf_reader.rs b/src/bin/stream_api_buf_reader.rs
new file mode 100644
index 0000000..db9cd57
--- /dev/null
+++ b/src/bin/stream_api_buf_reader.rs
@@ -0,0 +1,92 @@
+use dotenvy::dotenv;
+use reqwest::{
+    blocking::Client,
+    header::{HeaderMap, HeaderValue, ACCEPT, AUTHORIZATION},
+};
+use serde::Deserialize;
+use serde_json::json;
+use std::io::{self, BufRead, BufReader, Write};
+#[allow(dead_code)]
+#[derive(Deserialize, Debug)]
+struct ChatCompletionChunk {
+    choices: Vec<Choices>,
+}
+
+#[allow(dead_code)]
+#[derive(Debug, Deserialize)]
+struct Choices {
+    index: u64,
+    delta: Delta,
+}
+
+#[allow(dead_code)]
+#[derive(Debug, Deserialize)]
+struct Delta {
+    content: String,
+}
+
+fn main() {
+    dotenv().expect(".env file not found");
+    let openai_api_key =
+        std::env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY not found in .env file");
+    let client = Client::new();
+    let url = "https://api.openai.com/v1/chat/completions";
+
+    let mut headers = HeaderMap::new();
+
+    headers.insert(
+        ACCEPT,
+        HeaderValue::from_bytes(b"text/event-stream").unwrap(),
+    );
+    headers.insert(
+        AUTHORIZATION,
+        HeaderValue::from_str(&format!("Bearer {}", openai_api_key)).unwrap(),
+    );
+
+    let request_body = json!(
+        {
+            "model": "gpt-4-turbo-preview",
+            "messages": [
+                {
+                    "role": "system",
+                    "content": "You are a helpful assistant, who generates output in JSON format only."
+                },
+                {"role": "user", "content": "Hi!"}
+            ],
+            "response_format": {"type": "json_object"},
+            "stream": true
+        }
+    );
+
+    let response = client
+        .post(url)
+        .headers(headers)
+        .json(&request_body)
+        .send()
+        .unwrap();
+    let reader = BufReader::new(response);
+
+    let mut buffer = String::new();
+    for line in reader.lines() {
+        let line = line.unwrap();
+
+        if line.starts_with("data: ") {
+            buffer.push_str(&line[6..]); // Extract payload after "data: "
+        } else if line.is_empty() {
+            // Encountered a complete SSE message
+            let chunk_result = serde_json::from_str::<ChatCompletionChunk>(&buffer);
+            match chunk_result {
+                Ok(chunk) => {
+                    print!("{}", chunk.choices[0].delta.content);
+                    io::stdout().flush().unwrap();
+                }
+                Err(_) => {
+                    // println!("Error parsing chunk: {:?}", e);
+                    continue;
+                }
+            }
+            buffer.clear();
+        }
+    }
+    println!();
+}
diff --git a/src/bin/stream_api_response_byte_stream.rs b/src/bin/stream_api_response_byte_stream.rs
new file mode 100644
index 0000000..99173a4
--- /dev/null
+++ b/src/bin/stream_api_response_byte_stream.rs
@@ -0,0 +1,64 @@
+use dotenvy::dotenv;
+use error_chain::error_chain;
+use futures_util::stream::StreamExt;
+use reqwest::{
+    header::{HeaderMap, HeaderValue, ACCEPT, AUTHORIZATION},
+    Client,
+};
+use serde_json::json;
+use std::env;
+
+error_chain! {
+    foreign_links {
+        Io(std::io::Error);
+        HttpRequest(reqwest::Error);
+    }
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    dotenv().expect(".env file not found");
+    let openai_api_key = env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY not found in .env file");
+    let url = "https://api.openai.com/v1/chat/completions";
+    let mut headers = HeaderMap::new();
+
+    headers.insert(
+        ACCEPT,
+        HeaderValue::from_bytes(b"text/event-stream").unwrap(),
+    );
+    headers.insert(
+        AUTHORIZATION,
+        HeaderValue::from_str(&format!("Bearer {}", openai_api_key)).unwrap(),
+    );
+
+    let request_body = json!(
+        {
+            "model": "gpt-4-turbo-preview",
+            "messages": [
+                {
+                    "role": "system",
+                    "content": "You are a helpful assistant, who generates output in JSON format only."
+                },
+                {"role": "user", "content": "What is the meaning of life?"}
+            ],
+            "response_format": {"type": "json_object"},
+            "stream": true
+        }
+    );
+
+    let client = Client::new();
+
+    let mut response_stream = client
+        .post(url)
+        .headers(headers)
+        .json(&request_body)
+        .send()
+        .await?
+        .bytes_stream();
+
+    while let Some(chunk) = response_stream.next().await {
+        println!("Chunk: {:?}", chunk?);
+    }
+
+    Ok(())
+}
diff --git a/src/bin/stream_api_response_chunk.rs b/src/bin/stream_api_response_chunk.rs
new file mode 100644
index 0000000..29ede2e
--- /dev/null
+++ b/src/bin/stream_api_response_chunk.rs
@@ -0,0 +1,61 @@
+use dotenvy::dotenv;
+use error_chain::error_chain;
+use reqwest::{
+    header::{HeaderMap, HeaderValue, ACCEPT, AUTHORIZATION},
+    Client,
+};
+use serde_json::json;
+use std::env;
+
+error_chain! {
+    foreign_links {
+        Io(std::io::Error);
+        HttpRequest(reqwest::Error);
+    }
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    dotenv().expect(".env file not found");
+    let openai_api_key = env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY not found in .env file");
+    let url = "https://api.openai.com/v1/chat/completions";
+    let mut headers = HeaderMap::new();
+
+    headers.insert(
+        ACCEPT,
+        HeaderValue::from_bytes(b"text/event-stream").unwrap(),
+    );
+    headers.insert(
+        AUTHORIZATION,
+        HeaderValue::from_str(&format!("Bearer {}", openai_api_key)).unwrap(),
+    );
+
+    let request_body = json!(
+        {
+            "model": "gpt-4-turbo-preview",
+            "messages": [
+                {
+                    "role": "system",
+                    "content": "You are a helpful assistant, who generates output in JSON format only."
+                },
+                {"role": "user", "content": "What is the meaning of life?"}
+            ],
+            "response_format": {"type": "json_object"},
+            "stream": true
+        }
+    );
+
+    let client = Client::new();
+    let mut response = client
+        .post(url)
+        .headers(headers.clone())
+        .json(&request_body)
+        .send()
+        .await?;
+
+    while let Some(chunk) = response.chunk().await? {
+        println!("Chunk: {:?}", chunk);
+    }
+
+    Ok(())
+}