diff --git a/trials/finetuning/__init__.py b/trials/finetuning/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/trials/finetuning/inference_fine_tuned.py b/trials/finetuning/inference_fine_tuned.py new file mode 100644 index 00000000..4a396071 --- /dev/null +++ b/trials/finetuning/inference_fine_tuned.py @@ -0,0 +1,24 @@ +# Steps to prepare data and submit/check OpenAI finetuning +# import subprocess +# subprocess.run("openai tools fine_tunes.prepare_data -f " + "finetuning_dataset.jsonl") +# export OPENAI_API_KEY= +# openai api fine_tunes.create -t -m +# openai api fine_tunes.list + + +import openai + +# Use your OpenAI API Key +openai.api_key = "" + +sample_chunks = ["You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . 
And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . -> ", + " We 're joined next by Thomas Curian , CEO of Google Cloud , and Alexander Wang , CEO and founder of Scale AI . Thomas joined Google in November 2018 as the CEO of Google Cloud . Prior to Google , Thomas spent 22 years at Oracle , where most recently he was president of product development . Before that , Thomas worked at McKinsey as a business analyst and engagement manager . His nearly 30 years of experience have given him a deep knowledge of engineering enterprise relationships and leadership of large organizations . Thomas 's degrees include an MBA in administration and management from Stanford University , as an RJ Miller scholar and a BSEE in electrical engineering and computer science from Princeton University , where he graduated suma cum laude . 
Thomas serves as a member of the Stanford graduate School of Business Advisory Council and Princeton University School of Engineering Advisory Council . Please welcome to the stage , Thomas Curian and Alexander Wang . This is a super exciting conversation . Thanks for being here , Thomas . -> "] + +# Give your finetuned model name here +# "davinci:ft-personal-2023-07-14-10-43-51" +model_name = "" +response = openai.Completion.create( + model=model_name, + prompt=sample_chunks[0]) + +print(response) diff --git a/trials/youtube_scraping.py b/trials/finetuning/youtube_scraping.py similarity index 100% rename from trials/youtube_scraping.py rename to trials/finetuning/youtube_scraping.py diff --git a/trials/server/__init__.py b/trials/server/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/trials/server_multithreaded.py b/trials/server/server_multithreaded.py similarity index 98% rename from trials/server_multithreaded.py rename to trials/server/server_multithreaded.py index 1d27dfdb..1c5e75d7 100644 --- a/trials/server_multithreaded.py +++ b/trials/server/server_multithreaded.py @@ -16,8 +16,8 @@ from av import AudioFifo from sortedcontainers import SortedDict from whisper_jax import FlaxWhisperPipline -from ..utils.log_utils import logger -from ..utils.run_utils import config, Mutex +from reflector.utils.log_utils import logger +from reflector.utils.run_utils import config, Mutex WHISPER_MODEL_SIZE = config['DEFAULT']["WHISPER_REAL_TIME_MODEL_SIZE"] pcs = set() diff --git a/trials/title_summary/__init__.py b/trials/title_summary/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/trials/api.py b/trials/title_summary/api.py similarity index 100% rename from trials/api.py rename to trials/title_summary/api.py diff --git a/trials/bert.py b/trials/title_summary/bert.py similarity index 100% rename from trials/bert.py rename to trials/title_summary/bert.py diff --git a/trials/gpt2.py b/trials/title_summary/gpt2.py similarity index 
100% rename from trials/gpt2.py rename to trials/title_summary/gpt2.py diff --git a/trials/incsum.py b/trials/title_summary/incsum.py similarity index 100% rename from trials/incsum.py rename to trials/title_summary/incsum.py diff --git a/trials/openai_endpoint.py b/trials/title_summary/openai_endpoint.py similarity index 99% rename from trials/openai_endpoint.py rename to trials/title_summary/openai_endpoint.py index 7a572353..c92856c5 100644 --- a/trials/openai_endpoint.py +++ b/trials/title_summary/openai_endpoint.py @@ -28,7 +28,7 @@ response = openai.ChatCompletion.create(model=model, n=1, max_tokens=300) -# Try finetuned model +# Try fine tuned model # model = "davinci:ft-personal-2023-07-14-10-43-51" # response = openai.Completion.create(model=model, # prompt=sample_chunks[0] + " -> ") diff --git a/trials/pegasus.py b/trials/title_summary/pegasus.py similarity index 100% rename from trials/pegasus.py rename to trials/title_summary/pegasus.py diff --git a/trials/t5.py b/trials/title_summary/t5.py similarity index 100% rename from trials/t5.py rename to trials/title_summary/t5.py diff --git a/trials/transcript.txt b/trials/title_summary/transcript.txt similarity index 100% rename from trials/transcript.txt rename to trials/title_summary/transcript.txt diff --git a/trials/vicuna.py b/trials/title_summary/vicuna.py similarity index 100% rename from trials/vicuna.py rename to trials/title_summary/vicuna.py