mirror of
https://github.com/Monadical-SAS/reflector.git
synced 2025-12-20 20:29:06 +00:00
move all experiments to trials
43
trials/bert.py
Normal file
@@ -0,0 +1,43 @@
import torch
from transformers import BertTokenizer, BertModel
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

# Load the pre-trained BERT model and tokenizer
model_name = "bert-base-uncased"
model = BertModel.from_pretrained(model_name)
tokenizer = BertTokenizer.from_pretrained(model_name)

# Set the device to use
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Load the SentenceTransformer model
sentence_transformer_model = SentenceTransformer('average_word_embeddings_glove.6B.300d')

# Define the input text
text = "Your input text to be summarized goes here."

# Split the text into candidate sentences for the extractive summary
sentences = [s.strip() for s in text.split(".") if s.strip()]

# Tokenize the text
tokens = tokenizer.tokenize(text)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = torch.tensor([input_ids]).to(device)

# Get the BERT model output
with torch.no_grad():
    outputs = model(input_ids)[0]  # Extract the last hidden states

# Mean-pool the token states into one 768-d text embedding. This lives in a
# different space than the 300-d GloVe vectors below, so the similarity
# ranking uses the SentenceTransformer embeddings on both sides.
bert_text_embedding = outputs.mean(dim=1).squeeze().cpu().numpy()

# Embed each sentence and the full text in the same 300-d GloVe space
sentence_embeddings = sentence_transformer_model.encode(sentences)
input_text_embedding = sentence_transformer_model.encode([text])[0]

# Calculate cosine similarity between sentences and input text
similarity_scores = cosine_similarity([input_text_embedding], sentence_embeddings)

# Sort the sentences by similarity scores in descending order
sorted_sentences = [sent for _, sent in sorted(zip(similarity_scores[0], sentences), reverse=True)]

# Choose the top sentences as the summary
num_summary_sentences = 2  # Adjust as needed
summary = ". ".join(sorted_sentences[:num_summary_sentences])
print("Summary:", summary)
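The core of trials/bert.py is embed-and-rank: score every sentence against the full text in one embedding space and keep the closest. A minimal standalone sketch of just that step, assuming the same GloVe SentenceTransformer; the sample text here is illustrative only, not from the repo:

from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

model = SentenceTransformer("average_word_embeddings_glove.6B.300d")
text = "Cats sleep all day. Markets fell sharply. Kittens nap often."
sentences = [s.strip() for s in text.split(".") if s.strip()]

# One 300-d vector per sentence and one for the full text, all in GloVe space
scores = cosine_similarity(model.encode([text]), model.encode(sentences))[0]
ranked = [s for _, s in sorted(zip(scores, sentences), reverse=True)]
print(ranked[:2])  # the two sentences closest to the overall text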
33
trials/pegasus.py
Normal file
@@ -0,0 +1,33 @@
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
import torch

# Load the Pegasus model and tokenizer
model_name = "google/pegasus-large"
model = PegasusForConditionalGeneration.from_pretrained(model_name)
tokenizer = PegasusTokenizer.from_pretrained(model_name)

# Set the device to use
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

sample_chunks = ["You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . ",
" We 're joined next by Thomas Curian , CEO of Google Cloud , and Alexander Wang , CEO and founder of Scale AI . Thomas joined Google in November 2018 as the CEO of Google Cloud . Prior to Google , Thomas spent 22 years at Oracle , where most recently he was president of product development . Before that , Thomas worked at McKinsey as a business analyst and engagement manager . His nearly 30 years of experience have given him a deep knowledge of engineering enterprise relationships and leadership of large organizations . Thomas 's degrees include an MBA in administration and management from Stanford University , as an RJ Miller scholar and a BSEE in electrical engineering and computer science from Princeton University , where he graduated suma cum laude . Thomas serves as a member of the Stanford graduate School of Business Advisory Council and Princeton University School of Engineering Advisory Council . Please welcome to the stage , Thomas Curian and Alexander Wang . This is a super exciting conversation . Thanks for being here , Thomas ."]

# Define the input text for summarization
text = sample_chunks[1]

inputs = tokenizer(text, truncation=True, padding="longest", return_tensors="pt").to(device)

# Generate the summary
summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_length=200,
    num_beams=4,
    length_penalty=2.0,
    early_stopping=True,
)

# Decode and print the summary
summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
print("Summary:", summary)
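trials/pegasus.py only summarizes sample_chunks[1]; for a full transcript the natural extension is to run the same generate call per chunk and join the results. A hedged sketch of that loop, reusing the model, tokenizer, device, and sample_chunks names defined in the script above:

summaries = []
for chunk in sample_chunks:
    batch = tokenizer(chunk, truncation=True, padding="longest", return_tensors="pt").to(device)
    ids = model.generate(
        batch["input_ids"],
        attention_mask=batch["attention_mask"],
        max_length=200,
        num_beams=4,
        length_penalty=2.0,
        early_stopping=True,
    )
    summaries.append(tokenizer.decode(ids[0], skip_special_tokens=True))
print("\n\n".join(summaries))  # one summary paragraph per chunk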
27
trials/t5.py
Normal file
@@ -0,0 +1,27 @@
from transformers import T5ForConditionalGeneration, T5Tokenizer
import torch

# Load the T5 model and tokenizer
model_name = "t5-base"
model = T5ForConditionalGeneration.from_pretrained(model_name)
tokenizer = T5Tokenizer.from_pretrained(model_name)

# Set the device to use
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

sample_chunks = ["You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . ",
" We 're joined next by Thomas Curian , CEO of Google Cloud , and Alexander Wang , CEO and founder of Scale AI . Thomas joined Google in November 2018 as the CEO of Google Cloud . Prior to Google , Thomas spent 22 years at Oracle , where most recently he was president of product development . Before that , Thomas worked at McKinsey as a business analyst and engagement manager . His nearly 30 years of experience have given him a deep knowledge of engineering enterprise relationships and leadership of large organizations . Thomas 's degrees include an MBA in administration and management from Stanford University , as an RJ Miller scholar and a BSEE in electrical engineering and computer science from Princeton University , where he graduated suma cum laude . Thomas serves as a member of the Stanford graduate School of Business Advisory Council and Princeton University School of Engineering Advisory Council . Please welcome to the stage , Thomas Curian and Alexander Wang . This is a super exciting conversation . Thanks for being here , Thomas ."]

# Define the input text for summarization
text = "Summarize the following text in 3 key points. text : " + sample_chunks[1]

# Tokenize the input text, truncating to the model's 512-token limit
inputs = tokenizer.encode(text, return_tensors="pt", truncation=True).to(device)

# Generate the summary
summary_ids = model.generate(inputs, max_length=1000, num_beams=4, early_stopping=True)

# Decode and print the summary
summary = tokenizer.decode(summary_ids.squeeze(), skip_special_tokens=True)
print("Summary:", summary)
44
trials/vicuna.py
Normal file
@@ -0,0 +1,44 @@
import spacy
from gpt4all import GPT4All

model = GPT4All("/Users/gokulmohanarangan/Library/Application Support/nomic.ai/GPT4All/ggml-vicuna-13b-1.1-q4_2.bin")


# Split a text file into chunks of roughly token_count spaCy tokens each
def split_text_file(filename, token_count):
    nlp = spacy.load('en_core_web_md')

    with open(filename, 'r') as file:
        text = file.read()

    doc = nlp(text)
    total_tokens = len(doc)

    parts = []
    start_index = 0

    while start_index < total_tokens:
        end_index = start_index + token_count
        part_tokens = doc[start_index:end_index]
        part = ' '.join(token.text for token in part_tokens)
        parts.append(part)
        start_index = end_index

    return parts


# Summarize each chunk with Vicuna and collect the per-chunk outputs
parts = split_text_file("transcript.txt", 1800)
final_summary = []
for part in parts:
    prompt = f"""
### Human:
Summarize the following text without missing any key points and action items.

{part}
### Assistant:
"""
    output = model.generate(prompt)
    final_summary.append(output)


# Write the combined summary (use a name that does not shadow the built-in sum)
with open("sum.txt", "w") as out_file:
    out_file.write(" ".join(final_summary))
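split_text_file slices the spaCy Doc into fixed 1,800-token windows so each GPT4All prompt stays within the model's context. A quick sanity check of the chunker before the expensive Vicuna pass, assuming a local transcript.txt and that the pipeline has been installed with python -m spacy download en_core_web_md; re-tokenizing the joined text can shift counts by a few tokens:

import spacy

nlp = spacy.load("en_core_web_md")
for i, part in enumerate(split_text_file("transcript.txt", 1800)):
    print(i, len(nlp(part)))  # each count should be at or near the 1800 budget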
98
trials/youtube_scraping.py
Normal file
@@ -0,0 +1,98 @@
import json

import jax.numpy as jnp
import yt_dlp as youtube_dl
from whisper_jax import FlaxWhisperPipline  # sic: the class name is spelled this way upstream


# Function to extract chapter information from a YouTube video URL
def get_youtube_chapters(video_id):
    video_url = "https://www.youtube.com/watch?v=" + video_id
    ydl_opts = {
        'extract_flat': 'in_playlist',
        'skip_download': True,
        'quiet': True,
    }

    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        video_info = ydl.extract_info(video_url, download=False)

    chapters = []

    # 'chapters' can be missing or None when the video has no chapter markers
    if video_info.get('chapters'):
        for chapter in video_info['chapters']:
            start_time = chapter['start_time']
            end_time = chapter['end_time']
            title = chapter['title']

            chapters.append({
                'start': start_time,
                'end': end_time,
                'title': title
            })

    return chapters


# Function to extract video transcription using yt_dlp and Whisper
def get_youtube_transcription(video_id):
    ydl_opts = {
        'format': 'bestaudio/best',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
        'outtmpl': './artefacts/audio',  # Specify output file path and name
    }

    # Download the audio
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(["https://www.youtube.com/watch?v=" + video_id])
    media_file = "./artefacts/audio.mp3"

    pipeline = FlaxWhisperPipline("openai/whisper-tiny",
                                  dtype=jnp.float16,
                                  batch_size=16)
    whisper_result = pipeline(media_file, return_timestamps=True)
    return whisper_result["chunks"]


# Function to scrape YouTube video transcripts and chapter information
def scrape_youtube_data(video_id):
    transcript_text = get_youtube_transcription(video_id)
    chapters = get_youtube_chapters(video_id)
    print("transcript_text", transcript_text)
    print("chapters", chapters)
    return transcript_text, chapters


# Function to generate a fine-tuning dataset from YouTube data
def generate_finetuning_dataset(video_ids):
    prompt_completion_pairs = []
    for video_id in video_ids:
        transcript_text, chapters = scrape_youtube_data(video_id)
        if transcript_text is not None and chapters is not None:
            for chapter in chapters:
                start_time = chapter["start"]
                end_time = chapter["end"]
                chapter_text = chapter["title"]

                # Collect every transcript chunk that falls inside this chapter;
                # the final chunk's end timestamp can be None, so guard for it
                prompt = ""
                for transcript in transcript_text:
                    chunk_start, chunk_end = transcript["timestamp"]
                    if chunk_start >= start_time and chunk_end is not None and chunk_end < end_time:
                        prompt += transcript["text"]

                # Skip chapters that matched no transcript text
                if prompt:
                    completion = chapter_text
                    prompt_completion_pairs.append({"prompt": prompt, "completion": completion})

    return prompt_completion_pairs


# Add all the video ids here; the videos must have chapters
video_ids = ["yTnSEZIwnkU"]
dataset = generate_finetuning_dataset(video_ids)

with open("finetuning_dataset.jsonl", "w") as f:
    for example in dataset:
        f.write(json.dumps(example) + "\n")
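The script ends by writing one JSON object per line, the JSONL layout most fine-tuning tooling expects, with the chapter title as the completion and the chapter's transcript as the prompt. A small standalone sketch (not part of this commit) that reads the file back and spot-checks the pairs:

import json

with open("finetuning_dataset.jsonl") as f:
    examples = [json.loads(line) for line in f]

print(len(examples), "prompt/completion pairs")
for ex in examples[:3]:
    # completion is the chapter title; prompt is the transcript inside that chapter
    print(repr(ex["completion"]), "<-", ex["prompt"][:80] + "...")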