Mirror of https://github.com/Monadical-SAS/reflector.git, synced 2025-12-22 21:29:05 +00:00
restructuring
0  trials/title_summary/__init__.py  Normal file
57  trials/title_summary/api.py  Normal file
@@ -0,0 +1,57 @@
import requests
import spacy

# Enter the IP of the machine where the LLM is hosted
LLM_MACHINE_IP = ""
# This is the URL of text-generation-webui's generate endpoint
URL = f"http://{LLM_MACHINE_IP}:5000/api/v1/generate"

headers = {
    "Content-Type": "application/json"
}


def split_text_file(filename, token_count):
    """Split a text file into chunks of roughly `token_count` spaCy tokens."""
    nlp = spacy.load('en_core_web_md')

    with open(filename, 'r') as file:
        text = file.read()

    doc = nlp(text)
    total_tokens = len(doc)

    parts = []
    start_index = 0

    while start_index < total_tokens:
        end_index = start_index + token_count
        # Keep a small 5-token margin at the end of each chunk
        part_tokens = doc[start_index:end_index - 5]
        part = ' '.join(token.text for token in part_tokens)
        parts.append(part)
        start_index = end_index

    return parts


final_summary = []
parts = split_text_file("transcript.txt", 1600)

for part in parts:
    prompt = f"""
### Human:
Given the following text, distill the most important information
into a short summary: {part}

### Assistant:
"""
    data = {
        "prompt": prompt
    }
    try:
        response = requests.post(URL, headers=headers, json=data)
        result = response.json()
        print(result)
        # text-generation-webui's legacy API returns the generated text
        # under results[0]["text"]
        final_summary.append(result["results"][0]["text"])
    except Exception as e:
        print(str(e))

with open("summary.txt", "w") as out_file:
    out_file.write(" ".join(final_summary))
43  trials/title_summary/bert.py  Normal file
@@ -0,0 +1,43 @@
import torch
from transformers import BertTokenizer, BertModel
from sklearn.metrics.pairwise import cosine_similarity

# Load the pre-trained BERT model and tokenizer
model_name = "bert-base-uncased"
model = BertModel.from_pretrained(model_name)
tokenizer = BertTokenizer.from_pretrained(model_name)

# Set the device to use
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

# A SentenceTransformer such as 'average_word_embeddings_glove.6B.300d' could be
# swapped in as the encoder instead of BERT, but its 300-d vectors must not be
# mixed with BERT's 768-d vectors in the same similarity computation.

# Define the input text
text = "Your input text to be summarized goes here."

# Split the text into candidate sentences for extractive summarization
sentences = [s.strip() for s in text.split(".") if s.strip()]


def embed(texts):
    """Mean-pool BERT's last hidden states to get one embedding per input text."""
    encoded = tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model(**encoded)[0]  # Extract the last hidden states
    return outputs.mean(dim=1).cpu().numpy()


# Calculate embeddings for each candidate sentence and for the full input text
sentence_embeddings = embed(sentences)
input_text_embedding = embed([text])[0]

# Calculate cosine similarity between sentences and input text
similarity_scores = cosine_similarity([input_text_embedding], sentence_embeddings)

# Sort the sentences by similarity scores in descending order
sorted_sentences = [sent for _, sent in sorted(zip(similarity_scores[0], sentences), reverse=True)]

# Choose the top sentences as the summary
num_summary_sentences = 2  # Adjust as needed
summary = ". ".join(sorted_sentences[:num_summary_sentences])
print("Summary:", summary)
101  trials/title_summary/gpt2.py  Normal file
@@ -0,0 +1,101 @@
# Approach 1
from transformers import GPTNeoForCausalLM, GPT2Tokenizer

model_name = 'EleutherAI/gpt-neo-1.3B'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPTNeoForCausalLM.from_pretrained(model_name)

conversation = """
Summarize the following conversation in 3 key sentences:

We 're joined next by Thomas Curian , CEO of Google Cloud , and Alexander Wang , CEO and founder of Scale AI .
Thomas joined Google in November 2018 as the CEO of Google Cloud . Prior to Google , Thomas spent 22 years at Oracle , where most recently he was president of product development .
Before that , Thomas worked at McKinsey as a business analyst and engagement manager . His nearly 30 years of experience have given him a deep knowledge of engineering enterprise relationships and leadership of large organizations .
Thomas 's degrees include an MBA in administration and management from Stanford University , as an RJ Miller scholar and a BSEE in electrical engineering and computer science from Princeton University , where he graduated suma cum laude .
Thomas serves as a member of the Stanford graduate School of Business Advisory Council and Princeton University School of Engineering Advisory Council .
Please welcome to the stage , Thomas Curian and Alexander Wang . This is a super exciting conversation . Thanks for being here , Thomas .
"""

input_ids = tokenizer.encode(conversation, return_tensors='pt')

# Use max_new_tokens so the prompt length does not consume the generation budget
output = model.generate(input_ids,
                        max_new_tokens=30,
                        num_return_sequences=1)

# Decode only the newly generated tokens, dropping the prompt
caption = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
print("Caption:", caption)


# Approach 2
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

model_name = "gpt2"
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)

model.eval()

text = """
You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . "
"""

tokenizer.pad_token = tokenizer.eos_token
input_ids = tokenizer.encode(text,
                             max_length=100,
                             truncation=True,
                             return_tensors="pt")
attention_mask = torch.ones(input_ids.shape, dtype=torch.long)
output = model.generate(input_ids,
                        max_new_tokens=20,
                        num_return_sequences=1,
                        num_beams=2,
                        attention_mask=attention_mask)

chapter_titles = [tokenizer.decode(output[i], skip_special_tokens=True) for i in range(output.shape[0])]
for i, title in enumerate(chapter_titles):
    print("Caption: ", title)


# Approach 3

import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer


def generate_response(conversation, max_length=100):
    input_text = ""
    for entry in conversation:
        role = entry["role"]
        content = entry["content"]
        input_text += f"{role}: {content}\n"

    # Tokenize the entire conversation
    input_ids = tokenizer.encode(input_text, return_tensors="pt")

    # Generate text based on the entire conversation
    with torch.no_grad():
        output = model.generate(input_ids,
                                max_new_tokens=max_length,
                                pad_token_id=tokenizer.eos_token_id)

    # Decode the generated text and return it
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response


if __name__ == "__main__":

    # Call the appropriate approach from main while experimenting
    model_name = "gpt2"
    model = GPT2LMHeadModel.from_pretrained(model_name)
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)

    sample_chunks = [
"You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . "
    ]

    conversation = [
        {"role": "system", "content": "Summarize this text"},
        {"role": "user", "content": " text : " + sample_chunks[0]},
    ]

    response = generate_response(conversation)
    print("Response:", response)
157  trials/title_summary/incsum.py  Normal file
@@ -0,0 +1,157 @@
import spacy
import sys


# Observe the incremental summaries by performing summaries in chunks
with open("transcript.txt") as f:
    transcription = f.read()


def split_text_file(filename, token_count):
    nlp = spacy.load('en_core_web_md')

    with open(filename, 'r') as file:
        text = file.read()

    doc = nlp(text)
    total_tokens = len(doc)

    parts = []
    start_index = 0

    while start_index < total_tokens:
        end_index = start_index + token_count
        part_tokens = doc[start_index:end_index]
        part = ' '.join(token.text for token in part_tokens)
        parts.append(part)
        start_index = end_index

    return parts


# Set the chunk length here to split the transcript and test
MAX_CHUNK_LENGTH = 1000

chunks = split_text_file("transcript.txt", MAX_CHUNK_LENGTH)
print("Number of chunks", len(chunks))

# Write chunks to file to refer to input vs output, separated by blank lines
with open("chunks" + str(MAX_CHUNK_LENGTH) + ".txt", "a") as f:
    for c in chunks:
        f.write(c + "\n\n")

# To run only one approach, pass its number on the command line,
# e.g. `python incsum.py 1` runs approach 1.
# With no argument, all approaches are run.

try:
    index = sys.argv[1]
except IndexError:
    index = None

# Approach 1 : facebook/bart-large-cnn
if index == "1" or index is None:
    SUMMARY_MODEL = "facebook/bart-large-cnn"
    MIN_LENGTH = 5
    MAX_LENGTH = 10
    BEAM_SIZE = 2

    print("Performing chunk summary : " + SUMMARY_MODEL)

    from transformers import BartTokenizer, BartForConditionalGeneration

    tokenizer = BartTokenizer.from_pretrained(SUMMARY_MODEL)
    model = BartForConditionalGeneration.from_pretrained(SUMMARY_MODEL)
    summaries = []
    for c in chunks:
        input_ids = tokenizer.encode(c,
                                     truncation=True,
                                     max_length=MAX_CHUNK_LENGTH,
                                     padding="max_length",
                                     return_tensors='pt')
        summary_ids = model.generate(
            input_ids,
            num_beams=BEAM_SIZE,
            max_length=56,
            early_stopping=True,
            length_penalty=1.0)
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        summaries.append(summary)

    with open("bart-summaries.txt", "a") as f:
        for summary in summaries:
            f.write(summary + "\n\n")

# Approach 2
if index == "2" or index is None:
    print("Performing chunk summary : " + "gpt-neo-1.3B")

    import torch
    from transformers import GPTNeoForCausalLM, GPT2Tokenizer

    model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
    tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    summaries = []

    for c in chunks:
        input_ids = tokenizer.encode(c,
                                     truncation=True,
                                     return_tensors='pt')
        input_length = input_ids.shape[1]
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long)

        max_summary_length = 100
        max_length = input_length + max_summary_length

        output = model.generate(input_ids,
                                max_length=max_length,
                                attention_mask=attention_mask,
                                pad_token_id=model.config.eos_token_id,
                                num_beams=4,
                                length_penalty=2.0,
                                early_stopping=True)
        # Keep only the tokens generated after the prompt
        summary_ids = output[0, input_length:]
        summary = tokenizer.decode(summary_ids, skip_special_tokens=True)
        summaries.append(summary)
        with open("gptneo1.3B-summaries.txt", "a") as f:
            f.write(summary + "\n\n")

# Approach 3
if index == "3" or index is None:
    print("Performing chunk summary : " + "mpt-7B")

    import torch
    import transformers
    from transformers import AutoTokenizer

    config = transformers.AutoConfig.from_pretrained('mosaicml/mpt-7b',
                                                     trust_remote_code=True)
    # Triton attention and meta-device init require the matching environment
    config.attn_config['attn_impl'] = 'triton'
    config.max_seq_len = 1024
    config.init_device = "meta"

    model = transformers.AutoModelForCausalLM.from_pretrained(
        'mosaicml/mpt-7b',
        config=config,  # pass the customized config so the settings above take effect
        trust_remote_code=True,
        torch_dtype=torch.bfloat16
    )

    tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')

    summaries = []
    for c in chunks:
        # Truncate to the configured context length of the model
        input_ids = tokenizer.encode(c,
                                     truncation=True,
                                     max_length=config.max_seq_len,
                                     return_tensors="pt")
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long)
        output = model.generate(input_ids,
                                max_new_tokens=25,
                                attention_mask=attention_mask,
                                pad_token_id=model.config.eos_token_id,
                                num_return_sequences=1)
        summary = tokenizer.decode(output[0],
                                   skip_special_tokens=True)
        summaries.append(summary)

    with open("mpt-7b-summaries.txt", "a") as f:
        for summary in summaries:
            f.write(summary + "\n\n")
37  trials/title_summary/openai_endpoint.py  Normal file
@@ -0,0 +1,37 @@
# Use the OpenAI API endpoint to send data to OpenAI
# along with prompts to caption/summarize the conversation

import openai

openai.api_key = ""

# To caption the conversation, the user prompt used was "caption this conversation"
# with max_tokens=20

# For incremental summarization, the user prompt used was
# "summarize this conversation in a few sentences by taking key points"
# with max_tokens=300

sample_chunks = [
"You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . ",
" We 're joined next by Thomas Curian , CEO of Google Cloud , and Alexander Wang , CEO and founder of Scale AI . Thomas joined Google in November 2018 as the CEO of Google Cloud . Prior to Google , Thomas spent 22 years at Oracle , where most recently he was president of product development . Before that , Thomas worked at McKinsey as a business analyst and engagement manager . His nearly 30 years of experience have given him a deep knowledge of engineering enterprise relationships and leadership of large organizations . Thomas 's degrees include an MBA in administration and management from Stanford University , as an RJ Miller scholar and a BSEE in electrical engineering and computer science from Princeton University , where he graduated suma cum laude . Thomas serves as a member of the Stanford graduate School of Business Advisory Council and Princeton University School of Engineering Advisory Council . Please welcome to the stage , Thomas Curian and Alexander Wang . This is a super exciting conversation . Thanks for being here , Thomas ."]

conversation = [
    {"role": "system",
     "content": sample_chunks[1]},
    {"role": "user",
     "content": "summarize this conversation in a few sentences by taking key points"}
]

model = "gpt-3.5-turbo"
response = openai.ChatCompletion.create(model=model,
                                        messages=conversation,
                                        n=1,
                                        max_tokens=300)

# Try fine tuned model
# model = "davinci:ft-personal-2023-07-14-10-43-51"
# response = openai.Completion.create(model=model,
#                                     prompt=sample_chunks[0] + " -> ")

# Print only the generated message text of the first choice
caption = response.choices[0].message.content
print(caption)
33  trials/title_summary/pegasus.py  Normal file
@@ -0,0 +1,33 @@
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
import torch

# Load the Pegasus model and tokenizer
model_name = "google/pegasus-large"
model = PegasusForConditionalGeneration.from_pretrained(model_name)
tokenizer = PegasusTokenizer.from_pretrained(model_name)

# Set the device to use
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

sample_chunks = ["You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . ",
" We 're joined next by Thomas Curian , CEO of Google Cloud , and Alexander Wang , CEO and founder of Scale AI . Thomas joined Google in November 2018 as the CEO of Google Cloud . Prior to Google , Thomas spent 22 years at Oracle , where most recently he was president of product development . Before that , Thomas worked at McKinsey as a business analyst and engagement manager . His nearly 30 years of experience have given him a deep knowledge of engineering enterprise relationships and leadership of large organizations . Thomas 's degrees include an MBA in administration and management from Stanford University , as an RJ Miller scholar and a BSEE in electrical engineering and computer science from Princeton University , where he graduated suma cum laude . Thomas serves as a member of the Stanford graduate School of Business Advisory Council and Princeton University School of Engineering Advisory Council . Please welcome to the stage , Thomas Curian and Alexander Wang . This is a super exciting conversation . Thanks for being here , Thomas ."]

# Define the input text for summarization
text = sample_chunks[1]

inputs = tokenizer(text, truncation=True, padding="longest", return_tensors="pt").to(device)

# Generate the summary
summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_length=200,
    num_beams=4,
    length_penalty=2.0,
    early_stopping=True,
)

# Decode and print the summary
summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
print("Summary:", summary)
27  trials/title_summary/t5.py  Normal file
@@ -0,0 +1,27 @@
from transformers import T5ForConditionalGeneration, T5Tokenizer
import torch

# Load the T5 model and tokenizer
model_name = "t5-base"
model = T5ForConditionalGeneration.from_pretrained(model_name)
tokenizer = T5Tokenizer.from_pretrained(model_name)

# Set the device to use
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

sample_chunks = ["You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . ",
" We 're joined next by Thomas Curian , CEO of Google Cloud , and Alexander Wang , CEO and founder of Scale AI . Thomas joined Google in November 2018 as the CEO of Google Cloud . Prior to Google , Thomas spent 22 years at Oracle , where most recently he was president of product development . Before that , Thomas worked at McKinsey as a business analyst and engagement manager . His nearly 30 years of experience have given him a deep knowledge of engineering enterprise relationships and leadership of large organizations . Thomas 's degrees include an MBA in administration and management from Stanford University , as an RJ Miller scholar and a BSEE in electrical engineering and computer science from Princeton University , where he graduated suma cum laude . Thomas serves as a member of the Stanford graduate School of Business Advisory Council and Princeton University School of Engineering Advisory Council . Please welcome to the stage , Thomas Curian and Alexander Wang . This is a super exciting conversation . Thanks for being here , Thomas ."]

# Define the input text for summarization
text = "Summarize the following text in 3 key points. text : " + sample_chunks[1]

# Tokenize the input text
inputs = tokenizer.encode(text, return_tensors="pt").to(device)

# Generate the summary
summary_ids = model.generate(inputs, max_length=1000, num_beams=4, early_stopping=True)

# Decode and print the summary
summary = tokenizer.decode(summary_ids.squeeze(), skip_special_tokens=True)
print("Summary:", summary)
1  trials/title_summary/transcript.txt  Normal file
File diff suppressed because one or more lines are too long
44  trials/title_summary/vicuna.py  Normal file
@@ -0,0 +1,44 @@
from gpt4all import GPT4All
import spacy

model = GPT4All("/Users/gokulmohanarangan/Library/Application Support/nomic.ai/GPT4All/ggml-vicuna-13b-1.1-q4_2.bin")


def split_text_file(filename, token_count):
    nlp = spacy.load('en_core_web_md')

    with open(filename, 'r') as file:
        text = file.read()

    doc = nlp(text)
    total_tokens = len(doc)

    parts = []
    start_index = 0

    while start_index < total_tokens:
        end_index = start_index + token_count
        part_tokens = doc[start_index:end_index]
        part = ' '.join(token.text for token in part_tokens)
        parts.append(part)
        start_index = end_index

    return parts


parts = split_text_file("transcript.txt", 1800)
final_summary = []
for part in parts:
    prompt = f"""
### Human:
Summarize the following text without missing any key points and action items.

{part}
### Assistant:
"""
    output = model.generate(prompt)
    final_summary.append(output)


with open("sum.txt", "w") as out_file:
    out_file.write(" ".join(final_summary))