From f7d3c469e2dd71a00b7d7b32c2ae698246e42051 Mon Sep 17 00:00:00 2001 From: Koper Date: Wed, 2 Aug 2023 18:25:35 +0700 Subject: [PATCH 1/5] Installed pre-commit --- .pre-commit-config.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..629f606c --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: debug-statements + - id: trailing-whitespace + +- repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + args: ["--profile", "black"] + +- repo: https://github.com/psf/black + rev: 23.1.0 + hooks: + - id: black + args: ["--line-length", "120"] + From 7192e9b1b93a102584cc4f99985e83de0d103d06 Mon Sep 17 00:00:00 2001 From: Koper Date: Wed, 2 Aug 2023 18:26:34 +0700 Subject: [PATCH 2/5] Update TIMELINE.md --- www/TIMELINE.md | 1 + 1 file changed, 1 insertion(+) diff --git a/www/TIMELINE.md b/www/TIMELINE.md index 82007396..5acb95ac 100644 --- a/www/TIMELINE.md +++ b/www/TIMELINE.md @@ -10,3 +10,4 @@ Here's a structured timeline for our project completion: | Friday | Big demo presentation | Let's stay focused and get our tasks done on time for a successful demo on Friday. Let's have a successful week! + From a8ea3a8bdfff827b9611ee1aee50f2681e6d9c13 Mon Sep 17 00:00:00 2001 From: Koper Date: Wed, 2 Aug 2023 18:35:38 +0700 Subject: [PATCH 3/5] pre-commit now does yarn format and several other tasks --- .pre-commit-config.yaml | 40 ++++++----- www/TIMELINE.md | 1 - www/app/components/CustomRecordPlugin.js | 86 ++++++++++++------------ www/app/components/dashboard.js | 8 ++- www/app/components/record.js | 25 +++---- www/app/components/webrtc.js | 2 +- www/app/layout.js | 5 +- www/app/page.js | 3 +- www/next.config.js | 5 +- www/package.json | 4 +- www/pages/sentry-example-page.js | 5 +- 11 files changed, 99 insertions(+), 85 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 629f606c..84075ef4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,21 +1,31 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 - hooks: - - id: debug-statements - - id: trailing-whitespace + - repo: local + hooks: + - id: yarn-format + name: run yarn format + language: system + entry: bash -c 'cd www && yarn format' + pass_filenames: false + files: ^www/ -- repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - args: ["--profile", "black"] + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: debug-statements + - id: trailing-whitespace + - id: check-added-large-files + - id: detect-private-key -- repo: https://github.com/psf/black - rev: 23.1.0 - hooks: - - id: black - args: ["--line-length", "120"] + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + args: ["--profile", "black"] + - repo: https://github.com/psf/black + rev: 23.1.0 + hooks: + - id: black + args: ["--line-length", "120"] diff --git a/www/TIMELINE.md b/www/TIMELINE.md index 5acb95ac..82007396 100644 --- a/www/TIMELINE.md +++ b/www/TIMELINE.md @@ -10,4 +10,3 @@ Here's a structured timeline for our project completion: | 
Friday | Big demo presentation | Let's stay focused and get our tasks done on time for a successful demo on Friday. Let's have a successful week! - diff --git a/www/app/components/CustomRecordPlugin.js b/www/app/components/CustomRecordPlugin.js index 8e8cdc44..7e29ea7c 100644 --- a/www/app/components/CustomRecordPlugin.js +++ b/www/app/components/CustomRecordPlugin.js @@ -18,81 +18,81 @@ class CustomRecordPlugin extends RecordPlugin { return new CustomRecordPlugin(options || {}); } render(stream) { - if (!this.wavesurfer) return () => undefined + if (!this.wavesurfer) return () => undefined; - const container = this.wavesurfer.getWrapper() - const canvas = document.createElement('canvas') - canvas.width = container.clientWidth - canvas.height = container.clientHeight - canvas.style.zIndex = '10' - container.appendChild(canvas) + const container = this.wavesurfer.getWrapper(); + const canvas = document.createElement("canvas"); + canvas.width = container.clientWidth; + canvas.height = container.clientHeight; + canvas.style.zIndex = "10"; + container.appendChild(canvas); - const canvasCtx = canvas.getContext('2d') - const audioContext = new AudioContext() - const source = audioContext.createMediaStreamSource(stream) - const analyser = audioContext.createAnalyser() - analyser.fftSize = 2 ** 5 - source.connect(analyser) - const bufferLength = analyser.frequencyBinCount - const dataArray = new Uint8Array(bufferLength) + const canvasCtx = canvas.getContext("2d"); + const audioContext = new AudioContext(); + const source = audioContext.createMediaStreamSource(stream); + const analyser = audioContext.createAnalyser(); + analyser.fftSize = 2 ** 5; + source.connect(analyser); + const bufferLength = analyser.frequencyBinCount; + const dataArray = new Uint8Array(bufferLength); let animationId, previousTimeStamp; - const BUFFER_SIZE = 2 ** 8 - const dataBuffer = new Array(BUFFER_SIZE).fill(canvas.height) + const BUFFER_SIZE = 2 ** 8; + const dataBuffer = new Array(BUFFER_SIZE).fill(canvas.height); const drawWaveform = (timeStamp) => { - if (!canvasCtx) return + if (!canvasCtx) return; - analyser.getByteTimeDomainData(dataArray) - canvasCtx.clearRect(0, 0, canvas.width, canvas.height) - canvasCtx.fillStyle = '#cc3347' + analyser.getByteTimeDomainData(dataArray); + canvasCtx.clearRect(0, 0, canvas.width, canvas.height); + canvasCtx.fillStyle = "#cc3347"; if (previousTimeStamp === undefined) { - previousTimeStamp = timeStamp - dataBuffer.push(Math.min(...dataArray)) - dataBuffer.splice(0, 1) + previousTimeStamp = timeStamp; + dataBuffer.push(Math.min(...dataArray)); + dataBuffer.splice(0, 1); } const elapsed = timeStamp - previousTimeStamp; if (elapsed > 10) { - previousTimeStamp = timeStamp - dataBuffer.push(Math.min(...dataArray)) - dataBuffer.splice(0, 1) + previousTimeStamp = timeStamp; + dataBuffer.push(Math.min(...dataArray)); + dataBuffer.splice(0, 1); } // Drawing - const sliceWidth = canvas.width / dataBuffer.length - let x = 0 + const sliceWidth = canvas.width / dataBuffer.length; + let x = 0; for (let i = 0; i < dataBuffer.length; i++) { - const valueNormalized = dataBuffer[i] / canvas.height - const y = valueNormalized * canvas.height / 2 - const sliceHeight = canvas.height + 1 - y * 2 + const valueNormalized = dataBuffer[i] / canvas.height; + const y = (valueNormalized * canvas.height) / 2; + const sliceHeight = canvas.height + 1 - y * 2; - canvasCtx.fillRect(x, y, sliceWidth * 2 / 3, sliceHeight) - x += sliceWidth + canvasCtx.fillRect(x, y, (sliceWidth * 2) / 3, sliceHeight); + x += 
sliceWidth; } - animationId = requestAnimationFrame(drawWaveform) - } + animationId = requestAnimationFrame(drawWaveform); + }; - drawWaveform() + drawWaveform(); return () => { if (animationId) { - cancelAnimationFrame(animationId) + cancelAnimationFrame(animationId); } if (source) { - source.disconnect() - source.mediaStream.getTracks().forEach((track) => track.stop()) + source.disconnect(); + source.mediaStream.getTracks().forEach((track) => track.stop()); } if (audioContext) { - audioContext.close() + audioContext.close(); } - canvas?.remove() - } + canvas?.remove(); + }; } startRecording(stream) { this.preventInteraction(); diff --git a/www/app/components/dashboard.js b/www/app/components/dashboard.js index e7017d11..cf5c75c0 100644 --- a/www/app/components/dashboard.js +++ b/www/app/components/dashboard.js @@ -1,7 +1,10 @@ import { Mulberry32 } from "../utils.js"; import React, { useState, useEffect } from "react"; -import { FontAwesomeIcon } from '@fortawesome/react-fontawesome' -import { faChevronRight, faChevronDown } from '@fortawesome/free-solid-svg-icons' +import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; +import { + faChevronRight, + faChevronDown, +} from "@fortawesome/free-solid-svg-icons"; export function Dashboard({ isRecording, @@ -47,7 +50,6 @@ export function Dashboard({ ))} - {finalSummary && (

Final Summary

diff --git a/www/app/components/record.js b/www/app/components/record.js index cb3b03f8..33ee8fb4 100644 --- a/www/app/components/record.js +++ b/www/app/components/record.js @@ -7,27 +7,28 @@ import "react-dropdown/style.css"; import CustomRecordPlugin from "./CustomRecordPlugin"; - const AudioInputsDropdown = (props) => { const [ddOptions, setDdOptions] = useState([]); useEffect(() => { const init = async () => { // Request permission to use audio inputs - await navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => stream.getTracks().forEach((t) => t.stop())) + await navigator.mediaDevices + .getUserMedia({ audio: true }) + .then((stream) => stream.getTracks().forEach((t) => t.stop())); - const devices = await navigator.mediaDevices.enumerateDevices() + const devices = await navigator.mediaDevices.enumerateDevices(); const audioDevices = devices .filter((d) => d.kind === "audioinput" && d.deviceId != "") - .map((d) => ({ value: d.deviceId, label: d.label })) + .map((d) => ({ value: d.deviceId, label: d.label })); - if (audioDevices.length < 1) return console.log("no audio input devices") + if (audioDevices.length < 1) return console.log("no audio input devices"); - setDdOptions(audioDevices) - props.setDeviceId(audioDevices[0].value) - } - init() - }, []) + setDdOptions(audioDevices); + props.setDeviceId(audioDevices[0].value); + }; + init(); + }, []); const handleDropdownChange = (e) => { props.setDeviceId(e.value); @@ -40,8 +41,8 @@ const AudioInputsDropdown = (props) => { value={ddOptions[0]} disabled={props.disabled} /> - ) -} + ); +}; export default function Recorder(props) { const waveformRef = useRef(); diff --git a/www/app/components/webrtc.js b/www/app/components/webrtc.js index 1b9843c0..3469cd19 100644 --- a/www/app/components/webrtc.js +++ b/www/app/components/webrtc.js @@ -64,7 +64,7 @@ const useWebRTC = (stream) => { duration: serverData.duration, summary: serverData.summary, }, - text: '' + text: "", })); break; default: diff --git a/www/app/layout.js b/www/app/layout.js index 25b34bb7..163a2faa 100644 --- a/www/app/layout.js +++ b/www/app/layout.js @@ -17,9 +17,8 @@ export default function RootLayout({ children }) { Test - {children} - - + {children} + ); } diff --git a/www/app/page.js b/www/app/page.js index 6dae28ea..9fcf0087 100644 --- a/www/app/page.js +++ b/www/app/page.js @@ -12,7 +12,8 @@ const App = () => { // transcription, summary, etc const serverData = useWebRTC(stream); - const sendStopCmd = () => serverData?.peer?.send(JSON.stringify({ cmd: "STOP" })) + const sendStopCmd = () => + serverData?.peer?.send(JSON.stringify({ cmd: "STOP" })); return (
diff --git a/www/next.config.js b/www/next.config.js index 7f188f5c..610b5be8 100644 --- a/www/next.config.js +++ b/www/next.config.js @@ -1,11 +1,10 @@ /** @type {import('next').NextConfig} */ const nextConfig = { - output: 'standalone', + output: "standalone", }; module.exports = nextConfig; - // Sentry content below const { withSentryConfig } = require("@sentry/nextjs"); @@ -40,5 +39,5 @@ module.exports = withSentryConfig( // Automatically tree-shake Sentry logger statements to reduce bundle size disableLogger: true, - } + }, ); diff --git a/www/package.json b/www/package.json index 9f2f418b..373edc00 100644 --- a/www/package.json +++ b/www/package.json @@ -30,8 +30,8 @@ }, "main": "index.js", "repository": "https://github.com/Monadical-SAS/reflector-ui.git", - "author": "Koper ", - "license": "MIT", + "author": "Andreas ", + "license": "All Rights Reserved", "devDependencies": { "prettier": "^3.0.0" } diff --git a/www/pages/sentry-example-page.js b/www/pages/sentry-example-page.js index 5e935e43..bcace78b 100644 --- a/www/pages/sentry-example-page.js +++ b/www/pages/sentry-example-page.js @@ -70,7 +70,10 @@ export default function Home() {

         Next, look for the error on the{" "}
-        Issues Page.
+        
+          Issues Page
+        
+        .

         For more information, see{" "}

From 6fc5fe9c6a53ce57e79ab20e455fd7647d534b00 Mon Sep 17 00:00:00 2001
From: Gokul Mohanarangan
Date: Thu, 3 Aug 2023 12:28:22 +0530
Subject: [PATCH 4/5] sample chat llm

---
 server/trials/title_summary/chat_llm.py | 79 +++++++++++++++++++++++++
 1 file changed, 79 insertions(+)
 create mode 100644 server/trials/title_summary/chat_llm.py

diff --git a/server/trials/title_summary/chat_llm.py b/server/trials/title_summary/chat_llm.py
new file mode 100644
index 00000000..557fb531
--- /dev/null
+++ b/server/trials/title_summary/chat_llm.py
@@ -0,0 +1,79 @@
+"""
+This example contains the bare essentials to load a chat LLM and infer
+from it using a predefined prompt. The purpose of this file is to show
+how to infer from a chat LLM, which is required for banana.dev due to
+its design and platform limitations.
+"""
+
+# The following logic was tested on the monadical-ml machine
+
+import json
+
+import torch
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer
+)
+from transformers.generation import GenerationConfig
+
+# This can be passed via an environment variable or the params supplied
+# when starting the program via the banana.dev platform
+MODEL_NAME = "lmsys/vicuna-13b-v1.5"
+
+# Load the model in half precision (bfloat16) to reduce memory usage
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME,
+                                             low_cpu_mem_usage=True,
+                                             torch_dtype=torch.bfloat16
+                                             )
+
+# Generation config
+model.config.max_new_tokens = 300
+gen_cfg = GenerationConfig.from_model_config(model.config)
+gen_cfg.max_new_tokens = 300
+
+# Load the tokenizer
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+
+# Move model to GPU
+model = model.cuda()
+print(f"Loaded {MODEL_NAME} successfully")
+
+# Inputs
+sample_chunks = [
+    "You all just came off of your incredible Google Cloud next conference where you released a wide variety of functionality and features and new products across artisan television and also across the entire sort of cloud ecosystem . You want to just first by walking through , first start by walking through all the innovations that you sort of released and what you 're excited about when you come to Google Cloud ? Now our vision is super simple . If you look at what smartphones did for a consumer , you know they took a computer and internet browser , a communication device , and a camera , and made it so that it 's in everybody 's pocket , so it really brought computation to every person . We feel that , you know , our , what we 're trying to do is take all the technological innovation that Google 's doing , but make it super simple so that everyone can consume it . And so that includes our global data center footprint , all the new types of hardware and large-scale systems we work on , the software that we 're making available for people to do high-scale computation , tools for data processing , tools for cybersecurity , processing , tools for cyber security , tools for machine learning , but make it so simple that everyone can use it . And every step that we do to simplify things for people , we think adoption can grow . And so that 's a lot of what we 've done these last three , four years , and we made a number of announcements that next in machine learning and AI in particular , you know , we look at our work as four elements , how we take our large-scale compute systems that were building for AI and how we make that available to everybody . Second , what we 're doing with the software stacks and top of it , things like jacks and other things and how we 're making those available to everybody . Third is advances because different people have different levels of expertise . Some people say I need the hardware to build my own large language model or algorithm . Other people say , look , I really need to use a building block . You guys give me . So , 30s we 've done a lot with AutoML and we announce new capability for image , video , and translation to make it available to everybody . And then lastly , we 're also building completely packaged solutions for some areas and we announce some new stuff . ",
+    " We 're joined next by Thomas Curian , CEO of Google Cloud , and Alexander Wang , CEO and founder of Scale AI . Thomas joined Google in November 2018 as the CEO of Google Cloud . Prior to Google , Thomas spent 22 years at Oracle , where most recently he was president of product development . Before that , Thomas worked at McKinsey as a business analyst and engagement manager . His nearly 30 years of experience have given him a deep knowledge of engineering enterprise relationships and leadership of large organizations . Thomas 's degrees include an MBA in administration and management from Stanford University , as an RJ Miller scholar and a BSEE in electrical engineering and computer science from Princeton University , where he graduated suma cum laude . Thomas serves as a member of the Stanford graduate School of Business Advisory Council and Princeton University School of Engineering Advisory Council . Please welcome to the stage , Thomas Curian and Alexander Wang . This is a super exciting conversation . Thanks for being here , Thomas ."]
+
+# Prompt template for the current model
+prompt = f"""
+    ### Human:
+    Create a JSON object as the response. The JSON object must have 2 fields:
+    i) title and ii) summary. For the title field, generate a short title
+    for the given text. For the summary field, summarize the given text
+    in three sentences.
+
+    {sample_chunks[0]}
+
+    ### Assistant:
+    """
+
+# Inference: chat generation
+input_ids = tokenizer.encode(prompt, return_tensors='pt').to(model.device)
+output = model.generate(input_ids, generation_config=gen_cfg)
+
+# Process output
+response = tokenizer.decode(output[0].cpu(), skip_special_tokens=True)
+response = response.split("### Assistant:\n")
+print("TitleSummaryJsonResponse:", json.loads(response[1]))
+print("Inference successful")
+
+# Sample response for sample_chunks[0]
+
+# TitleSummaryJsonResponse:
+# {
+# 'title': 'Google Cloud Next Conference: Simplifying AI and Machine Learning for Everyone',
+# 'summary': 'Google Cloud announced a wide range of innovations and new products in the AI
+# and machine learning space at the recent Google Cloud Next conference. The goal
+# is to make these technologies accessible to everyone by simplifying the process
+# and providing tools for data processing, cybersecurity, and machine learning.
+# Google is also working on advances in AutoML and packaged solutions for certain areas.'
+# } \ No newline at end of file From a8cd3702723348288dd507eb6f64cd22034f1a56 Mon Sep 17 00:00:00 2001 From: Andreas Bonini <78463782+AndreasBonini@users.noreply.github.com> Date: Thu, 3 Aug 2023 17:51:35 +0700 Subject: [PATCH 5/5] Delete TIMELINE.md --- www/TIMELINE.md | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 www/TIMELINE.md diff --git a/www/TIMELINE.md b/www/TIMELINE.md deleted file mode 100644 index 82007396..00000000 --- a/www/TIMELINE.md +++ /dev/null @@ -1,12 +0,0 @@ -# Project Timeline - -Here's a structured timeline for our project completion: - -| Day | Objective | -| --------- | ------------------------------------------------------ | -| Tuesday | Front-end and Back-end integration | -| Wednesday | Project will be polished and tested by Adam | -| Thursday | Project completion. Additional tests will be performed | -| Friday | Big demo presentation | - -Let's stay focused and get our tasks done on time for a successful demo on Friday. Let's have a successful week!