Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added YouTube Metadata Reader #12975

Merged
merged 7 commits into from May 2, 2024
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
@@ -0,0 +1,153 @@
llama_index/_static
.DS_Store
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
bin/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
etc/
include/
lib/
lib64/
parts/
sdist/
share/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
.ruff_cache

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints
notebooks/

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
pyvenv.cfg

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# Jetbrains
.idea
modules/
*.swp

# VsCode
.vscode

# pipenv
Pipfile
Pipfile.lock

# pyright
pyrightconfig.json
@@ -0,0 +1,6 @@
repos:
- repo: https://github.com/psf/black
rev: 22.3.0
hooks:
- id: black
language_version: python3
@@ -0,0 +1,3 @@
poetry_requirements(
name="poetry",
)
@@ -0,0 +1,17 @@
GIT_ROOT ?= $(shell git rev-parse --show-toplevel)

help: ## Show all Makefile targets.
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'

format: ## Run code autoformatters (black).
pre-commit install
git ls-files | xargs pre-commit run black --files ||true

lint: ## Run linters: pre-commit (black, ruff, codespell) and mypy
pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files

test: ## Run tests via pytest.
pytest tests

watch-docs: ## Build and watch documentation.
sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
@@ -0,0 +1,44 @@
# LlamaIndex Readers Integration: Youtube-Metadata

```bash
pip install llama-index-readers-youtube-metadata
```

This loader fetches the metadata of YouTube videos using the Google YouTube Data API (`https://www.googleapis.com/youtube/v3/videos?part=snippet,statistics&id={videos_string}&key={api_key}`). You must have a Google API key to use it.

Transcripts of YouTube videos are fetched using the `youtube_transcript_api` Python package.

## Usage

Simply pass an array of YouTube Video_ID into `load_data`.

```python
from llama_index.readers.youtube_metadata import YouTubeMetaData

api_key = "Axxxxx" # youtube API Key

video_ids = ["S_0hBL4ILAg", "a2skIq6hFiY"]

youtube_meta = YouTubeMetaData(api_key=api_key)
details = youtube_meta.load_data(video_ids)
```

This can be combined with the YoutubeTranscriptReader to provide more information for RAG AI inquiries.

```python
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
from llama_index.readers.youtube_metadata import (
    YouTubeMetaData,
    YouTubeMetaDataAndTranscript,
)

video_ids = ["S_0hBL4ILAg", "a2skIq6hFiY"] # Example video IDs
yt_metadata = YouTubeMetaData(api_key=api_key)
print("Testing YouTubeMetaData...")
print(yt_metadata.load_data(video_ids))

yt_meta_transcript = YouTubeMetaDataAndTranscript(api_key=api_key)
print("Testing YouTubeMetaDataAndTranscript...")
print(yt_meta_transcript.load_data(video_ids))
```

The video ID for a YouTube video is right in the URL. In this URL: https://www.youtube.com/watch?v=a2skIq6hFiY&t=60s

The video ID is `a2skIq6hFiY` (the trailing `&t=60s` is a separate timestamp query parameter, not part of the ID).
@@ -0,0 +1 @@
python_sources()
@@ -0,0 +1,6 @@
from llama_index.readers.youtube_metadata.base import (
YouTubeMetaData,
YouTubeMetaDataAndTranscript,
)

__all__ = ["YouTubeMetaData", "YouTubeMetaDataAndTranscript"]
@@ -0,0 +1,66 @@
# YoutubeMetaData.py
# Class to return Youtube Meta data for a video ID
from typing import Any, Dict, List, Optional

import requests
from pydantic import Field
from youtube_transcript_api import YouTubeTranscriptApi

from llama_index.core.readers.base import BasePydanticReader


class YouTubeMetaData(BasePydanticReader):
    """Reader that fetches YouTube video metadata via the YouTube Data API v3.

    Queries the ``videos`` endpoint (``part=snippet,statistics``) in batches
    and returns, per video ID: title, description, publish date, statistics,
    tags, and the watch URL.
    """

    # Google API key with access to the YouTube Data API v3.
    api_key: str

    def load_data(self, video_ids: List[str]) -> Dict[str, Dict[str, Any]]:
        """Fetch metadata for the given YouTube video IDs.

        Args:
            video_ids: YouTube video IDs (e.g. ``"a2skIq6hFiY"``).

        Returns:
            Mapping of video ID to its metadata dict. IDs absent from the
            API response (invalid/private videos, failed batches) are
            omitted rather than raising.
        """
        details: Dict[str, Dict[str, Any]] = {}

        def chunks(lst: List[str], n: int):
            """Yield successive n-sized chunks from lst."""
            for i in range(0, len(lst), n):
                yield lst[i : i + n]

        # The videos endpoint accepts multiple comma-separated IDs per call;
        # batch requests to limit URL length and the number of round trips.
        for chunk in chunks(video_ids, 20):
            # Let requests build the query string so IDs and the API key are
            # properly URL-encoded; a timeout prevents hanging on a stalled
            # connection.
            response = requests.get(
                "https://www.googleapis.com/youtube/v3/videos",
                params={
                    "part": "snippet,statistics",
                    "id": ",".join(chunk),
                    "key": self.api_key,
                },
                timeout=30,
            ).json()
            if "items" not in response:
                # Best-effort: report the error payload and continue with the
                # remaining batches instead of aborting the whole load.
                print("Error in API response:", response)
                continue

            for item in response["items"]:
                video_id = item["id"]
                snippet = item["snippet"]
                details[video_id] = {
                    "title": snippet["title"],
                    "description": snippet["description"],
                    "publishDate": snippet["publishedAt"],
                    "statistics": item["statistics"],
                    "tags": snippet.get("tags", []),
                    "url": f"https://www.youtube.com/watch?v={video_id}",
                }

        return details


class YouTubeMetaDataAndTranscript(BasePydanticReader):
    """Reader that combines YouTube video metadata with transcripts.

    For each video ID it fetches metadata through :class:`YouTubeMetaData`
    and the transcript through ``youtube_transcript_api``. Transcript
    failures (disabled/unavailable transcripts) are recorded as the error
    message string rather than aborting the batch.
    """

    api_key: str = Field(..., description="API key for YouTube data access")
    # Optional so pydantic does not require a loader at construction time;
    # created lazily from api_key on first use.
    metadata_loader: Optional[YouTubeMetaData] = None
    # Transcript backend; anything exposing get_transcript(video_id).
    transcript_loader: Any = YouTubeTranscriptApi

    def initialize_loaders(self) -> None:
        """Create the metadata loader on first use (idempotent)."""
        if not self.metadata_loader:
            self.metadata_loader = YouTubeMetaData(api_key=self.api_key)

    def load_data(self, video_ids: List[str]) -> Dict[str, Any]:
        """Fetch metadata and transcript for each of the given video IDs.

        Args:
            video_ids: YouTube video IDs.

        Returns:
            Mapping of video ID to ``{"metadata": ..., "transcript": ...}``,
            where ``transcript`` is either the fetched transcript or an
            error message string when fetching failed.
        """
        self.initialize_loaders()  # Make sure loaders are initialized.
        all_details: Dict[str, Any] = {}
        for video_id in video_ids:
            metadata = self.metadata_loader.load_data([video_id])
            try:
                transcripts = self.transcript_loader.get_transcript(video_id)
            except Exception as e:
                # Best-effort: keep the error text so callers can see why a
                # transcript is missing without losing the other videos.
                transcripts = str(e)
            all_details[video_id] = {
                "metadata": metadata.get(video_id, {}),
                "transcript": transcripts,
            }
        return all_details