Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Includes 30 custom nodes committed directly, 7 Civitai-exclusive loras stored via Git LFS, and a setup script that installs all dependencies and downloads HuggingFace-hosted models on vast.ai. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
58 lines
1.9 KiB
Python
58 lines
1.9 KiB
Python
# HF architecture dict:
# Maps a HuggingFace architecture name to the attribute names used by its
# transformers config class, so generic code can look up e.g. "width" and
# get the right config field ("hidden_size" for BERT-style, "d_model" for T5).


def _bert_style_entry():
    """Return a fresh translation entry for BERT-family architectures.

    roberta, xlm-roberta, and bert share identical config attribute names
    (see RobertaConfig / XLMRobertaConfig / BertConfig); building each entry
    from this factory avoids triplicating the literal while keeping every
    entry a distinct dict object, as the original literals were.
    """
    return {
        "config_names": {
            "context_length": "max_position_embeddings",
            "vocab_size": "vocab_size",
            "width": "hidden_size",
            "heads": "num_attention_heads",
            "layers": "num_hidden_layers",
            "layer_attr": "layer",
            "token_embeddings_attr": "embeddings",
        },
        "pooler": "mean_pooler",
    }


arch_dict = {
    # https://huggingface.co/docs/transformers/model_doc/roberta#roberta
    "roberta": _bert_style_entry(),
    # https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig
    "xlm-roberta": _bert_style_entry(),
    # https://huggingface.co/docs/transformers/model_doc/mt5#mt5
    "mt5": {
        "config_names": {
            # unlimited seqlen — T5 uses relative position embeddings, so the
            # config exposes no max-length attribute; "" marks "not applicable".
            # https://github.com/google-research/text-to-text-transfer-transformer/issues/273
            # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374
            "context_length": "",
            "vocab_size": "vocab_size",
            "width": "d_model",
            "heads": "num_heads",
            "layers": "num_layers",
            "layer_attr": "block",
            "token_embeddings_attr": "embed_tokens",
        },
        "pooler": "mean_pooler",
    },
    # https://huggingface.co/docs/transformers/model_doc/bert
    "bert": _bert_style_entry(),
}
|