BAAI / Emu2-Chat

huggingface.co
Total runs: 192
24-hour runs: 2
7-day runs: 8
30-day runs: -3
Model's Last Updated: December 21 2023
text-generation

Introduction of Emu2-Chat

Model Details of Emu2-Chat

Emu2-Chat

Paper | 🤗HF Demo | Demo | Project Page | Github

Model Weights
Model name Weight
Emu2 🤗 HF link
Emu2-Chat 🤗 HF link
Emu2-Gen 🤗 HF link
Inference (Huggingface Version)
Single GPU
from PIL import Image
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")

# bfloat16 + low_cpu_mem_usage keeps host-memory usage down during loading;
# trust_remote_code is required because Emu2 ships custom modeling code.
model = AutoModelForCausalLM.from_pretrained(
    "BAAI/Emu2-Chat",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True).to('cuda').eval()


# `[<IMG_PLH>]` is the image placeholder which will be replaced by image embeddings.
# the number of `[<IMG_PLH>]` should be equal to the number of input images

query = '[<IMG_PLH>]Describe the image in details:'
# Fixed URL: the original omitted the `/blob/<branch>/` segment and was not a
# valid GitHub file path; `?raw=true` on a blob URL redirects to the raw file.
image = Image.open(
    requests.get(
        'https://github.com/baaivision/Emu/blob/main/Emu2/examples/blue_black_1_top_left.jpg?raw=true',
        stream=True).raw).convert('RGB')


# build_input_ids (custom code shipped with the model) tokenizes the text and
# expands each image placeholder to match the supplied images.
inputs = model.build_input_ids(
    text=[query],
    tokenizer=tokenizer,
    image=[image]
)

with torch.no_grad():
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image=inputs["image"].to(torch.bfloat16),  # image dtype must match the model's bfloat16 weights
        max_new_tokens=64,
        length_penalty=-1)

output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)

Interleaved image and text

from PIL import Image
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")

# bfloat16 + low_cpu_mem_usage keeps host-memory usage down during loading;
# trust_remote_code is required because Emu2 ships custom modeling code.
model = AutoModelForCausalLM.from_pretrained(
    "BAAI/Emu2-Chat",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True).to('cuda').eval()

# `[<IMG_PLH>]` is the image placeholder which will be replaced by image embeddings.
# the number of `[<IMG_PLH>]` should be equal to the number of input images

# Four placeholders -> four images below; the last placeholder asks the model
# to describe the final (unlabeled) image in the few-shot pattern.
# NOTE(review): the second caption says "top left" while the second image file
# is `yellow_white_2_top_right.jpg` — confirm against the upstream example.
query = "[<IMG_PLH>][red, white, 3, bottom left].[<IMG_PLH>][yellow, white, 2, top left].[<IMG_PLH>][green, black, 4, bottom right][<IMG_PLH>]"

# Fixed URLs: the originals omitted the `/blob/<branch>/` segment and were not
# valid GitHub file paths; `?raw=true` on a blob URL redirects to the raw file.
images = [
    Image.open(requests.get('https://github.com/baaivision/Emu/blob/main/Emu2/examples/red_white_3_bottom_left.jpg?raw=true', stream=True).raw).convert('RGB'),
    Image.open(requests.get('https://github.com/baaivision/Emu/blob/main/Emu2/examples/yellow_white_2_top_right.jpg?raw=true', stream=True).raw).convert('RGB'),
    Image.open(requests.get('https://github.com/baaivision/Emu/blob/main/Emu2/examples/green_black_4_bottom_right.jpg?raw=true', stream=True).raw).convert('RGB'),
    Image.open(requests.get('https://github.com/baaivision/Emu/blob/main/Emu2/examples/blue_black_1_top_left.jpg?raw=true', stream=True).raw).convert('RGB'),
]

# build_input_ids (custom code shipped with the model) tokenizes the text and
# expands each image placeholder to match the supplied images, in order.
inputs = model.build_input_ids(
    text=[query],
    tokenizer=tokenizer,
    image=images
)

with torch.no_grad():
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image=inputs["image"].to(torch.bfloat16),  # image dtype must match the model's bfloat16 weights
        max_new_tokens=64,
        length_penalty=-1)

output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
Multi GPU
from PIL import Image
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch

tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")

# Instantiate the architecture on the "meta" device (no real weight tensors
# allocated) so we can plan a device map before loading the checkpoint.
with init_empty_weights():
    model = AutoModelForCausalLM.from_pretrained(
        "BAAI/Emu2-Chat",
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True)

# Split the model across two GPUs, never splitting a transformer block in half.
device_map = infer_auto_device_map(
    model,
    max_memory={0: '38GiB', 1: '38GiB'},
    no_split_module_classes=['Block', 'LlamaDecoderLayer'])
# input and output logits should be on same device
device_map["model.decoder.lm.lm_head"] = 0

# Load real weights from a local copy of the HF checkpoint and dispatch each
# submodule to its assigned device.
model = load_checkpoint_and_dispatch(
    model,
    'local/path/to/hf/version/Emu2-Chat/model',  # replace with your local checkpoint directory
    device_map=device_map).eval()

# `[<IMG_PLH>]` is the image placeholder which will be replaced by image embeddings.
# the number of `[<IMG_PLH>]` should be equal to the number of input images

query = '[<IMG_PLH>]Describe the image in details:'
# Fixed URL: the original omitted the `/blob/<branch>/` segment and was not a
# valid GitHub file path; `?raw=true` on a blob URL redirects to the raw file.
image = Image.open(
    requests.get(
        'https://github.com/baaivision/Emu/blob/main/Emu2/examples/blue_black_1_top_left.jpg?raw=true',
        stream=True).raw).convert('RGB')

# build_input_ids (custom code shipped with the model) tokenizes the text and
# expands each image placeholder to match the supplied images.
inputs = model.build_input_ids(
    text=[query],
    tokenizer=tokenizer,
    image=[image]
)

with torch.no_grad():
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image=inputs["image"].to(torch.bfloat16),  # image dtype must match the model's bfloat16 weights
        max_new_tokens=64,
        length_penalty=-1)

output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)

Interleaved image and text

from PIL import Image
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch

tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")

# Instantiate the architecture on the "meta" device (no real weight tensors
# allocated) so we can plan a device map before loading the checkpoint.
with init_empty_weights():
    model = AutoModelForCausalLM.from_pretrained(
        "BAAI/Emu2-Chat",
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True)

# Split the model across two GPUs, never splitting a transformer block in half.
device_map = infer_auto_device_map(
    model,
    max_memory={0: '38GiB', 1: '38GiB'},
    no_split_module_classes=['Block', 'LlamaDecoderLayer'])
# input and output logits should be on same device
device_map["model.decoder.lm.lm_head"] = 0

# Load real weights from a local copy of the HF checkpoint and dispatch each
# submodule to its assigned device.
model = load_checkpoint_and_dispatch(
    model,
    'local/path/to/hf/version/Emu2-Chat/model',  # replace with your local checkpoint directory
    device_map=device_map).eval()

# `[<IMG_PLH>]` is the image placeholder which will be replaced by image embeddings.
# the number of `[<IMG_PLH>]` should be equal to the number of input images
# NOTE(review): the second caption says "top left" while the second image file
# is `yellow_white_2_top_right.jpg` — confirm against the upstream example.
query = "[<IMG_PLH>][red, white, 3, bottom left].[<IMG_PLH>][yellow, white, 2, top left].[<IMG_PLH>][green, black, 4, bottom right][<IMG_PLH>]"

# Fixed URLs: the originals omitted the `/blob/<branch>/` segment and were not
# valid GitHub file paths; `?raw=true` on a blob URL redirects to the raw file.
images = [
    Image.open(requests.get('https://github.com/baaivision/Emu/blob/main/Emu2/examples/red_white_3_bottom_left.jpg?raw=true', stream=True).raw).convert('RGB'),
    Image.open(requests.get('https://github.com/baaivision/Emu/blob/main/Emu2/examples/yellow_white_2_top_right.jpg?raw=true', stream=True).raw).convert('RGB'),
    Image.open(requests.get('https://github.com/baaivision/Emu/blob/main/Emu2/examples/green_black_4_bottom_right.jpg?raw=true', stream=True).raw).convert('RGB'),
    Image.open(requests.get('https://github.com/baaivision/Emu/blob/main/Emu2/examples/blue_black_1_top_left.jpg?raw=true', stream=True).raw).convert('RGB'),
]

# build_input_ids (custom code shipped with the model) tokenizes the text and
# expands each image placeholder to match the supplied images, in order.
inputs = model.build_input_ids(
    text=[query],
    tokenizer=tokenizer,
    image=images
)

with torch.no_grad():
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image=inputs["image"].to(torch.bfloat16),  # image dtype must match the model's bfloat16 weights
        max_new_tokens=64,
        length_penalty=-1)

output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
Quantization

See the quantization guide in the Hugging Face Transformers documentation for details.

from PIL import Image
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")

# 4-bit (bitsandbytes) loading; computation is carried out in float16, so all
# floating-point inputs must also be float16 (see the generate call below).
model = AutoModelForCausalLM.from_pretrained(
    "BAAI/Emu2-Chat",
    load_in_4bit=True,
    trust_remote_code=True,
    bnb_4bit_compute_dtype=torch.float16).eval()

query = '[<IMG_PLH>]Describe the image in details:'
# Fixed URL: the original omitted the `/blob/<branch>/` segment and was not a
# valid GitHub file path; `?raw=true` on a blob URL redirects to the raw file.
image = Image.open(
    requests.get(
        'https://github.com/baaivision/Emu/blob/main/Emu2/examples/blue_black_1_top_left.jpg?raw=true',
        stream=True).raw).convert('RGB')

# build_input_ids (custom code shipped with the model) tokenizes the text and
# expands each image placeholder to match the supplied images.
inputs = model.build_input_ids(
    text=[query],
    tokenizer=tokenizer,
    image=[image]
)

with torch.no_grad():
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image=inputs["image"].to(torch.float16),  # should be torch.float16 to match bnb_4bit_compute_dtype
        max_new_tokens=64,
        length_penalty=-1)

output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
Citation

If you find Emu2 useful for your research and applications, please consider starring this repository and citing:

@article{Emu2,
    title={Generative Multimodal Models are In-Context Learners}, 
    author={Quan Sun and Yufeng Cui and Xiaosong Zhang and Fan Zhang and Qiying Yu and Zhengxiong Luo and Yueze Wang and Yongming Rao and Jingjing Liu and Tiejun Huang and Xinlong Wang},
    publisher={arXiv preprint arXiv:2312.13286},
    year={2023},
}

Runs of BAAI Emu2-Chat on huggingface.co

192
Total runs
2
24-hour runs
-2
3-day runs
8
7-day runs
-3
30-day runs

More Information About Emu2-Chat huggingface.co Model

Emu2-Chat huggingface.co

Emu2-Chat is an AI model hosted on huggingface.co by BAAI that can be tried instantly from its model page. huggingface.co offers a free trial of Emu2-Chat as well as paid usage, and the model can be called through an API from Node.js, Python, or plain HTTP.

Emu2-Chat huggingface.co Url

https://huggingface.co/BAAI/Emu2-Chat

BAAI Emu2-Chat online free

Emu2-Chat huggingface.co is an online trial and call api platform, which integrates Emu2-Chat's modeling effects, including api services, and provides a free online trial of Emu2-Chat, you can try Emu2-Chat online for free by clicking the link below.

BAAI Emu2-Chat online free url in huggingface.co:

https://huggingface.co/BAAI/Emu2-Chat

Emu2-Chat install

Emu2-Chat is an open-source model whose code is available on GitHub, so any user can find and install it from the repository. In addition, huggingface.co hosts a ready-to-use deployment of Emu2-Chat, which users can try and debug directly in the browser or access for free through the API.

Emu2-Chat install url in huggingface.co:

https://huggingface.co/BAAI/Emu2-Chat

Url of Emu2-Chat

Emu2-Chat huggingface.co Url

Provider of Emu2-Chat huggingface.co

BAAI
ORGANIZATIONS

Other API from BAAI

huggingface.co

Total runs: 4.8M
Run Growth: -14.0M
Growth Rate: -289.11%
Updated: February 22 2024
huggingface.co

Total runs: 3.7M
Run Growth: 2.1M
Growth Rate: 57.15%
Updated: July 03 2024
huggingface.co

Total runs: 3.1M
Run Growth: -431.0M
Growth Rate: -14104.14%
Updated: February 21 2024
huggingface.co

Total runs: 2.8M
Run Growth: -166.0K
Growth Rate: -5.88%
Updated: February 21 2024
huggingface.co

Total runs: 762.0K
Run Growth: 266.3K
Growth Rate: 34.94%
Updated: December 13 2023
huggingface.co

Total runs: 703.0K
Run Growth: 531.7K
Growth Rate: 75.63%
Updated: April 02 2024
huggingface.co

Total runs: 696.3K
Run Growth: 422.8K
Growth Rate: 60.72%
Updated: October 12 2023
huggingface.co

Total runs: 220.4K
Run Growth: -234.0K
Growth Rate: -106.19%
Updated: November 14 2023
huggingface.co

Total runs: 32.8K
Run Growth: 5.6K
Growth Rate: 16.97%
Updated: April 17 2024
huggingface.co

Total runs: 31.1K
Run Growth: 17.8K
Growth Rate: 57.34%
Updated: October 12 2023
huggingface.co

Total runs: 30.5K
Run Growth: 19.7K
Growth Rate: 64.73%
Updated: October 12 2023
huggingface.co

Total runs: 23.9K
Run Growth: -22.5K
Growth Rate: -94.03%
Updated: December 26 2022
huggingface.co

Total runs: 7.9K
Run Growth: 3.9K
Growth Rate: 49.61%
Updated: October 12 2023
huggingface.co

Total runs: 6.6K
Run Growth: -304
Growth Rate: -4.59%
Updated: February 22 2024
huggingface.co

Total runs: 4.0K
Run Growth: 420
Growth Rate: 10.49%
Updated: October 12 2023
huggingface.co

Total runs: 3.1K
Run Growth: 3.0K
Growth Rate: 97.65%
Updated: October 27 2023
huggingface.co

Total runs: 3.0K
Run Growth: 2.6K
Growth Rate: 85.58%
Updated: May 13 2024
huggingface.co

Total runs: 2.4K
Run Growth: 1.8K
Growth Rate: 75.57%
Updated: April 19 2024
huggingface.co

Total runs: 2.3K
Run Growth: -1.7K
Growth Rate: -75.17%
Updated: February 07 2024
huggingface.co

Total runs: 2.3K
Run Growth: 329
Growth Rate: 14.35%
Updated: August 15 2024
huggingface.co

Total runs: 2.2K
Run Growth: 973
Growth Rate: 44.15%
Updated: June 11 2024
huggingface.co

Total runs: 1.7K
Run Growth: 918
Growth Rate: 53.50%
Updated: August 15 2024
huggingface.co

Total runs: 1.3K
Run Growth: -1.3K
Growth Rate: -105.90%
Updated: September 21 2023
huggingface.co

Total runs: 1.2K
Run Growth: 990
Growth Rate: 80.95%
Updated: March 07 2024
huggingface.co

Total runs: 749
Run Growth: -29
Growth Rate: -3.87%
Updated: June 07 2024
huggingface.co

Total runs: 730
Run Growth: -5.7K
Growth Rate: -783.42%
Updated: September 18 2023
huggingface.co

Total runs: 694
Run Growth: 46
Growth Rate: 6.63%
Updated: August 15 2024
huggingface.co

Total runs: 662
Run Growth: 154
Growth Rate: 23.26%
Updated: October 29 2023
huggingface.co

Total runs: 636
Run Growth: -561
Growth Rate: -88.21%
Updated: April 02 2024
huggingface.co

Total runs: 489
Run Growth: 414
Growth Rate: 84.66%
Updated: February 07 2024
huggingface.co

Total runs: 475
Run Growth: -1.1K
Growth Rate: -229.26%
Updated: October 12 2023
huggingface.co

Total runs: 454
Run Growth: 22
Growth Rate: 4.85%
Updated: June 24 2024
huggingface.co

Total runs: 346
Run Growth: 146
Growth Rate: 42.20%
Updated: August 23 2023
huggingface.co

Total runs: 178
Run Growth: 138
Growth Rate: 77.53%
Updated: December 31 2022
huggingface.co

Total runs: 177
Run Growth: 20
Growth Rate: 11.30%
Updated: December 21 2023
huggingface.co

Total runs: 132
Run Growth: 23
Growth Rate: 17.42%
Updated: August 23 2023
huggingface.co

Total runs: 84
Run Growth: -17
Growth Rate: -20.24%
Updated: May 13 2024
huggingface.co

Total runs: 71
Run Growth: -36
Growth Rate: -50.70%
Updated: April 18 2023
huggingface.co

Total runs: 68
Run Growth: -126
Growth Rate: -185.29%
Updated: June 24 2024
huggingface.co

Total runs: 57
Run Growth: 30
Growth Rate: 52.63%
Updated: December 25 2023
huggingface.co

Total runs: 46
Run Growth: -17
Growth Rate: -36.96%
Updated: October 02 2023
huggingface.co

Total runs: 32
Run Growth: 10
Growth Rate: 31.25%
Updated: July 24 2023
huggingface.co

Total runs: 28
Run Growth: -24
Growth Rate: -85.71%
Updated: October 27 2023
huggingface.co

Total runs: 25
Run Growth: -18
Growth Rate: -72.00%
Updated: December 05 2023
huggingface.co

Total runs: 18
Run Growth: -15
Growth Rate: -83.33%
Updated: May 31 2024
huggingface.co

Total runs: 9
Run Growth: 7
Growth Rate: 77.78%
Updated: November 28 2023
huggingface.co

Total runs: 6
Run Growth: -9
Growth Rate: -150.00%
Updated: May 13 2024
huggingface.co

Total runs: 4
Run Growth: -2
Growth Rate: -50.00%
Updated: October 09 2023
huggingface.co

Total runs: 0
Run Growth: 0
Growth Rate: 0.00%
Updated: December 07 2022
huggingface.co

Total runs: 0
Run Growth: 0
Growth Rate: 0.00%
Updated: December 30 2023
huggingface.co

Total runs: 0
Run Growth: 0
Growth Rate: 0.00%
Updated: April 21 2023
huggingface.co

Total runs: 0
Run Growth: 0
Growth Rate: 0.00%
Updated: August 13 2023
huggingface.co

Total runs: 0
Run Growth: 0
Growth Rate: 0.00%
Updated: March 26 2023
huggingface.co

Total runs: 0
Run Growth: 0
Growth Rate: 0.00%
Updated: March 18 2024