{"work":{"id":"892bf722-73fc-4b2e-858d-e1dddc728b4c","openalex_id":null,"doi":null,"arxiv_id":"2404.16790","raw_key":null,"title":"Seed-bench-2-plus: Benchmarking multimodal large language models with text-rich visual comprehension","authors":null,"authors_text":"Bohao Li, Yuying Ge, Yi Chen, Yixiao Ge, Ruimao Zhang, and Ying Shan","year":2024,"venue":null,"abstract":null,"external_url":"https://arxiv.org/abs/2404.16790","cited_by_count":null,"metadata_source":"arxiv_reference","metadata_fetched_at":"2026-05-16T07:59:32.781253+00:00","pith_arxiv_id":null,"created_at":"2026-05-10T09:03:24.874667+00:00","updated_at":"2026-05-16T07:59:32.781253+00:00","title_quality_ok":true,"display_title":"SEED-Bench-2-Plus: Benchmarking multimodal large language models with text-rich visual comprehension","render_title":"SEED-Bench-2-Plus: Benchmarking multimodal large language models with text-rich visual comprehension"},"hub":{"state":{"work_id":"892bf722-73fc-4b2e-858d-e1dddc728b4c","tier":"hub","tier_reason":"10+ Pith inbound or 1,000+ external citations","pith_inbound_count":10,"external_cited_by_count":null,"distinct_field_count":2,"first_pith_cited_at":"2024-08-23T17:59:51+00:00","last_pith_cited_at":"2026-05-13T03:50:54+00:00","author_build_status":"not_needed","summary_status":"needed","contexts_status":"needed","graph_status":"needed","ask_index_status":"not_needed","reader_status":"not_needed","recognition_status":"not_needed","updated_at":"2026-05-16T10:48:44.680977+00:00","tier_text":"hub"},"tier":"hub","role_counts":[{"context_role":"dataset","n":4}],"polarity_counts":[{"context_polarity":"use_dataset","n":4}],"runs":{},"summary":{},"graph":{},"authors":[]}}