Dataset schema (column: type, observed values/range):
- task: string, 1 unique value ("tvg")
- source: string, 1 unique value ("qvhighlights")
- video: string, 37-38 characters (clip path)
- duration: int64, 120-150 (clip length in seconds)
- tgt: list, 1-24 entries ([start, end] spans in seconds)
- conversations: list, 2 entries (one human turn, one gpt turn)

Each preview record below lists these six fields in order: the task tag, the source benchmark, the video clip path, the clip duration, the ground-truth tgt spans, and the two-turn conversation pairing the grounding query prompt with the answer template "The event happens in <vid> - <vid>."
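As a minimal sketch of how these records might be consumed, the snippet below reads samples and fills the <vid> placeholders in the gpt turn with one ground-truth span. The file name `qvhighlights_tvg.jsonl`, the JSONL storage format, and the choice of the first tgt span are assumptions for illustration only; the preview does not show how multi-span targets map onto the single "start - end" answer string.

```python
import json

def build_answer(record, span_index=0):
    """Fill the <vid> placeholders in the gpt turn with one tgt span.

    Assumption: the first span is used when tgt holds several [start, end] pairs.
    """
    start, end = record["tgt"][span_index]
    template = record["conversations"][1]["value"]  # "The event happens in <vid> - <vid>."
    return template.replace("<vid>", str(start), 1).replace("<vid>", str(end), 1)

# Hypothetical file name and format; the actual storage of these records is not shown here.
with open("qvhighlights_tvg.jsonl") as f:
    for line in f:
        record = json.loads(line)
        prompt = record["conversations"][0]["value"]  # "<image>\nLocate the visual content ..."
        print(record["video"], record["duration"], record["tgt"], build_answer(record))
```

For the first record below (tgt = [[0, 150]]), build_answer would return "The event happens in 0 - 150."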
tvg
qvhighlights
qvh/videos/t_9gaLdfc_Y_360.0_510.0.mp4
150
[ [ 0, 150 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A man sitting on the driver seat of a car holding a phone and talking to the camera' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/t_9gaLdfc_Y_510.0_660.0.mp4
150
[ [ 0, 46 ], [ 50, 52 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Man talks to the camera holding his phone in the car' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/t_9gaLdfc_Y_660.0_810.0.mp4
124
[ [ 106, 124 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'Red, white, and blue end of video telling the viewer to subscribe' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eSbPJx_eH8E_60.0_210.0.mp4
150
[ [ 104, 130 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Man stands under a awning alone' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eSbPJx_eH8E_210.0_360.0.mp4
150
[ [ 84, 150 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'Boy showing some footages of Manhattan' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eSbPJx_eH8E_360.0_510.0.mp4
150
[ [ 34, 60 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'Asian man talks to a cop'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eSbPJx_eH8E_660.0_810.0.mp4
150
[ [ 84, 150 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'Man navigates around a metro station'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/PGDbwm6X-Ag_60.0_210.0.mp4
150
[ [ 0, 22 ], [ 26, 56 ], [ 60, 74 ], [ 146, 150 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'A large woman in a grey sweater holds her phone in her hands as she expresses herself on camera about her eating habits', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/PGDbwm6X-Ag_360.0_510.0.mp4
150
[ [ 0, 62 ], [ 64, 138 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Woman in gray top talks in front of her kitchen cabinets', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/PGDbwm6X-Ag_210.0_360.0.mp4
150
[ [ 24, 26 ], [ 88, 90 ], [ 92, 98 ], [ 100, 104 ], [ 108, 112 ], [ 114, 116 ], [ 124, 136 ], [ 142, 144 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'A woman raising and moving her right hand while talking'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/PGDbwm6X-Ag_510.0_660.0.mp4
150
[ [ 118, 132 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'A woman talks about animal crackers' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/PGDbwm6X-Ag_660.0_810.0.mp4
150
[ [ 20, 40 ], [ 42, 50 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A woman shows a sturdy-looking $5 all-rubber water bottle cleaner she really needs for thermoses' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/TH4s50qr1Jo_60.0_210.0.mp4
132
[ [ 36, 68 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Water is flooding covering the entire street and several feet in height of water is sitting up the length of the signs and traffic signals' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/WGyD9NG47Rw_60.0_210.0.mp4
150
[ [ 40, 150 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A news broadcast talks to a Florida mayor during a 2020 hurricane' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/OG0ZEQamfGs_360.0_510.0.mp4
150
[ [ 0, 150 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Man in baseball cap is riding in a car at night' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/OG0ZEQamfGs_60.0_210.0.mp4
150
[ [ 74, 82 ], [ 84, 108 ], [ 110, 118 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'A man is pulling big stacks of cash out of his pack and putting them on the desk'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/OG0ZEQamfGs_210.0_360.0.mp4
150
[ [ 0, 150 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'A man talks to the camera at night and makes hand gestures', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/OG0ZEQamfGs_510.0_660.0.mp4
150
[ [ 44, 76 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A black man is holding and showing off a thick stack of cash' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/OG0ZEQamfGs_660.0_810.0.mp4
150
[ [ 4, 20 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A guy with black top is holding stacks of dollars and talking at the same time' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/9gb6KJYFJiw_210.0_360.0.mp4
150
[ [ 0, 20 ], [ 40, 106 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Boris Johnson speaks at his desk with a UK flag to the right' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/9gb6KJYFJiw_360.0_510.0.mp4
150
[ [ 0, 14 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'Women are white T shaped podiums give a speech which an interpretator works beside them'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/9gb6KJYFJiw_60.0_210.0.mp4
150
[ [ 58, 104 ], [ 108, 150 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Boris Johnson speaks next to the UK flag' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eYTTYnFgKQc_360.0_510.0.mp4
150
[ [ 66, 72 ], [ 76, 86 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A family is eating dinner together' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eYTTYnFgKQc_360.0_510.0.mp4
150
[ [ 66, 86 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A family eats dinner together' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eYTTYnFgKQc_210.0_360.0.mp4
150
[ [ 64, 106 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'An Indian couple feeding an adult woman wearing a dress' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eYTTYnFgKQc_60.0_210.0.mp4
150
[ [ 0, 130 ], [ 134, 150 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Two women smile and talk to the camera' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eYTTYnFgKQc_210.0_360.0.mp4
150
[ [ 64, 88 ], [ 90, 106 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A woman and a man touching another woman's forehead and feeding her something' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eYTTYnFgKQc_60.0_210.0.mp4
150
[ [ 104, 128 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A woman in a red dress raising her right hand and showing three bracelets she is wearing' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eYTTYnFgKQc_510.0_660.0.mp4
150
[ [ 112, 144 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A woman is showing an area which is decorated with blue and yellow balloons on the wall' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/eYTTYnFgKQc_660.0_810.0.mp4
150
[ [ 26, 44 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Woman lights and blows out candles on a cake', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Zg3euh8V70I_210.0_360.0.mp4
150
[ [ 28, 50 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A guy about to enjoy a beef kebab and excited about it' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Zg3euh8V70I_60.0_210.0.mp4
150
[ [ 72, 76 ], [ 78, 110 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Man and woman walk down a sidewalk together' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Zg3euh8V70I_360.0_510.0.mp4
150
[ [ 50, 88 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'The rear view of people riding in the car together', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Zg3euh8V70I_660.0_810.0.mp4
150
[ [ 42, 78 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'a woman orders and eats ramen' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/H0iSh9A7rLI_210.0_360.0.mp4
150
[ [ 56, 100 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A man in a blue and white adidas jacket is sitting on the couch relaxing with his friend' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/H0iSh9A7rLI_60.0_210.0.mp4
150
[ [ 76, 90 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'A man is seen looking in his closet to pick out some clothes'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/H0iSh9A7rLI_360.0_510.0.mp4
150
[ [ 20, 24 ], [ 30, 66 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A man in black jacket and a man in white hoodie are talking in front a camera' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/fqtHFwlPRCI_60.0_210.0.mp4
150
[ [ 108, 124 ], [ 126, 150 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Little boy enjoy cycling in the street' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/fqtHFwlPRCI_360.0_510.0.mp4
150
[ [ 72, 88 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'Kid in blue top walks over a dead tree branch'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/fqtHFwlPRCI_210.0_360.0.mp4
150
[ [ 54, 92 ], [ 102, 108 ], [ 110, 124 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Vlogger is driving a motorcycle around a city' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/fqtHFwlPRCI_510.0_660.0.mp4
150
[ [ 78, 150 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'Two Friends are talking about the cancellation of their exams cancellation at the park' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/1crgYZDNnQc_60.0_210.0.mp4
150
[ [ 0, 12 ], [ 14, 18 ], [ 32, 42 ], [ 50, 54 ], [ 122, 124 ], [ 132, 136 ], [ 142, 148 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Blonde woman talks with a large photo wall behind her' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/1crgYZDNnQc_210.0_360.0.mp4
150
[ [ 36, 60 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A lady walking to her motor bike' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/1crgYZDNnQc_360.0_510.0.mp4
150
[ [ 40, 86 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A group of girls are sitting on the table while they talk in front of the camera' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/1crgYZDNnQc_510.0_660.0.mp4
150
[ [ 134, 148 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'A woman is picking up a candle, lighting it and putting it in the corner' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/1crgYZDNnQc_660.0_810.0.mp4
150
[ [ 0, 40 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A woman is looking over her laptop setup with a red candle beside it' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/VuNojQITwbw_210.0_360.0.mp4
150
[ [ 8, 150 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Footage of people walking through a street with no cars', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/VuNojQITwbw_60.0_210.0.mp4
150
[ [ 0, 32 ], [ 34, 66 ], [ 76, 82 ], [ 86, 132 ], [ 140, 146 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'People walk by the same street' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/VuNojQITwbw_360.0_510.0.mp4
150
[ [ 0, 24 ], [ 34, 128 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'Vlogger walks down the street to a castle'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/VuNojQITwbw_510.0_660.0.mp4
150
[ [ 22, 50 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'Statues of people are on the outside of a building' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/VuNojQITwbw_660.0_810.0.mp4
150
[ [ 132, 150 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'View walking up to a cathedral' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Pkk_CsWb-PI_60.0_210.0.mp4
150
[ [ 0, 24 ], [ 30, 36 ], [ 42, 60 ], [ 66, 82 ], [ 104, 116 ], [ 120, 136 ], [ 142, 150 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A woman with dark curly hair sits on a white sofa and talks' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Pkk_CsWb-PI_210.0_360.0.mp4
150
[ [ 42, 56 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A woman is demonstrating how to search for something online and the correct page' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Pkk_CsWb-PI_360.0_510.0.mp4
126
[ [ 24, 80 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'A woman explains a tip on understanding why people watch the videos you are making', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/tiSNrKUDE4A_60.0_210.0.mp4
150
[ [ 38, 58 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Two masked women are having a conversation' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/tiSNrKUDE4A_210.0_360.0.mp4
150
[ [ 0, 2 ], [ 8, 10 ], [ 14, 16 ], [ 30, 32 ], [ 40, 42 ], [ 44, 46 ], [ 50, 52 ], [ 62, 64 ], [ 74, 82 ], [ 90, 92 ], [ 98, 100 ], [ 104, 106 ], [ 112, 114 ], [ 116, 118 ], [ 126, 128 ], [ 130, 136 ], [ 140, 146 ], [ 148, 150 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Most memorable day of my life with my parents' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/tiSNrKUDE4A_360.0_510.0.mp4
150
[ [ 36, 80 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'A woman sits at a heater to warm herself before a simple but superb dinner is served', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/8sSsKMqFbmw_210.0_360.0.mp4
150
[ [ 64, 114 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A young woman recording herself talking while driving her car' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/8sSsKMqFbmw_360.0_510.0.mp4
150
[ [ 34, 46 ], [ 48, 50 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Woman wears a fur lined parka outside' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/8sSsKMqFbmw_60.0_210.0.mp4
150
[ [ 28, 34 ], [ 36, 44 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Woman looks at her phone' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/AFxCO_DyzYM_360.0_510.0.mp4
150
[ [ 0, 6 ], [ 26, 54 ], [ 90, 108 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Two guys singing and dancing inside a car', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/AFxCO_DyzYM_210.0_360.0.mp4
150
[ [ 0, 40 ], [ 42, 52 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Two men wearing hats sing in the car together' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/AFxCO_DyzYM_60.0_210.0.mp4
150
[ [ 86, 94 ], [ 96, 104 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Men are wearing top hats in the car' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/AFxCO_DyzYM_510.0_660.0.mp4
150
[ [ 0, 14 ], [ 22, 24 ], [ 42, 58 ], [ 60, 104 ], [ 144, 150 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Bruno Mars and James Corden sing in the car' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/AFxCO_DyzYM_660.0_810.0.mp4
150
[ [ 14, 98 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Two guys inside a SUV singing out loud and dancing' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/31ZUXr-pLiw_210.0_360.0.mp4
150
[ [ 78, 104 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Two men are equally in frame in front of a bookcase', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/31ZUXr-pLiw_60.0_210.0.mp4
150
[ [ 86, 100 ], [ 104, 110 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Man in black talks to camera in a grass field' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/31ZUXr-pLiw_360.0_510.0.mp4
150
[ [ 132, 150 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Full parliament is in season', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Dn7gWOPeUu8_360.0_510.0.mp4
150
[ [ 56, 118 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A woman is showing off the historic route 66 landmark store' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Dn7gWOPeUu8_210.0_360.0.mp4
150
[ [ 38, 54 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Woman holds out a sandwich and eats it' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/Dn7gWOPeUu8_60.0_210.0.mp4
150
[ [ 0, 6 ], [ 40, 48 ], [ 54, 64 ], [ 70, 76 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Van talks to camera from a van' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/NkRKL8IsbnY_60.0_210.0.mp4
150
[ [ 18, 48 ], [ 56, 62 ], [ 66, 68 ], [ 72, 76 ], [ 78, 84 ], [ 106, 118 ], [ 126, 130 ], [ 140, 148 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Woman monologues in a pink top', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/NkRKL8IsbnY_210.0_360.0.mp4
150
[ [ 34, 56 ], [ 66, 68 ], [ 70, 72 ], [ 80, 82 ], [ 116, 126 ], [ 130, 136 ], [ 142, 146 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'Tourist on the roads of Delhi India'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/NkRKL8IsbnY_360.0_510.0.mp4
150
[ [ 100, 132 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'The woman is talking in the back seat of a moving vehicle', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/ZDbNe3mS0aw_60.0_210.0.mp4
150
[ [ 16, 20 ], [ 28, 36 ], [ 38, 42 ], [ 48, 60 ], [ 72, 78 ], [ 90, 102 ], [ 108, 130 ], [ 146, 150 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'A man rides around a city street on a bike doing stunts and tricks', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/ZDbNe3mS0aw_210.0_360.0.mp4
150
[ [ 0, 2 ], [ 4, 6 ], [ 10, 12 ], [ 28, 36 ], [ 40, 44 ], [ 52, 60 ], [ 66, 74 ], [ 76, 78 ], [ 86, 92 ], [ 96, 98 ], [ 136, 138 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A cyclist performing difficult cycle stunts' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/ZDbNe3mS0aw_360.0_510.0.mp4
144
[ [ 112, 120 ], [ 124, 142 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'A man is performing flips and jumps on a bike down a lush green hill', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/YZcJMw1XQTU_210.0_360.0.mp4
150
[ [ 30, 84 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Young African American couple taking precaution before on boarding' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/YZcJMw1XQTU_60.0_210.0.mp4
150
[ [ 0, 62 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'Woman in white top monologues while driving' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/YZcJMw1XQTU_360.0_510.0.mp4
150
[ [ 0, 22 ], [ 36, 78 ], [ 80, 118 ], [ 120, 130 ], [ 144, 150 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'A woman in black hoodie and wearing black mask is talking in front of her camera'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/YNW1L-hQq1Q_210.0_360.0.mp4
150
[ [ 4, 6 ], [ 16, 18 ], [ 24, 26 ], [ 30, 32 ], [ 40, 42 ], [ 46, 48 ], [ 66, 68 ], [ 72, 74 ], [ 78, 80 ], [ 82, 90 ], [ 92, 94 ], [ 96, 98 ], [ 100, 102 ], [ 108, 110 ], [ 114, 116 ], [ 120, 122 ], [ 126, 128 ], [ 134, 136 ], [ 140, 142 ], [ 146, 148 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Travel with me India VLOG', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/YNW1L-hQq1Q_60.0_210.0.mp4
150
[ [ 20, 48 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'man making vlog in dubai outside hospital wearing mask during lockdown' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/YNW1L-hQq1Q_360.0_510.0.mp4
150
[ [ 72, 100 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'A man talks up close to the camera with his mask on', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/yV8rE2z20ok_360.0_510.0.mp4
150
[ [ 0, 32 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A woman talks to camera about ice cream and shows some off' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/yV8rE2z20ok_210.0_360.0.mp4
150
[ [ 2, 12 ], [ 30, 36 ], [ 60, 68 ], [ 80, 100 ], [ 148, 150 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Blonde woman vlogs while walking down the street,' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/yV8rE2z20ok_60.0_210.0.mp4
150
[ [ 0, 60 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A blonde woman is getting pictures in front of an area decorated with various pink objects' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/GxLn7kFtTwA_60.0_210.0.mp4
150
[ [ 0, 34 ], [ 38, 74 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'A woman in a red dress holding a camera and talking in front of a brick wall' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/GxLn7kFtTwA_210.0_360.0.mp4
150
[ [ 96, 100 ], [ 108, 150 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Woman in gray hoodie is carrying a mic' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/qT6BJ4RS1BM_60.0_210.0.mp4
150
[ [ 114, 128 ], [ 132, 136 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A news anchor is talking to a reporter at a remote location through a screen from a curved wooden chair' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/qT6BJ4RS1BM_360.0_510.0.mp4
150
[ [ 38, 46 ], [ 64, 72 ], [ 108, 118 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'Woman in red blazer is talking on a tv during a interview' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/qT6BJ4RS1BM_210.0_360.0.mp4
150
[ [ 36, 50 ], [ 102, 108 ], [ 146, 150 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'Woman in red top is shownn on the tv during a interview' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/a1q_1DJDUak_210.0_360.0.mp4
150
[ [ 122, 150 ] ]
[ { "from": "human", "value": "<image>\nLocalize the visual content described by the given textual query 'A couple seated outdoors order a raw meat dish and pasta then proceed to eat' in the video, and output the start and end timestamps in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/a1q_1DJDUak_360.0_510.0.mp4
150
[ [ 112, 142 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Two people, including a man in a white t-shirt, have lunch in St Mark's square in Venice', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/a1q_1DJDUak_60.0_210.0.mp4
150
[ [ 112, 126 ] ]
[ { "from": "human", "value": "<image>\nGiven a textual query: 'Vlogger shows off the view from their hotel room by the canal', when does the described content occur in the video? Please return the timestamp in seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/a1q_1DJDUak_510.0_660.0.mp4
150
[ [ 0, 18 ] ]
[ { "from": "human", "value": "<image>\nFind the video segment that corresponds to the given textual query 'Woman in black dress rides in a gondola alone' and determine its start and end seconds.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/SsjijAXVkBE_60.0_210.0.mp4
150
[ [ 24, 58 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A walking view of shops and a plane in a airport' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/SsjijAXVkBE_210.0_360.0.mp4
150
[ [ 110, 150 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'A woman vlogs to show the items she bought from a store' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/SsjijAXVkBE_360.0_510.0.mp4
150
[ [ 62, 68 ], [ 84, 92 ], [ 98, 108 ] ]
[ { "from": "human", "value": "<image>\nLocate the visual content mentioned in the text query 'A woman wearing a black mask is holding a phone and talking as she walks' within the video using timestamps.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/5LPS7E-0tuA_360.0_510.0.mp4
150
[ [ 38, 62 ], [ 72, 84 ] ]
[ { "from": "human", "value": "<image>\nThe given natural language query 'People are walking down the street in slow motion' is semantically aligned with a video moment, please give the start time and end time of the video moment.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]
tvg
qvhighlights
qvh/videos/5LPS7E-0tuA_210.0_360.0.mp4
150
[ [ 0, 20 ] ]
[ { "from": "human", "value": "<image>\nDetect and report the start and end timestamps of the video segment that semantically matches the given textual query 'Many people are walking down a busy city street beside high rise buidlings'.The format of your response should be: 'The event happens in <start time> - <end time>''." }, { "from": "gpt", "value": "The event happens in <vid> - <vid>." } ]