/Users/buildslave/jenkins/workspace/coverage/llvm-project/lldb/source/Plugins/ObjectContainer/BSD-Archive/ObjectContainerBSDArchive.cpp
Line | Count | Source
1 | | //===-- ObjectContainerBSDArchive.cpp -------------------------------------===//
2 | | //
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 | | // See https://llvm.org/LICENSE.txt for license information.
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 | | //
7 | | //===----------------------------------------------------------------------===//
8 | |
9 | | #include "ObjectContainerBSDArchive.h"
10 | |
11 | | #if defined(_WIN32) || defined(__ANDROID__)
12 | | // Defines from ar, missing on Windows
13 | | #define SARMAG 8
14 | | #define ARFMAG "`\n"
15 | |
16 | | typedef struct ar_hdr {
17 | | char ar_name[16];
18 | | char ar_date[12];
19 | | char ar_uid[6], ar_gid[6];
20 | | char ar_mode[8];
21 | | char ar_size[10];
22 | | char ar_fmag[2];
23 | | } ar_hdr;
24 | | #else
25 | | #include <ar.h>
26 | | #endif
27 | |
28 | | #include "lldb/Core/Module.h"
29 | | #include "lldb/Core/ModuleSpec.h"
30 | | #include "lldb/Core/PluginManager.h"
31 | | #include "lldb/Host/FileSystem.h"
32 | | #include "lldb/Symbol/ObjectFile.h"
33 | | #include "lldb/Utility/ArchSpec.h"
34 | | #include "lldb/Utility/LLDBLog.h"
35 | | #include "lldb/Utility/Stream.h"
36 | | #include "lldb/Utility/Timer.h"
37 | |
38 | | #include "llvm/Object/Archive.h"
39 | | #include "llvm/Support/MemoryBuffer.h"
40 | |
41 | | using namespace lldb;
42 | | using namespace lldb_private;
43 | |
44 | | using namespace llvm::object;
45 | |
46 | | LLDB_PLUGIN_DEFINE(ObjectContainerBSDArchive)
47 | |
48 | 12 | ObjectContainerBSDArchive::Object::Object() : ar_name() {} |
49 | | |
50 | 92 | void ObjectContainerBSDArchive::Object::Clear() { |
51 | 92 | ar_name.Clear(); |
52 | 92 | modification_time = 0; |
53 | 92 | size = 0; |
54 | 92 | file_offset = 0; |
55 | 92 | file_size = 0; |
56 | 92 | } |
57 | | |
58 | 0 | void ObjectContainerBSDArchive::Object::Dump() const { |
59 | 0 | printf("name = \"%s\"\n", ar_name.GetCString()); |
60 | 0 | printf("mtime = 0x%8.8" PRIx32 "\n", modification_time); |
61 | 0 | printf("size = 0x%8.8" PRIx32 " (%" PRIu32 ")\n", size, size); |
62 | 0 | printf("file_offset = 0x%16.16" PRIx64 " (%" PRIu64 ")\n", file_offset, |
63 | 0 | file_offset); |
64 | 0 | printf("file_size = 0x%16.16" PRIx64 " (%" PRIu64 ")\n\n", file_size, |
65 | 0 | file_size); |
66 | 0 | } |
67 | | |
68 | | ObjectContainerBSDArchive::Archive::Archive(const lldb_private::ArchSpec &arch, |
69 | | const llvm::sys::TimePoint<> &time, |
70 | | lldb::offset_t file_offset, |
71 | | lldb_private::DataExtractor &data, |
72 | | ArchiveType archive_type) |
73 | 12 | : m_arch(arch), m_modification_time(time), m_file_offset(file_offset), |
74 | 12 | m_objects(), m_data(data), m_archive_type(archive_type) {} |
75 | | |
76 | | Log *l = GetLog(LLDBLog::Object); |
77 | 12 | ObjectContainerBSDArchive::Archive::~Archive() = default; |
78 | | |
79 | 12 | size_t ObjectContainerBSDArchive::Archive::ParseObjects() { |
80 | 12 | DataExtractor &data = m_data; |
81 | | |
82 | 12 | std::unique_ptr<llvm::MemoryBuffer> mem_buffer = |
83 | 12 | llvm::MemoryBuffer::getMemBuffer( |
84 | 12 | llvm::StringRef((const char *)data.GetDataStart(), |
85 | 12 | data.GetByteSize()), |
86 | 12 | llvm::StringRef(), |
87 | 12 | /*RequiresNullTerminator=*/false); |
88 | | |
89 | 12 | auto exp_ar = llvm::object::Archive::create(mem_buffer->getMemBufferRef()); |
90 | 12 | if (!exp_ar) { |
91 | 0 | LLDB_LOG_ERROR(l, exp_ar.takeError(), "failed to create archive: {0}"); |
92 | 0 | return 0; |
93 | 0 | } |
94 | 12 | auto llvm_archive = std::move(exp_ar.get()); |
95 | | |
96 | 12 | llvm::Error iter_err = llvm::Error::success(); |
97 | 12 | Object obj; |
98 | 92 | for (const auto &child : llvm_archive->children(iter_err)) {
99 | 92 | obj.Clear(); |
100 | 92 | auto exp_name = child.getName(); |
101 | 92 | if (exp_name) { |
102 | 92 | obj.ar_name = ConstString(exp_name.get()); |
103 | 92 | } else { |
104 | 0 | LLDB_LOG_ERROR(l, exp_name.takeError(), |
105 | 0 | "failed to get archive object name: {0}"); |
106 | 0 | continue; |
107 | 0 | } |
108 | | |
109 | 92 | auto exp_mtime = child.getLastModified(); |
110 | 92 | if (exp_mtime) { |
111 | 92 | obj.modification_time = |
112 | 92 | std::chrono::duration_cast<std::chrono::seconds>( |
113 | 92 | std::chrono::time_point_cast<std::chrono::seconds>( |
114 | 92 | exp_mtime.get()).time_since_epoch()).count(); |
115 | 92 | } else { |
116 | 0 | LLDB_LOG_ERROR(l, exp_mtime.takeError(), |
117 | 0 | "failed to get archive object time: {0}"); |
118 | 0 | continue; |
119 | 0 | } |
120 | | |
121 | 92 | auto exp_size = child.getRawSize(); |
122 | 92 | if (exp_size) { |
123 | 92 | obj.size = exp_size.get(); |
124 | 92 | } else { |
125 | 0 | LLDB_LOG_ERROR(l, exp_size.takeError(), |
126 | 0 | "failed to get archive object size: {0}"); |
127 | 0 | continue; |
128 | 0 | } |
129 | | |
130 | 92 | obj.file_offset = child.getDataOffset(); |
131 | | |
132 | 92 | auto exp_file_size = child.getSize(); |
133 | 92 | if (exp_file_size) { |
134 | 92 | obj.file_size = exp_file_size.get(); |
135 | 92 | } else { |
136 | 0 | LLDB_LOG_ERROR(l, exp_file_size.takeError(), |
137 | 0 | "failed to get archive object file size: {0}"); |
138 | 0 | continue; |
139 | 0 | } |
140 | 92 | m_object_name_to_index_map.Append(obj.ar_name, m_objects.size()); |
141 | 92 | m_objects.push_back(obj); |
142 | 92 | } |
143 | 12 | if (iter_err) { |
144 | 0 | LLDB_LOG_ERROR(l, std::move(iter_err), |
145 | 0 | "failed to iterate over archive objects: {0}"); |
146 | 0 | } |
147 | | // Now sort all of the object name pointers |
148 | 12 | m_object_name_to_index_map.Sort(); |
149 | 12 | return m_objects.size(); |
150 | 12 | } |
151 | | |
152 | | ObjectContainerBSDArchive::Object * |
153 | | ObjectContainerBSDArchive::Archive::FindObject( |
154 | 59 | ConstString object_name, const llvm::sys::TimePoint<> &object_mod_time) { |
155 | 59 | const ObjectNameToIndexMap::Entry *match = |
156 | 59 | m_object_name_to_index_map.FindFirstValueForName(object_name); |
157 | 59 | if (!match) |
158 | 0 | return nullptr; |
159 | 59 | if (object_mod_time == llvm::sys::TimePoint<>()) |
160 | 0 | return &m_objects[match->value]; |
161 | | |
162 | 59 | const uint64_t object_modification_date = llvm::sys::toTimeT(object_mod_time); |
163 | 59 | if (m_objects[match->value].modification_time == object_modification_date) |
164 | 56 | return &m_objects[match->value]; |
165 | | |
166 | 3 | const ObjectNameToIndexMap::Entry *next_match = |
167 | 3 | m_object_name_to_index_map.FindNextValueForName(match); |
168 | 3 | while (next_match) { |
169 | 2 | if (m_objects[next_match->value].modification_time == |
170 | 2 | object_modification_date) |
171 | 2 | return &m_objects[next_match->value]; |
172 | 0 | next_match = m_object_name_to_index_map.FindNextValueForName(next_match); |
173 | 0 | } |
174 | | |
175 | 1 | return nullptr; |
176 | 3 | } |
177 | | |
178 | | ObjectContainerBSDArchive::Archive::shared_ptr |
179 | | ObjectContainerBSDArchive::Archive::FindCachedArchive( |
180 | | const FileSpec &file, const ArchSpec &arch, |
181 | 122 | const llvm::sys::TimePoint<> &time, lldb::offset_t file_offset) { |
182 | 122 | std::lock_guard<std::recursive_mutex> guard(Archive::GetArchiveCacheMutex()); |
183 | 122 | shared_ptr archive_sp; |
184 | 122 | Archive::Map &archive_map = Archive::GetArchiveCache(); |
185 | 122 | Archive::Map::iterator pos = archive_map.find(file); |
186 | | // Don't cache a value for "archive_map.end()" below since we might delete an |
187 | | // archive entry... |
188 | 166 | while (pos != archive_map.end() && pos->first == file) {
189 | 94 | bool match = true; |
190 | 94 | if (arch.IsValid() && |
191 | 94 | !pos->second->GetArchitecture().IsCompatibleMatch(arch)) |
192 | 0 | match = false; |
193 | 94 | else if (file_offset != LLDB_INVALID_OFFSET && |
194 | 94 | pos->second->GetFileOffset() != file_offset) |
195 | 44 | match = false; |
196 | 94 | if (match) { |
197 | 50 | if (pos->second->GetModificationTime() == time) { |
198 | 50 | return pos->second; |
199 | 50 | } else { |
200 | | // We have a file at the same path with the same architecture whose |
201 | | // modification time doesn't match. It doesn't make sense for us to |
202 | | // continue to use this BSD archive since we cache only the object info |
203 | | // which consists of file time info and also the file offset and file |
204 | | // size of any contained objects. Since this information is now out of |
205 | | // date, we won't get the correct information if we go and extract the |
206 | | // file data, so we should remove the old and outdated entry. |
207 | 0 | archive_map.erase(pos); |
208 | 0 | pos = archive_map.find(file); |
209 | 0 | continue; // Continue to next iteration so we don't increment pos |
210 | | // below... |
211 | 0 | } |
212 | 50 | } |
213 | 44 | ++pos; |
214 | 44 | } |
215 | 72 | return archive_sp; |
216 | 122 | } |
217 | | |
218 | | ObjectContainerBSDArchive::Archive::shared_ptr |
219 | | ObjectContainerBSDArchive::Archive::ParseAndCacheArchiveForFile( |
220 | | const FileSpec &file, const ArchSpec &arch, |
221 | | const llvm::sys::TimePoint<> &time, lldb::offset_t file_offset, |
222 | 12 | DataExtractor &data, ArchiveType archive_type) { |
223 | 12 | shared_ptr archive_sp( |
224 | 12 | new Archive(arch, time, file_offset, data, archive_type)); |
225 | 12 | if (archive_sp) { |
226 | 12 | const size_t num_objects = archive_sp->ParseObjects(); |
227 | 12 | if (num_objects > 0) { |
228 | 12 | std::lock_guard<std::recursive_mutex> guard( |
229 | 12 | Archive::GetArchiveCacheMutex()); |
230 | 12 | Archive::GetArchiveCache().insert(std::make_pair(file, archive_sp)); |
231 | 12 | } else { |
232 | 0 | archive_sp.reset(); |
233 | 0 | } |
234 | 12 | } |
235 | 12 | return archive_sp; |
236 | 12 | } |
237 | | |
238 | | ObjectContainerBSDArchive::Archive::Map & |
239 | 134 | ObjectContainerBSDArchive::Archive::GetArchiveCache() { |
240 | 134 | static Archive::Map g_archive_map; |
241 | 134 | return g_archive_map; |
242 | 134 | } |
243 | | |
244 | | std::recursive_mutex & |
245 | 134 | ObjectContainerBSDArchive::Archive::GetArchiveCacheMutex() { |
246 | 134 | static std::recursive_mutex g_archive_map_mutex; |
247 | 134 | return g_archive_map_mutex; |
248 | 134 | } |
249 | | |
250 | 3.92k | void ObjectContainerBSDArchive::Initialize() { |
251 | 3.92k | PluginManager::RegisterPlugin(GetPluginNameStatic(), |
252 | 3.92k | GetPluginDescriptionStatic(), CreateInstance, |
253 | 3.92k | GetModuleSpecifications); |
254 | 3.92k | } |
255 | | |
256 | 3.92k | void ObjectContainerBSDArchive::Terminate() { |
257 | 3.92k | PluginManager::UnregisterPlugin(CreateInstance); |
258 | 3.92k | } |
259 | | |
260 | | ObjectContainer *ObjectContainerBSDArchive::CreateInstance( |
261 | | const lldb::ModuleSP &module_sp, DataBufferSP &data_sp, |
262 | | lldb::offset_t data_offset, const FileSpec *file, |
263 | 170 | lldb::offset_t file_offset, lldb::offset_t length) { |
264 | 170 | ConstString object_name(module_sp->GetObjectName()); |
265 | 170 | if (!object_name) |
266 | 3 | return nullptr; |
267 | | |
268 | 167 | if (data_sp) { |
269 | | // We have data, which means this is the first 512 bytes of the file. Check
270 | | // to see if the magic bytes match and if they do, read the entire table of
271 | | // contents for the archive and cache it.
272 | 57 | DataExtractor data; |
273 | 57 | data.SetData(data_sp, data_offset, length); |
274 | 57 | ArchiveType archive_type = ObjectContainerBSDArchive::MagicBytesMatch(data); |
275 | 57 | if (file && data_sp && archive_type != ArchiveType::Invalid) { |
276 | 9 | LLDB_SCOPED_TIMERF( |
277 | 9 | "ObjectContainerBSDArchive::CreateInstance (module = %s, file = " |
278 | 9 | "%p, file_offset = 0x%8.8" PRIx64 ", file_size = 0x%8.8" PRIx64 ")", |
279 | 9 | module_sp->GetFileSpec().GetPath().c_str(), |
280 | 9 | static_cast<const void *>(file), static_cast<uint64_t>(file_offset), |
281 | 9 | static_cast<uint64_t>(length)); |
282 | | |
283 | | // Map the entire .a file to be sure that we don't lose any data if the
284 | | // file gets updated by a new build while this .a file is being used for
285 | | // debugging.
286 | 9 | DataBufferSP archive_data_sp = |
287 | 9 | FileSystem::Instance().CreateDataBuffer(*file, length, file_offset); |
288 | 9 | if (!archive_data_sp) |
289 | 0 | return nullptr; |
290 | | |
291 | 9 | lldb::offset_t archive_data_offset = 0; |
292 | | |
293 | 9 | Archive::shared_ptr archive_sp(Archive::FindCachedArchive( |
294 | 9 | *file, module_sp->GetArchitecture(), module_sp->GetModificationTime(), |
295 | 9 | file_offset)); |
296 | 9 | std::unique_ptr<ObjectContainerBSDArchive> container_up( |
297 | 9 | new ObjectContainerBSDArchive(module_sp, archive_data_sp, |
298 | 9 | archive_data_offset, file, file_offset, |
299 | 9 | length, archive_type)); |
300 | | |
301 | 9 | if (container_up) { |
302 | 9 | if (archive_sp) { |
303 | | // We already have this archive in our cache, use it |
304 | 0 | container_up->SetArchive(archive_sp); |
305 | 0 | return container_up.release(); |
306 | 9 | } else if (container_up->ParseHeader()) |
307 | 9 | return container_up.release(); |
308 | 9 | } |
309 | 9 | } |
310 | 110 | } else { |
311 | | // No data, just check for a cached archive |
312 | 110 | Archive::shared_ptr archive_sp(Archive::FindCachedArchive( |
313 | 110 | *file, module_sp->GetArchitecture(), module_sp->GetModificationTime(), |
314 | 110 | file_offset)); |
315 | 110 | if (archive_sp) { |
316 | 50 | std::unique_ptr<ObjectContainerBSDArchive> container_up( |
317 | 50 | new ObjectContainerBSDArchive(module_sp, data_sp, data_offset, file, |
318 | 50 | file_offset, length, |
319 | 50 | archive_sp->GetArchiveType())); |
320 | | |
321 | 50 | if (container_up) { |
322 | | // We already have this archive in our cache, use it |
323 | 50 | container_up->SetArchive(archive_sp); |
324 | 50 | return container_up.release(); |
325 | 50 | } |
326 | 50 | } |
327 | 110 | } |
328 | 108 | return nullptr; |
329 | 167 | } |
330 | | |
331 | | ArchiveType |
332 | 2.33k | ObjectContainerBSDArchive::MagicBytesMatch(const DataExtractor &data) { |
333 | 2.33k | uint32_t offset = 0; |
334 | 2.33k | const char *armag = (const char *)data.PeekData(offset, |
335 | 2.33k | sizeof(ar_hdr) + SARMAG); |
336 | 2.33k | if (armag == nullptr) |
337 | 0 | return ArchiveType::Invalid; |
338 | 2.33k | ArchiveType result = ArchiveType::Invalid; |
339 | 2.33k | if (strncmp(armag, ArchiveMagic, SARMAG) == 0) |
340 | 9 | result = ArchiveType::Archive; |
341 | 2.32k | else if (strncmp(armag, ThinArchiveMagic, SARMAG) == 0) |
342 | 3 | result = ArchiveType::ThinArchive; |
343 | 2.31k | else |
344 | 2.31k | return ArchiveType::Invalid; |
345 | | |
346 | 12 | armag += offsetof(struct ar_hdr, ar_fmag) + SARMAG; |
347 | 12 | if (strncmp(armag, ARFMAG, 2) == 0) |
348 | 12 | return result; |
349 | 0 | return ArchiveType::Invalid; |
350 | 12 | } |
351 | | |
352 | | ObjectContainerBSDArchive::ObjectContainerBSDArchive( |
353 | | const lldb::ModuleSP &module_sp, DataBufferSP &data_sp, |
354 | | lldb::offset_t data_offset, const lldb_private::FileSpec *file, |
355 | | lldb::offset_t file_offset, lldb::offset_t size, ArchiveType archive_type) |
356 | 59 | : ObjectContainer(module_sp, file, file_offset, size, data_sp, data_offset), |
357 | 59 | m_archive_sp() { |
358 | 59 | m_archive_type = archive_type; |
359 | 59 | } |
360 | | |
361 | 50 | void ObjectContainerBSDArchive::SetArchive(Archive::shared_ptr &archive_sp) { |
362 | 50 | m_archive_sp = archive_sp; |
363 | 50 | } |
364 | | |
365 | 59 | ObjectContainerBSDArchive::~ObjectContainerBSDArchive() = default; |
366 | | |
367 | 9 | bool ObjectContainerBSDArchive::ParseHeader() { |
368 | 9 | if (m_archive_sp.get() == nullptr) { |
369 | 9 | if (m_data.GetByteSize() > 0) { |
370 | 9 | ModuleSP module_sp(GetModule()); |
371 | 9 | if (module_sp) { |
372 | 9 | m_archive_sp = Archive::ParseAndCacheArchiveForFile( |
373 | 9 | m_file, module_sp->GetArchitecture(), |
374 | 9 | module_sp->GetModificationTime(), m_offset, m_data, m_archive_type); |
375 | 9 | } |
376 | | // Clear the m_data that contains the entire archive data and let our |
377 | | // m_archive_sp hold onto the data. |
378 | 9 | m_data.Clear(); |
379 | 9 | } |
380 | 9 | } |
381 | 9 | return m_archive_sp.get() != nullptr; |
382 | 9 | } |
383 | | |
384 | | FileSpec GetChildFileSpecificationsFromThin(llvm::StringRef childPath, |
385 | 5 | const FileSpec &parentFileSpec) { |
386 | 5 | llvm::SmallString<128> FullPath; |
387 | 5 | if (llvm::sys::path::is_absolute(childPath)) { |
388 | 0 | FullPath = childPath; |
389 | 5 | } else { |
390 | 5 | FullPath = parentFileSpec.GetDirectory().GetStringRef(); |
391 | 5 | llvm::sys::path::append(FullPath, childPath); |
392 | 5 | } |
393 | 5 | FileSpec child = FileSpec(FullPath.str(), llvm::sys::path::Style::posix); |
394 | 5 | return child; |
395 | 5 | } |
396 | | |
397 | 59 | ObjectFileSP ObjectContainerBSDArchive::GetObjectFile(const FileSpec *file) { |
398 | 59 | ModuleSP module_sp(GetModule()); |
399 | 59 | if (module_sp) { |
400 | 59 | if (module_sp->GetObjectName() && m_archive_sp) { |
401 | 59 | Object *object = m_archive_sp->FindObject( |
402 | 59 | module_sp->GetObjectName(), module_sp->GetObjectModificationTime()); |
403 | 59 | if (object) { |
404 | 58 | if (m_archive_type == ArchiveType::ThinArchive) { |
405 | | // Set file to child object file |
406 | 2 | FileSpec child = GetChildFileSpecificationsFromThin( |
407 | 2 | object->ar_name.GetStringRef(), m_file); |
408 | 2 | lldb::offset_t file_offset = 0; |
409 | 2 | lldb::offset_t file_size = object->size; |
410 | 2 | std::shared_ptr<DataBuffer> child_data_sp = |
411 | 2 | FileSystem::Instance().CreateDataBuffer(child, file_size, |
412 | 2 | file_offset); |
413 | 2 | if (!child_data_sp || |
414 | 2 | child_data_sp->GetByteSize() != object->file_size)
415 | 1 | return ObjectFileSP(); |
416 | 1 | lldb::offset_t data_offset = 0; |
417 | 1 | return ObjectFile::FindPlugin( |
418 | 1 | module_sp, &child, m_offset + object->file_offset, |
419 | 1 | object->file_size, child_data_sp, data_offset); |
420 | 2 | } |
421 | 56 | lldb::offset_t data_offset = object->file_offset; |
422 | 56 | return ObjectFile::FindPlugin( |
423 | 56 | module_sp, file, m_offset + object->file_offset, object->file_size, |
424 | 56 | m_archive_sp->GetData().GetSharedDataBuffer(), data_offset); |
425 | 58 | } |
426 | 59 | } |
427 | 59 | } |
428 | 1 | return ObjectFileSP(); |
429 | 59 | } |
430 | | |
431 | | size_t ObjectContainerBSDArchive::GetModuleSpecifications( |
432 | | const lldb_private::FileSpec &file, lldb::DataBufferSP &data_sp, |
433 | | lldb::offset_t data_offset, lldb::offset_t file_offset, |
434 | 2.27k | lldb::offset_t file_size, lldb_private::ModuleSpecList &specs) { |
435 | | |
436 | | // We have data, which means this is the first 512 bytes of the file. Check to
437 | | // see if the magic bytes match and if they do, read the entire table of
438 | | // contents for the archive and cache it.
439 | 2.27k | DataExtractor data; |
440 | 2.27k | data.SetData(data_sp, data_offset, data_sp->GetByteSize()); |
441 | 2.27k | ArchiveType archive_type = ObjectContainerBSDArchive::MagicBytesMatch(data); |
442 | 2.27k | if (!file || !data_sp || archive_type == ArchiveType::Invalid) |
443 | 2.27k | return 0; |
444 | | |
445 | 3 | const size_t initial_count = specs.GetSize(); |
446 | 3 | llvm::sys::TimePoint<> file_mod_time = FileSystem::Instance().GetModificationTime(file); |
447 | 3 | Archive::shared_ptr archive_sp( |
448 | 3 | Archive::FindCachedArchive(file, ArchSpec(), file_mod_time, file_offset)); |
449 | 3 | bool set_archive_arch = false; |
450 | 3 | if (!archive_sp) { |
451 | 3 | set_archive_arch = true; |
452 | 3 | data_sp = |
453 | 3 | FileSystem::Instance().CreateDataBuffer(file, file_size, file_offset); |
454 | 3 | if (data_sp) { |
455 | 3 | data.SetData(data_sp, 0, data_sp->GetByteSize()); |
456 | 3 | archive_sp = Archive::ParseAndCacheArchiveForFile( |
457 | 3 | file, ArchSpec(), file_mod_time, file_offset, data, archive_type); |
458 | 3 | } |
459 | 3 | } |
460 | | |
461 | 3 | if (archive_sp) { |
462 | 3 | const size_t num_objects = archive_sp->GetNumObjects(); |
463 | 8 | for (size_t idx = 0; idx < num_objects; ++idx) {
464 | 5 | const Object *object = archive_sp->GetObjectAtIndex(idx); |
465 | 5 | if (object) { |
466 | 5 | if (archive_sp->GetArchiveType() == ArchiveType::ThinArchive) { |
467 | 3 | if (object->ar_name.IsEmpty()) |
468 | 0 | continue; |
469 | 3 | FileSpec child = GetChildFileSpecificationsFromThin( |
470 | 3 | object->ar_name.GetStringRef(), file); |
471 | 3 | if (ObjectFile::GetModuleSpecifications(child, 0, object->file_size, |
472 | 3 | specs)) { |
473 | 3 | ModuleSpec &spec = |
474 | 3 | specs.GetModuleSpecRefAtIndex(specs.GetSize() - 1); |
475 | 3 | llvm::sys::TimePoint<> object_mod_time( |
476 | 3 | std::chrono::seconds(object->modification_time)); |
477 | 3 | spec.GetObjectName() = object->ar_name; |
478 | 3 | spec.SetObjectOffset(0); |
479 | 3 | spec.SetObjectSize(object->file_size); |
480 | 3 | spec.GetObjectModificationTime() = object_mod_time; |
481 | 3 | } |
482 | 3 | continue; |
483 | 3 | } |
484 | 2 | const lldb::offset_t object_file_offset = |
485 | 2 | file_offset + object->file_offset; |
486 | 2 | if (object->file_offset < file_size && file_size > object_file_offset) { |
487 | 2 | if (ObjectFile::GetModuleSpecifications( |
488 | 2 | file, object_file_offset, file_size - object_file_offset, |
489 | 2 | specs)) { |
490 | 2 | ModuleSpec &spec = |
491 | 2 | specs.GetModuleSpecRefAtIndex(specs.GetSize() - 1); |
492 | 2 | llvm::sys::TimePoint<> object_mod_time( |
493 | 2 | std::chrono::seconds(object->modification_time)); |
494 | 2 | spec.GetObjectName() = object->ar_name; |
495 | 2 | spec.SetObjectOffset(object_file_offset); |
496 | 2 | spec.SetObjectSize(object->file_size); |
497 | 2 | spec.GetObjectModificationTime() = object_mod_time; |
498 | 2 | } |
499 | 2 | } |
500 | 2 | } |
501 | 5 | } |
502 | 3 | } |
503 | 3 | const size_t end_count = specs.GetSize(); |
504 | 3 | size_t num_specs_added = end_count - initial_count; |
505 | 3 | if (set_archive_arch && num_specs_added > 0) { |
506 | | // The archive was created but we didn't have an architecture so we need to |
507 | | // set it |
508 | 3 | for (size_t i = initial_count; i < end_count; ++i) {
509 | 3 | ModuleSpec module_spec; |
510 | 3 | if (specs.GetModuleSpecAtIndex(i, module_spec)) { |
511 | 3 | if (module_spec.GetArchitecture().IsValid()) { |
512 | 3 | archive_sp->SetArchitecture(module_spec.GetArchitecture()); |
513 | 3 | break; |
514 | 3 | } |
515 | 3 | } |
516 | 3 | } |
517 | 3 | } |
518 | 3 | return num_specs_added; |
519 | 2.27k | } |