/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>

#include <atomic>
#include <optional>
#include <string>
#include <thread>

#include <android-base/logging.h>
#include <android-base/scopeguard.h>
#include <android-base/unique_fd.h>
#include <cutils/ashmem.h>
#include <cutils/native_handle.h>
#include <fmq/AidlMessageQueue.h>
#include <fmq/MessageQueue.h>

#include "fuzzer/FuzzedDataProvider.h"

using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
using android::hardware::kSynchronizedReadWrite;
using android::hardware::kUnsynchronizedWrite;

typedef int32_t payload_t;

// The readers/writers will wait during blocking calls
static constexpr int kBlockingTimeoutNs = 100000;

/*
 * MessageQueueBase.h contains asserts when memory allocation fails. So we need
 * to set a reasonable limit if we want to avoid those asserts.
 */
static constexpr size_t kAlignment = 8;
static constexpr size_t kMaxNumElements = PAGE_SIZE * 10 / sizeof(payload_t) - kAlignment + 1;

/*
 * Limit the custom grantor case to one page of memory.
 * If we want to increase this, we need to make sure that all of the grantors'
 * offsets plus extents are less than the size of the page-aligned ashmem
 * region that is created.
 */
static constexpr size_t kMaxCustomGrantorMemoryBytes = PAGE_SIZE;

/*
 * The read counter can be found in the shared memory 16 bytes before the start
 * of the ring buffer.
 */
static constexpr int kReadCounterOffsetBytes = 16;
/*
 * The write counter can be found in the shared memory 8 bytes before the start
 * of the ring buffer.
 */
static constexpr int kWriteCounterOffsetBytes = 8;
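/*
 * Illustrative sketch (not taken from the FMQ headers) of the default layout
 * that the two offsets above describe, for a queue created without a
 * user-supplied data fd:
 *
 *   | read counter (8B)  | write counter (8B) | ring buffer ...
 *   ^ ring - 16 bytes    ^ ring - 8 bytes     ^ ring buffer start
 *
 * The getCounterPtr() helpers defined below recover these addresses from the
 * descriptor's grantors rather than hard-coding the offsets.
 */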
static constexpr int kMaxNumSyncReaders = 1;
static constexpr int kMaxNumUnsyncReaders = 5;
static constexpr int kMaxDataPerReader = 1000;

typedef android::AidlMessageQueue<payload_t, SynchronizedReadWrite> AidlMessageQueueSync;
typedef android::AidlMessageQueue<payload_t, UnsynchronizedWrite> AidlMessageQueueUnsync;
typedef android::hardware::MessageQueue<payload_t, kSynchronizedReadWrite> MessageQueueSync;
typedef android::hardware::MessageQueue<payload_t, kUnsynchronizedWrite> MessageQueueUnsync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, SynchronizedReadWrite>
        AidlMQDescSync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, UnsynchronizedWrite>
        AidlMQDescUnsync;
typedef android::hardware::MQDescriptorSync<payload_t> MQDescSync;
typedef android::hardware::MQDescriptorUnsync<payload_t> MQDescUnsync;

// AIDL and HIDL have different ways of accessing the grantors
template <typename Desc>
uint64_t* getCounterPtr(payload_t* start, const Desc& desc, int grantorIndx);

uint64_t* createCounterPtr(payload_t* start, uint32_t offset, uint32_t data_offset) {
    // start is the address of the beginning of the FMQ data section in memory
    // offset is the overall offset of the counter in the FMQ memory
    // data_offset is the overall offset of the data section in the FMQ memory
    // start - data_offset = beginning address of the FMQ memory
    return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - data_offset + offset);
}

uint64_t* getCounterPtr(payload_t* start, const MQDescSync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors()[grantorIndx].offset;
    uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const MQDescUnsync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors()[grantorIndx].offset;
    uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const AidlMQDescSync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors[grantorIndx].offset;
    uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const AidlMQDescUnsync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors[grantorIndx].offset;
    uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

template <typename Queue, typename Desc>
void reader(const Desc& desc, std::vector<uint8_t> readerData, bool userFd) {
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    payload_t* ring = reinterpret_cast<payload_t*>(readMq.getRingBufferPtr());
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = fdp.ConsumeIntegralInRange<size_t>(0, kMaxNumElements);
        if (!readMq.beginRead(numElements, &tx)) {
            continue;
        }
        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();

        // the ring buffer is only next to the read/write counters when there
        // is no user-supplied fd
        if (!userFd) {
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* writeCounter =
                        getCounterPtr(ring, desc, android::hardware::details::WRITEPTRPOS);
                *writeCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        (void)std::to_string(*firstStart);

        readMq.commitRead(numElements);
    }
}

template <typename Queue, typename Desc>
void readerBlocking(const Desc& desc, std::vector<uint8_t>& readerData,
                    std::atomic<size_t>& readersNotFinished,
                    std::atomic<size_t>& writersNotFinished) {
    android::base::ScopeGuard guard([&readersNotFinished]() { readersNotFinished--; });
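    // The guard runs on every exit path (early return included) so that the
    // blocking writer's `readersNotFinished > 0` loop condition can eventually
    // observe this reader as finished and stop.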
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    do {
        size_t count = fdp.remaining_bytes()
                               ? fdp.ConsumeIntegralInRange<size_t>(0, readMq.getQuantumCount() + 1)
                               : 1;
        std::vector<payload_t> data;
        data.resize(count);
        readMq.readBlocking(data.data(), count, kBlockingTimeoutNs);
    } while (fdp.remaining_bytes() > sizeof(size_t) && writersNotFinished > 0);
}

// Can't use blocking calls with Unsync queues (there is a static_assert)
template <>
void readerBlocking<AidlMessageQueueUnsync, AidlMQDescUnsync>(const AidlMQDescUnsync&,
                                                              std::vector<uint8_t>&,
                                                              std::atomic<size_t>&,
                                                              std::atomic<size_t>&) {}

template <>
void readerBlocking<MessageQueueUnsync, MQDescUnsync>(const MQDescUnsync&, std::vector<uint8_t>&,
                                                      std::atomic<size_t>&,
                                                      std::atomic<size_t>&) {}

template <typename Queue, typename Desc>
void writer(const Desc& desc, Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) {
    payload_t* ring = reinterpret_cast<payload_t*>(writeMq.getRingBufferPtr());
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = 1;
        if (!writeMq.beginWrite(numElements, &tx)) {
            // need to consume something so we don't end up looping forever
            fdp.ConsumeIntegral<uint8_t>();
            continue;
        }
        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();

        // the ring buffer is only next to the read/write counters when there
        // is no user-supplied fd
        if (!userFd) {
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* readCounter =
                        getCounterPtr(ring, desc, android::hardware::details::READPTRPOS);
                *readCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        *firstStart = fdp.ConsumeIntegral<payload_t>();

        writeMq.commitWrite(numElements);
    }
}

template <typename Queue>
void writerBlocking(Queue& writeMq, FuzzedDataProvider& fdp,
                    std::atomic<size_t>& writersNotFinished,
                    std::atomic<size_t>& readersNotFinished) {
    android::base::ScopeGuard guard([&writersNotFinished]() { writersNotFinished--; });
    while (fdp.remaining_bytes() > sizeof(size_t) && readersNotFinished > 0) {
        size_t count = fdp.ConsumeIntegralInRange<size_t>(0, writeMq.getQuantumCount() + 1);
        std::vector<payload_t> data;
        for (size_t i = 0; i < count; i++) {
            data.push_back(fdp.ConsumeIntegral<payload_t>());
        }
        writeMq.writeBlocking(data.data(), count, kBlockingTimeoutNs);
    }
}

// Can't use blocking calls with Unsync queues (there is a static_assert)
template <>
void writerBlocking<AidlMessageQueueUnsync>(AidlMessageQueueUnsync&, FuzzedDataProvider&,
                                            std::atomic<size_t>&, std::atomic<size_t>&) {}

template <>
void writerBlocking<MessageQueueUnsync>(MessageQueueUnsync&, FuzzedDataProvider&,
                                        std::atomic<size_t>&, std::atomic<size_t>&) {}

template <typename Queue, typename Desc>
inline std::optional<Desc> getDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp);

template <typename Queue, typename Desc>
inline std::optional<Desc> getAidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) {
    if (queue) {
        // get the existing descriptor from the queue
        Desc desc = queue->dupeDesc();
        if (desc.handle.fds[0].get() == -1) {
            return std::nullopt;
        } else {
            return std::make_optional(std::move(desc));
        }
    } else {
        // create a custom descriptor
        std::vector<aidl::android::hardware::common::fmq::GrantorDescriptor> grantors;
        size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4);
        for (size_t i = 0; i < numGrantors; i++) {
            grantors.push_back({fdp.ConsumeIntegralInRange<int32_t>(-2, 2) /* fdIndex */,
                                fdp.ConsumeIntegralInRange<int32_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* offset */,
                                fdp.ConsumeIntegralInRange<int64_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* extent */});
            // ashmem region is PAGE_SIZE and we need to make sure all of the
            // pointers and data region fit inside
            if (grantors.back().offset + grantors.back().extent > PAGE_SIZE) return std::nullopt;
        }

        android::base::unique_fd fd(
                ashmem_create_region("AidlCustomGrantors", kMaxCustomGrantorMemoryBytes));
        ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
        aidl::android::hardware::common::NativeHandle handle;
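        // Each grantor's fdIndex refers into handle.fds, so the single ashmem
        // fd is placed at index 0; the fuzzed fdIndex values outside that
        // range exercise the descriptor-validation paths.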
        handle.fds.emplace_back(fd.get());

        return std::make_optional<Desc>(
                {grantors, std::move(handle), sizeof(payload_t), fdp.ConsumeBool()});
    }
}

template <>
inline std::optional<AidlMQDescSync> getDesc(std::unique_ptr<AidlMessageQueueSync>& queue,
                                             FuzzedDataProvider& fdp) {
    return getAidlDesc<AidlMessageQueueSync, AidlMQDescSync>(queue, fdp);
}

template <>
inline std::optional<AidlMQDescUnsync> getDesc(std::unique_ptr<AidlMessageQueueUnsync>& queue,
                                               FuzzedDataProvider& fdp) {
    return getAidlDesc<AidlMessageQueueUnsync, AidlMQDescUnsync>(queue, fdp);
}

template <typename Queue, typename Desc>
inline std::optional<Desc> getHidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) {
    if (queue) {
        auto desc = queue->getDesc();
        if (!desc->isHandleValid()) {
            return std::nullopt;
        } else {
            return std::make_optional(std::move(*desc));
        }
    } else {
        // create a custom descriptor
        std::vector<android::hardware::GrantorDescriptor> grantors;
        size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4);
        for (size_t i = 0; i < numGrantors; i++) {
            grantors.push_back({fdp.ConsumeIntegral<uint32_t>() /* flags */,
                                fdp.ConsumeIntegralInRange<uint32_t>(0, 2) /* fdIndex */,
                                fdp.ConsumeIntegralInRange<uint32_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* offset */,
                                fdp.ConsumeIntegralInRange<uint64_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* extent */});
            // ashmem region is PAGE_SIZE and we need to make sure all of the
            // pointers and data region fit inside
            if (grantors.back().offset + grantors.back().extent > PAGE_SIZE) return std::nullopt;
        }

        native_handle_t* handle = native_handle_create(1, 0);
        int ashmemFd = ashmem_create_region("HidlCustomGrantors", kMaxCustomGrantorMemoryBytes);
        ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
        handle->data[0] = ashmemFd;

        return std::make_optional<Desc>(grantors, handle, sizeof(payload_t));
    }
}

template <>
inline std::optional<MQDescSync> getDesc(std::unique_ptr<MessageQueueSync>& queue,
                                         FuzzedDataProvider& fdp) {
    return getHidlDesc<MessageQueueSync, MQDescSync>(queue, fdp);
}

template <>
inline std::optional<MQDescUnsync> getDesc(std::unique_ptr<MessageQueueUnsync>& queue,
                                           FuzzedDataProvider& fdp) {
    return getHidlDesc<MessageQueueUnsync, MQDescUnsync>(queue, fdp);
}

template <typename Queue, typename Desc>
void fuzzWithReaders(std::vector<uint8_t>& writerData,
                     std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
    FuzzedDataProvider fdp(&writerData[0], writerData.size());
    bool evFlag = blocking || fdp.ConsumeBool();
    size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
    size_t bufferSize = numElements * sizeof(payload_t);
    bool userFd = fdp.ConsumeBool();
    bool manualGrantors = fdp.ConsumeBool();
    std::unique_ptr<Queue> writeMq = nullptr;
    if (manualGrantors) {
        std::optional<Desc> customDesc(getDesc<Queue, Desc>(writeMq, fdp));
        if (customDesc) {
            writeMq = std::make_unique<Queue>(*customDesc);
        }
    } else {
        android::base::unique_fd dataFd;
        if (userFd) {
            // run test with our own data region
            dataFd.reset(::ashmem_create_region("CustomData", bufferSize));
        }
        writeMq = std::make_unique<Queue>(numElements, evFlag, std::move(dataFd), bufferSize);
    }

    if (writeMq == nullptr || !writeMq->isValid()) {
        return;
    }
    // get optional desc
    const std::optional<Desc> desc(std::move(getDesc<Queue, Desc>(writeMq, fdp)));
    CHECK(desc != std::nullopt);

    std::atomic<size_t> readersNotFinished = readerData.size();
    std::atomic<size_t> writersNotFinished = 1;
    std::vector<std::thread> readers;
    for (size_t i = 0; i < readerData.size(); i++) {
        if (blocking) {
            readers.emplace_back(readerBlocking<Queue, Desc>, std::ref(*desc),
                                 std::ref(readerData[i]), std::ref(readersNotFinished),
                                 std::ref(writersNotFinished));
        } else {
            readers.emplace_back(reader<Queue, Desc>, std::ref(*desc), std::ref(readerData[i]),
                                 userFd);
        }
    }

    if (blocking) {
        writerBlocking<Queue>(*writeMq, fdp, writersNotFinished, readersNotFinished);
    } else {
        writer<Queue, Desc>(*desc, *writeMq, fdp, userFd);
    }

    for (auto& reader : readers) {
        reader.join();
    }
}

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    if (size < 1 || size > 50000) {
        return 0;
    }
    FuzzedDataProvider fdp(data, size);

    bool fuzzSync = fdp.ConsumeBool();
    std::vector<std::vector<uint8_t>> readerData;
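    // The front of the input is carved into per-reader byte streams below;
    // whatever remains seeds the writer-side FuzzedDataProvider.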
    uint8_t numReaders = fuzzSync ? fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumSyncReaders)
                                  : fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumUnsyncReaders);
    for (int i = 0; i < numReaders; i++) {
        readerData.emplace_back(fdp.ConsumeBytes<uint8_t>(kMaxDataPerReader));
    }
    bool fuzzBlocking = fdp.ConsumeBool();
    std::vector<uint8_t> writerData = fdp.ConsumeRemainingBytes<uint8_t>();
    if (fuzzSync) {
        fuzzWithReaders<MessageQueueSync, MQDescSync>(writerData, readerData, fuzzBlocking);
        fuzzWithReaders<AidlMessageQueueSync, AidlMQDescSync>(writerData, readerData,
                                                              fuzzBlocking);
    } else {
        fuzzWithReaders<MessageQueueUnsync, MQDescUnsync>(writerData, readerData, false);
        fuzzWithReaders<AidlMessageQueueUnsync, AidlMQDescUnsync>(writerData, readerData, false);
    }

    return 0;
}
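/*
 * For reference, a minimal non-fuzzed round trip over the same API that this
 * harness stresses would look roughly like the sketch below (the element
 * count of 16 is an arbitrary assumption, not taken from this file):
 *
 *   android::AidlMessageQueue<payload_t, SynchronizedReadWrite> mq(16);
 *   payload_t in = 42;
 *   payload_t out = 0;
 *   CHECK(mq.write(&in));   // producer side
 *   CHECK(mq.read(&out));   // consumer side
 *   CHECK_EQ(in, out);
 */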