/*
 * Copyright (C) 2012-2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <pthread.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>

#include <chrono>
#include <condition_variable>
#include <list>
#include <memory>

#include <log/log.h>
#include <sysutils/SocketClient.h>

#include "LogBuffer.h"
#include "LogWriter.h"

class LogReaderList;
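
// A LogReaderThread services a single attached reader: it flushes the requested logs from the
// LogBuffer through its LogWriter and, unless it is a non-blocking (dumpAndClose) reader, keeps
// forwarding newly logged entries until it is released (summary inferred from the members and
// comments below).
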
class LogReaderThread {
  public:
    LogReaderThread(LogBuffer* log_buffer, LogReaderList* reader_list,
                    std::unique_ptr<LogWriter> writer, bool non_block, unsigned long tail,
                    LogMask log_mask, pid_t pid, log_time start_time, uint64_t sequence,
                    std::chrono::steady_clock::time_point deadline);
    void triggerReader_Locked() { thread_triggered_condition_.notify_all(); }

    void triggerSkip_Locked(log_id_t id, unsigned int skip) { skip_ahead_[id] = skip; }
    void cleanSkip_Locked();

    void release_Locked() {
        // Gracefully shut down the socket so a reader blocked in a socket write wakes up and can
        // exit, rather than stalling prune progress indefinitely.
        writer_->Shutdown();
        release_ = true;
        thread_triggered_condition_.notify_all();
    }

    bool IsWatching(log_id_t id) const { return flush_to_state_->log_mask() & (1 << id); }
    bool IsWatchingMultiple(LogMask log_mask) const {
        return flush_to_state_->log_mask() & log_mask;
    }

    std::string name() const { return writer_->name(); }
    uint64_t start() const { return flush_to_state_->start(); }
    std::chrono::steady_clock::time_point deadline() const { return deadline_; }
    FlushToState& flush_to_state() { return *flush_to_state_; }

  private:
    void ThreadFunction();

    // FlushTo() filter callbacks: FilterFirstPass() counts the matching messages, which is
    // needed to honor tail requests, and FilterSecondPass() selects the messages actually sent
    // to the reader.  Leading chatty entries are handled by LogBuffer::FlushTo() itself, so a
    // tail read may return fewer entries than requested rather than start with a chatty entry.
    FilterResult FilterFirstPass(log_id_t log_id, pid_t pid, uint64_t sequence, log_time realtime);
    FilterResult FilterSecondPass(log_id_t log_id, pid_t pid, uint64_t sequence, log_time realtime);

    std::condition_variable thread_triggered_condition_;
    LogBuffer* log_buffer_;
    LogReaderList* reader_list_;
    std::unique_ptr<LogWriter> writer_;

    // Set to true to cause the thread to end and the LogReaderThread to delete itself.
    bool release_ = false;

    // If set to non-zero, only logs from this pid are sent to the reader.
    const pid_t pid_;

    // When a reader is referencing (via start()) old elements in the log buffer and the log
    // buffer's size grows past its memory limit, the log buffer may request the reader to skip
    // ahead a specified number of logs.
    unsigned int skip_ahead_[LOG_ID_MAX];
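    // Illustrative example (assumed values): during a prune the buffer might call
    // triggerSkip_Locked(LOG_ID_MAIN, 100), asking this reader to skip the next 100 main buffer
    // entries; cleanSkip_Locked() presumably resets these counters afterwards.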

    // LogBuffer::FlushTo() needs to store state across subsequent calls.
    std::unique_ptr<FlushToState> flush_to_state_;

    // These next three variables are used for reading only the most recent lines, aka
    // `adb logcat -t` / `adb logcat -T`.
    // tail_ is the number of most recent lines to print.
    unsigned long tail_;
    // count_ is the result of a first pass through the log buffer to determine how many total
    // messages there are.
    unsigned long count_;
    // index_ is used along with count_ to only start sending lines once index_ > (count_ - tail_)
    // and to disconnect the reader (if it is dumpAndClose, `adb logcat -t`) when index_ >= count_.
    unsigned long index_;
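    // Illustrative example (assumed numbers): for `adb logcat -t 5`, tail_ == 5; if the first
    // pass counts count_ == 100 messages, the second pass starts sending once index_ > 95 and
    // the reader is disconnected once index_ >= 100.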

    // When a reader requests logs starting from a given timestamp, it is stored here for the
    // first pass, so that logs accumulated in the buffer before this timestamp are ignored.
    log_time start_time_;
    // CLOCK_MONOTONIC based deadline used for log wrapping.  If this deadline expires before
    // logs wrap, then wake up and send the logs to the reader anyway.
    std::chrono::steady_clock::time_point deadline_;
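    // (Presumably this is what serves `logcat --wrap` readers, which ask to be woken when the
    // buffer is about to wrap or after a timeout, whichever comes first.)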
    // True if this reader is 'dumpAndClose', i.e. it will disconnect once it has read its
    // intended logs.
    const bool non_block_;
};
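
// Usage sketch: how an owner might construct and control a LogReaderThread, based only on the
// declarations above.  The numeric values, the GetWriterForClient() helper, and the pre-existing
// `log_buffer` and `reader_list` objects are assumptions for illustration, not logd's actual
// call site.
//
//   std::unique_ptr<LogWriter> writer = GetWriterForClient();  // hypothetical factory
//   auto reader = std::make_unique<LogReaderThread>(
//           &log_buffer, &reader_list, std::move(writer), /*non_block=*/true, /*tail=*/10,
//           /*log_mask=*/0xFFFFFFFF, /*pid=*/0, /*start_time=*/log_time::EPOCH, /*sequence=*/1,
//           /*deadline=*/std::chrono::steady_clock::time_point{});
//
//   // With the appropriate lock held (the `_Locked` suffix convention):
//   reader->triggerReader_Locked();  // wake the reader thread
//   reader->release_Locked();        // ask it to shut down; it then cleans itself up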