Application Component Granularity MagiskHide

Before switching to the new MagiskHide implementation (APK inotify),
logcat parsing provided us with plenty of information to target a process.
We were targeting components so that multi-process apps
could still be hidden properly.

After switching to the new implementation, our granularity is limited
to the UID of the process. This is especially dangerous since Android
allows apps signed with the same signature to share UIDs, and many system
apps utilize this to run some of their services with elevated permissions.
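For illustration, the only attribute the inotify-based monitor can cheaply
read for a newly spawned process is its UID, e.g. by stat-ing its procfs
entry. A minimal sketch of such a lookup (the helper name uid_of is
hypothetical and not part of this commit):

    #include <sys/stat.h>
    #include <cstdio>

    // Hypothetical helper: read the UID of a running process from procfs.
    // Every app sharing an Android sharedUserId reports the same value here,
    // so the UID alone cannot tell apart processes running under a shared UID.
    static int uid_of(int pid) {
        char path[32];
        struct stat st;
        snprintf(path, sizeof(path), "/proc/%d", pid);
        if (stat(path, &st) < 0)
            return -1;  // process already exited
        return st.st_uid;
    }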

This commit introduces process name matching. We cannot blindly
target a UID, so the workaround is to verify the process name before
unmounting.
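A sketch of the kind of check this implies, assuming the Android process
name is the first NUL-terminated entry in /proc/<pid>/cmdline (the helper
name cmdline_matches is illustrative; see proc_name_match in the diff for
the actual implementation):

    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstring>

    // Sketch: compare a target process name against /proc/<pid>/cmdline.
    // Zygote rewrites argv[0] to the Android process name, and cmdline
    // entries are NUL separated, so the first entry is what we match on.
    static bool cmdline_matches(int pid, const char *name) {
        char path[32], buf[256];
        snprintf(path, sizeof(path), "/proc/%d/cmdline", pid);
        int fd = open(path, O_RDONLY | O_CLOEXEC);
        if (fd < 0)
            return false;
        ssize_t len = read(fd, buf, sizeof(buf) - 1);
        close(fd);
        if (len <= 0)
            return false;
        buf[len] = '\0';
        return strcmp(buf, name) == 0;
    }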

The tricky part is that app developers are allowed to name the
process of each component whatever they want; there is no 'one
rule to catch them all' for targeting a specific package. As a result,
Magisk Manager is updated to scan through all components of all apps
and show the different processes of the same app, each as a separate
hide target in the list.

The hide target database also has to be updated accordingly.
Each hide target is now a <package name, process name> pair. The
magiskhide CLI and Magisk Manager are updated to support this new
target format.
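A rough sketch of how such a pair can feed the lookup structures that
appear in the diff (hide_map mapping process -> package, uid_proc_map
mapping UID -> candidate process names); the helper functions below are
illustrative only:

    #include <map>
    #include <string>
    #include <string_view>
    #include <vector>

    // Illustration only: how <package name, process name> targets can feed
    // the two lookup structures visible in the diff. hide_map stores the raw
    // targets (process -> package); uid_proc_map is rebuilt from packages.xml
    // so a forked PID can be resolved as UID -> candidate process names.
    static std::map<std::string, std::string> hide_map;
    static std::map<int, std::vector<std::string_view>> uid_proc_map;

    static void add_hide_target(const std::string &pkg, const std::string &process) {
        hide_map[process] = pkg;
    }

    static void map_uid_to_package(int uid, std::string_view pkg) {
        // Keys of std::map are stable, so string_views into them stay valid.
        for (auto &entry : hide_map)
            if (entry.second == pkg)
                uid_proc_map[uid].emplace_back(entry.first);
    }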
topjohnwu
2019-03-01 17:08:08 -05:00
parent 885e3c574b
commit b1afd554fc
11 changed files with 337 additions and 245 deletions


@@ -33,20 +33,7 @@ using namespace std;
extern char *system_block, *vendor_block, *data_block;
static int inotify_fd = -1;
static set<int> hide_uid;
// Workaround for the lack of pthread_cancel
static void term_thread(int) {
LOGD("proc_monitor: running cleanup\n");
hide_list.clear();
hide_uid.clear();
hide_enabled = false;
pthread_mutex_destroy(&list_lock);
close(inotify_fd);
inotify_fd = -1;
LOGD("proc_monitor: terminating\n");
pthread_exit(nullptr);
}
static void term_thread(int);
static inline int read_ns(const int pid, struct stat *st) {
char path[32];
@@ -75,25 +62,6 @@ static inline int parse_ppid(const int pid) {
return ppid;
}
static bool is_snet(const int pid) {
char path[32];
char buf[64];
int fd;
ssize_t len;
sprintf(path, "/proc/%d/cmdline", pid);
fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd == -1)
return false;
len = read(fd, buf, sizeof(buf));
close(fd);
if (len == -1)
return false;
return !strcmp(buf, SAFETYNET_PROCESS);
}
static void hide_daemon(int pid) {
RunFinally fin([=]() -> void {
// Send resume signal
@@ -143,10 +111,18 @@ static void hide_daemon(int pid) {
lazy_unmount(s.data());
}
// A mapping from pid to namespace inode to avoid time-consuming GC
static map<int, uint64_t> pid_ns_map;
/********************
* All the damn maps
********************/
static bool process_pid(int pid) {
map<string, string> hide_map; /* process -> package_name */
static map<int, uint64_t> pid_ns_map; /* pid -> last ns inode */
static map<int, vector<string_view>> uid_proc_map; /* uid -> list of process */
// All maps are protected by this lock
pthread_mutex_t map_lock;
static bool check_pid(int pid) {
// We're only interested in PIDs > 1000
if (pid <= 1000)
return true;
@@ -154,7 +130,8 @@ static bool process_pid(int pid) {
struct stat ns, pns;
int ppid;
int uid = get_uid(pid);
if (hide_uid.count(uid)) {
auto it = uid_proc_map.find(uid);
if (it != uid_proc_map.end()) {
// Make sure we can read mount namespace
if ((ppid = parse_ppid(pid)) < 0 || read_ns(pid, &ns) || read_ns(ppid, &pns))
return true;
@@ -167,20 +144,21 @@ static bool process_pid(int pid) {
if (pos != pid_ns_map.end() && pos->second == ns.st_ino)
return true;
if (uid == gms_uid) {
// Check /proc/uid/cmdline to see if it's SAFETYNET_PROCESS
if (!is_snet(pid))
return true;
// Check whether process name match hide list
const char *process = nullptr;
for (auto &proc : it->second)
if (proc_name_match(pid, proc.data()))
process = proc.data();
LOGD("proc_monitor: " SAFETYNET_PROCESS "\n");
}
if (!process)
return true;
// Send pause signal ASAP
if (kill(pid, SIGSTOP) == -1)
return true;
pid_ns_map[pid] = ns.st_ino;
LOGI("proc_monitor: UID=[%d] PID=[%d] ns=[%llu]\n", uid, pid, ns.st_ino);
LOGI("proc_monitor: [%s] UID=[%d] PID=[%d] ns=[%llu]\n", process, uid, pid, ns.st_ino);
/*
* The setns system call do not support multithread processes
@@ -202,29 +180,34 @@ static int xinotify_add_watch(int fd, const char* path, uint32_t mask) {
return ret;
}
static int new_inotify;
static const string_view APK_EXT(".apk");
static vector<string> hide_apks;
static bool parse_packages_xml(string_view &s) {
static const string_view APK_EXT(".apk");
if (!str_starts(s, "<package "))
return true;
/* <package key1="value1" key2="value2"....> */
char *start = (char *) s.data();
start[s.length() - 2] = '\0'; /* Remove trailing '>' */
char key[32], value[1024];
char *pkg = nullptr;
char *tok;
start += 9; /* Skip '<package ' */
while ((tok = strtok_r(nullptr, " ", &start))) {
sscanf(tok, "%[^=]=\"%[^\"]", key, value);
string_view key_view(key);
string_view value_view(value);
if (strcmp(key, "name") == 0) {
if (std::count(hide_list.begin(), hide_list.end(), value_view) == 0)
if (key_view == "name") {
for (auto &hide : hide_map) {
if (hide.second == value_view) {
pkg = hide.second.data();
break;
}
}
if (!pkg)
return true;
} else if (strcmp(key, "codePath") == 0) {
} else if (key_view == "codePath") {
if (ends_with(value_view, APK_EXT)) {
// Directly add to inotify list
hide_apks.emplace_back(value);
xinotify_add_watch(inotify_fd, value, IN_OPEN);
} else {
DIR *dir = opendir(value);
if (dir == nullptr)
@@ -232,36 +215,29 @@ static bool parse_packages_xml(string_view &s) {
struct dirent *entry;
while ((entry = xreaddir(dir))) {
if (ends_with(entry->d_name, APK_EXT)) {
strcpy(value + value_view.length(), "/");
value[value_view.length()] = '/';
strcpy(value + value_view.length() + 1, entry->d_name);
hide_apks.emplace_back(value);
xinotify_add_watch(inotify_fd, value, IN_OPEN);
break;
}
}
closedir(dir);
}
} else if (strcmp(key, "userId") == 0 || strcmp(key, "sharedUserId") == 0) {
hide_uid.insert(parse_int(value));
} else if (key_view == "userId" || key_view == "sharedUserId") {
int uid = parse_int(value);
for (auto &hide : hide_map) {
if (hide.second == pkg)
uid_proc_map[uid].emplace_back(hide.first);
}
}
}
return true;
}
void update_inotify_mask() {
new_inotify = inotify_init();
if (new_inotify < 0) {
LOGE("proc_monitor: Cannot initialize inotify: %s\n", strerror(errno));
int new_inotify = xinotify_init1(IN_CLOEXEC);
if (new_inotify < 0)
term_thread(TERM_THREAD);
}
fcntl(new_inotify, F_SETFD, FD_CLOEXEC);
LOGD("proc_monitor: Updating inotify list\n");
hide_apks.clear();
{
MutexGuard lock(list_lock);
hide_uid.clear();
file_readline("/data/system/packages.xml", parse_packages_xml, true);
}
// Swap out and close old inotify_fd
int tmp = inotify_fd;
@@ -269,11 +245,29 @@ void update_inotify_mask() {
if (tmp >= 0)
close(tmp);
for (auto apk : hide_apks)
xinotify_add_watch(inotify_fd, apk.data(), IN_OPEN);
LOGD("proc_monitor: Updating inotify list\n");
{
MutexGuard lock(map_lock);
uid_proc_map.clear();
file_readline("/data/system/packages.xml", parse_packages_xml, true);
}
xinotify_add_watch(inotify_fd, "/data/system", IN_CLOSE_WRITE);
}
// Workaround for the lack of pthread_cancel
static void term_thread(int) {
LOGD("proc_monitor: cleaning up\n");
hide_map.clear();
uid_proc_map.clear();
pid_ns_map.clear();
hide_enabled = false;
pthread_mutex_destroy(&map_lock);
close(inotify_fd);
inotify_fd = -1;
LOGD("proc_monitor: terminate\n");
pthread_exit(nullptr);
}
void proc_monitor() {
// Unblock user signals
sigset_t block_set;
@@ -296,8 +290,8 @@ void proc_monitor() {
if (event->mask & IN_OPEN) {
// Since we're just watching files,
// extracting file name is not possible from querying event
MutexGuard lock(list_lock);
crawl_procfs(process_pid);
MutexGuard lock(map_lock);
crawl_procfs(check_pid);
} else if ((event->mask & IN_CLOSE_WRITE) && strcmp(event->name, "packages.xml") == 0) {
LOGD("proc_monitor: /data/system/packages.xml updated\n");
update_inotify_mask();